Dataset schema (string length statistics per column):

  hip_filename    string, 5 to 84 characters
  hip_content     string, 79 to 9.69M characters
  cuda_filename   string, 4 to 83 characters
  cuda_content    string, 19 to 9.69M characters
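Each record below pairs an original CUDA source file with its hipify-generated HIP translation. For orientation, a minimal C++ sketch of how one record could be modeled (the struct name and comments are illustrative assumptions; only the four field names come from the schema above):

    #include <string>

    // Illustrative container for one dataset record: a CUDA source file
    // paired with the HIP source that hipify generated from it.
    struct HipifyPair {
        std::string hip_filename;   // e.g. "<hash>.hip"
        std::string hip_content;    // HIP source emitted by hipify
        std::string cuda_filename;  // e.g. "<hash>.cu"
        std::string cuda_content;   // original CUDA source
    };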
bf736db6c2334e12cfb5f5a530cc2e561cd195a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) { if (comp == (-0.0f - ldexpf(var_2 - (-1.5269E35f * var_3), 2))) { comp += -1.8053E0f * var_4 * -0.0f / -1.8291E35f; for (int i=0; i < var_1; ++i) { comp = fmodf(+1.5648E-37f * (-1.4791E-37f / (+1.8672E34f + (-1.4308E36f - +1.8486E-36f))), var_5 + +0.0f); comp = var_6 * var_7 - ldexpf(+1.4046E-41f, 2); } if (comp < var_8 * var_9 + (var_10 / var_11)) { comp += +1.2062E-36f * (+1.1615E-36f * -1.6866E-8f / var_12 - (-1.5701E16f - var_13)); } if (comp <= var_14 + var_15 / (var_16 - -1.9326E-36f)) { comp += (+1.5622E-6f - (var_17 * var_18 + (var_19 + fmodf((var_20 * -1.5086E34f / atan2f(coshf((var_21 * ceilf(var_22 * +1.3922E-26f))), (var_23 + (-0.0f * (var_24 / (+1.5309E36f - var_25)))))), var_26 * (var_27 - var_28))))); comp = (var_29 + var_30); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); float tmp_30 = atof(argv[30]); float tmp_31 = atof(argv[31]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31); hipDeviceSynchronize(); return 0; }
bf736db6c2334e12cfb5f5a530cc2e561cd195a8.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) { if (comp == (-0.0f - ldexpf(var_2 - (-1.5269E35f * var_3), 2))) { comp += -1.8053E0f * var_4 * -0.0f / -1.8291E35f; for (int i=0; i < var_1; ++i) { comp = fmodf(+1.5648E-37f * (-1.4791E-37f / (+1.8672E34f + (-1.4308E36f - +1.8486E-36f))), var_5 + +0.0f); comp = var_6 * var_7 - ldexpf(+1.4046E-41f, 2); } if (comp < var_8 * var_9 + (var_10 / var_11)) { comp += +1.2062E-36f * (+1.1615E-36f * -1.6866E-8f / var_12 - (-1.5701E16f - var_13)); } if (comp <= var_14 + var_15 / (var_16 - -1.9326E-36f)) { comp += (+1.5622E-6f - (var_17 * var_18 + (var_19 + fmodf((var_20 * -1.5086E34f / atan2f(coshf((var_21 * ceilf(var_22 * +1.3922E-26f))), (var_23 + (-0.0f * (var_24 / (+1.5309E36f - var_25)))))), var_26 * (var_27 - var_28))))); comp = (var_29 + var_30); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); float tmp_30 = atof(argv[30]); float tmp_31 = atof(argv[31]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31); cudaDeviceSynchronize(); return 0; }
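Aside from the hipify banner comment and the added #include "hip/hip_runtime.h", the only substantive differences between the .cu file and its .hip counterpart in this pair are the kernel launch and the device synchronization call. A condensed excerpt of that mapping, with the 31 tmp_* arguments abbreviated for readability:

    // CUDA original (.cu):
    compute<<<1, 1>>>(tmp_1, tmp_2, /* ... */, tmp_31);
    cudaDeviceSynchronize();

    // HIP translation emitted by hipify (.hip):
    hipLaunchKernelGGL((compute), dim3(1), dim3(1), 0, 0,
                       tmp_1, tmp_2, /* ... */, tmp_31);
    hipDeviceSynchronize();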
b906f48da68f4601e1dd81494a719d1d3bef3ecb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, kernels #include <assert.h> #include "helper_cuda.h" //#include <helper_cuda.h> #include "scan_largearray_kernel.h" // MP4.2 - Host Helper Functions (allocate your own data structure...) float ** BlockSums; float ** BlockSumsSummed; float ** HostSums; float ** HostSumsSummed; int * sizes; #define CONFLICT_FREE_OFFSET(n) \ ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) void preallocBlockSums(int num_elements) { int n = num_elements; int i = 0; while ((n = ceil((float)n / (float)(BLOCK_SIZE*2))) > 1) { i ++; } i++; BlockSums = (float**)malloc(sizeof(float*) * i); BlockSumsSummed = (float**)malloc(sizeof(float*) * i); HostSums = (float**)malloc(sizeof(float*) * i); HostSumsSummed = (float**)malloc(sizeof(float*) * i); sizes = (int*) malloc(sizeof(int) * i); n = num_elements; i = 0; while ((n = ceil((float)n / (float)(BLOCK_SIZE*2))) > 1) { hipMalloc((void**)&(BlockSums[i]), n * sizeof(float)); hipMalloc((void**)&(BlockSumsSummed[i]), n * sizeof(float)); HostSums[i]= (float*)malloc(n* sizeof(float)); HostSumsSummed[i]= (float*)malloc(n* sizeof(float)); // for(int x = 0; x < n; x ++) // { // HostSums[i][x] = 0.0; // HostSumsSummed[i][x] = 0.0; // } // hipMemcpy(BlockSums[i], HostSums[i], n * sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(BlockSumsSummed[i], HostSumsSummed[i], n * sizeof(float), hipMemcpyHostToDevice); sizes[i] = n; i++; } hipMalloc((void**)&(BlockSums[i]), n * sizeof(float)); hipMalloc((void**)&(BlockSumsSummed[i]), n * sizeof(float)); HostSums[i]= (float*)malloc(n* sizeof(float)); HostSumsSummed[i]= (float*)malloc(n* sizeof(float)); sizes[i] = n; // for(int x = 0; x < n; x ++) // { // HostSums[i][x] = 0.0; // HostSumsSummed[i][x] = 0.0; // } // hipMemcpy(BlockSums[i], HostSums[i], n * sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(BlockSumsSummed[i], HostSumsSummed[i], n * sizeof(float), hipMemcpyHostToDevice); } void preallocBlockSums_(int num_elements) { } // MP4.2 - Device Functions // MP4.2 - Kernel Functions __global__ void AdjustIncr (float * arr, float * incr, int n) { if(blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2 + 1 < n) { arr[blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2 + 1] += incr[blockIdx.x]; arr[blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2] += incr[blockIdx.x]; } else if (blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2 < n) { arr[blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2] += incr[blockIdx.x]; } } __global__ void prescanArrayKernel(float *outArray, float * inArray, int numElements, float *blockSums) { __shared__ float temp[BLOCK_SIZE * 2 + BLOCK_SIZE/8]; int tid= threadIdx.x; int start = (BLOCK_SIZE * 2) * blockIdx.x; int aj, bj; aj = tid; bj = tid + BLOCK_SIZE; int bankOffsetA = CONFLICT_FREE_OFFSET(aj); int bankOffsetB = CONFLICT_FREE_OFFSET(bj); if(numElements > start + aj) { temp[aj + bankOffsetA] = inArray[start + aj]; } else { temp[aj + bankOffsetA] = 0.0; } if(numElements > start + bj) { temp[bj + bankOffsetB] = inArray[start + bj]; } else { temp[bj + bankOffsetB] = 0.0; } int offset = 1; for (int d = BLOCK_SIZE; d>0; d>>=1) { __syncthreads(); if(tid < d) { int ai = offset * (2 * tid + 1) -1; int bi = offset * (2 * tid + 2) -1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } if (tid == 0) { temp[BLOCK_SIZE * 2 - 1 + CONFLICT_FREE_OFFSET(BLOCK_SIZE * 2 - 1)] = 0; } for(int d = 1; d < BLOCK_SIZE * 2; d*=2) { offset >>= 1; __syncthreads(); if (tid < d) { int ai = offset * (2 * tid + 1) -1; 
int bi = offset * (2 * tid + 2) -1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); if (numElements > start + aj) { outArray[start + aj] = temp[aj + bankOffsetA]; } else { outArray[start + aj] = 0; } if (numElements > start + bj) { outArray[start + bj] = temp[bj + bankOffsetB]; } else { outArray[start + bj] = 0; } if(tid == 0) blockSums[blockIdx.x] = temp[2 * BLOCK_SIZE - 1] + inArray[start + 2 * BLOCK_SIZE - 1]; } // **===-------- MP4.2 - Modify the body of this function -----------===** // You may need to make multiple kernel calls, make your own kernel // function in this file, and then call them from here. void prescanArrayHelper(float *outArray, float *inArray, int numElements, int index) { // printf("starting helper index %d\n", index); dim3 dim_block, dim_grid; dim_block.x = BLOCK_SIZE; dim_block.y = dim_block.z = 1; dim_grid.x = ceil((float)(numElements / (float)(dim_block.x * 2))); dim_grid.y = dim_grid.z = 1; hipLaunchKernelGGL(( prescanArrayKernel), dim3(dim_grid),dim3(dim_block), 0, 0, outArray, inArray, numElements, BlockSums[index]); if (dim_grid.x > 1) { prescanArrayHelper(BlockSumsSummed[index], BlockSums[index], dim_grid.x, index+1); hipLaunchKernelGGL(( AdjustIncr), dim3(dim_grid), dim3(dim_block), 0, 0, outArray, BlockSumsSummed[index], numElements); } } void prescanArray(float *outArray, float *inArray, int numElements) { prescanArrayHelper(outArray,inArray, numElements, 0); } // **===-----------------------------------------------------------===**
b906f48da68f4601e1dd81494a719d1d3bef3ecb.cu
// includes, kernels #include <assert.h> #include "helper_cuda.h" //#include <helper_cuda.h> #include "scan_largearray_kernel.h" // MP4.2 - Host Helper Functions (allocate your own data structure...) float ** BlockSums; float ** BlockSumsSummed; float ** HostSums; float ** HostSumsSummed; int * sizes; #define CONFLICT_FREE_OFFSET(n) \ ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) void preallocBlockSums(int num_elements) { int n = num_elements; int i = 0; while ((n = ceil((float)n / (float)(BLOCK_SIZE*2))) > 1) { i ++; } i++; BlockSums = (float**)malloc(sizeof(float*) * i); BlockSumsSummed = (float**)malloc(sizeof(float*) * i); HostSums = (float**)malloc(sizeof(float*) * i); HostSumsSummed = (float**)malloc(sizeof(float*) * i); sizes = (int*) malloc(sizeof(int) * i); n = num_elements; i = 0; while ((n = ceil((float)n / (float)(BLOCK_SIZE*2))) > 1) { cudaMalloc((void**)&(BlockSums[i]), n * sizeof(float)); cudaMalloc((void**)&(BlockSumsSummed[i]), n * sizeof(float)); HostSums[i]= (float*)malloc(n* sizeof(float)); HostSumsSummed[i]= (float*)malloc(n* sizeof(float)); // for(int x = 0; x < n; x ++) // { // HostSums[i][x] = 0.0; // HostSumsSummed[i][x] = 0.0; // } // cudaMemcpy(BlockSums[i], HostSums[i], n * sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(BlockSumsSummed[i], HostSumsSummed[i], n * sizeof(float), cudaMemcpyHostToDevice); sizes[i] = n; i++; } cudaMalloc((void**)&(BlockSums[i]), n * sizeof(float)); cudaMalloc((void**)&(BlockSumsSummed[i]), n * sizeof(float)); HostSums[i]= (float*)malloc(n* sizeof(float)); HostSumsSummed[i]= (float*)malloc(n* sizeof(float)); sizes[i] = n; // for(int x = 0; x < n; x ++) // { // HostSums[i][x] = 0.0; // HostSumsSummed[i][x] = 0.0; // } // cudaMemcpy(BlockSums[i], HostSums[i], n * sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(BlockSumsSummed[i], HostSumsSummed[i], n * sizeof(float), cudaMemcpyHostToDevice); } void preallocBlockSums_(int num_elements) { } // MP4.2 - Device Functions // MP4.2 - Kernel Functions __global__ void AdjustIncr (float * arr, float * incr, int n) { if(blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2 + 1 < n) { arr[blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2 + 1] += incr[blockIdx.x]; arr[blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2] += incr[blockIdx.x]; } else if (blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2 < n) { arr[blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x * 2] += incr[blockIdx.x]; } } __global__ void prescanArrayKernel(float *outArray, float * inArray, int numElements, float *blockSums) { __shared__ float temp[BLOCK_SIZE * 2 + BLOCK_SIZE/8]; int tid= threadIdx.x; int start = (BLOCK_SIZE * 2) * blockIdx.x; int aj, bj; aj = tid; bj = tid + BLOCK_SIZE; int bankOffsetA = CONFLICT_FREE_OFFSET(aj); int bankOffsetB = CONFLICT_FREE_OFFSET(bj); if(numElements > start + aj) { temp[aj + bankOffsetA] = inArray[start + aj]; } else { temp[aj + bankOffsetA] = 0.0; } if(numElements > start + bj) { temp[bj + bankOffsetB] = inArray[start + bj]; } else { temp[bj + bankOffsetB] = 0.0; } int offset = 1; for (int d = BLOCK_SIZE; d>0; d>>=1) { __syncthreads(); if(tid < d) { int ai = offset * (2 * tid + 1) -1; int bi = offset * (2 * tid + 2) -1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } if (tid == 0) { temp[BLOCK_SIZE * 2 - 1 + CONFLICT_FREE_OFFSET(BLOCK_SIZE * 2 - 1)] = 0; } for(int d = 1; d < BLOCK_SIZE * 2; d*=2) { offset >>= 1; __syncthreads(); if (tid < d) { int ai = offset * (2 * tid + 1) -1; int bi = offset * (2 * tid + 2) -1; ai += CONFLICT_FREE_OFFSET(ai); bi += 
CONFLICT_FREE_OFFSET(bi); float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); if (numElements > start + aj) { outArray[start + aj] = temp[aj + bankOffsetA]; } else { outArray[start + aj] = 0; } if (numElements > start + bj) { outArray[start + bj] = temp[bj + bankOffsetB]; } else { outArray[start + bj] = 0; } if(tid == 0) blockSums[blockIdx.x] = temp[2 * BLOCK_SIZE - 1] + inArray[start + 2 * BLOCK_SIZE - 1]; } // **===-------- MP4.2 - Modify the body of this function -----------===** // You may need to make multiple kernel calls, make your own kernel // function in this file, and then call them from here. void prescanArrayHelper(float *outArray, float *inArray, int numElements, int index) { // printf("starting helper index %d\n", index); dim3 dim_block, dim_grid; dim_block.x = BLOCK_SIZE; dim_block.y = dim_block.z = 1; dim_grid.x = ceil((float)(numElements / (float)(dim_block.x * 2))); dim_grid.y = dim_grid.z = 1; prescanArrayKernel<<<dim_grid,dim_block>>>(outArray, inArray, numElements, BlockSums[index]); if (dim_grid.x > 1) { prescanArrayHelper(BlockSumsSummed[index], BlockSums[index], dim_grid.x, index+1); AdjustIncr<<<dim_grid, dim_block>>>(outArray, BlockSumsSummed[index], numElements); } } void prescanArray(float *outArray, float *inArray, int numElements) { prescanArrayHelper(outArray,inArray, numElements, 0); } // **===-----------------------------------------------------------===**
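This pair additionally shows the one-to-one runtime API renaming hipify performs (cudaMalloc becomes hipMalloc, and the <<<...>>> launch becomes hipLaunchKernelGGL). A condensed excerpt from preallocBlockSums and prescanArrayHelper in the two files above:

    // CUDA (.cu):
    cudaMalloc((void**)&(BlockSums[i]), n * sizeof(float));
    prescanArrayKernel<<<dim_grid, dim_block>>>(outArray, inArray, numElements, BlockSums[index]);

    // HIP (.hip):
    hipMalloc((void**)&(BlockSums[i]), n * sizeof(float));
    hipLaunchKernelGGL((prescanArrayKernel), dim3(dim_grid), dim3(dim_block), 0, 0,
                       outArray, inArray, numElements, BlockSums[index]);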
ca63e0cfdf6d0bb677ba457c6e7a5f46862fcc2f.hip
// !!! This is a file automatically generated by hipify!!! /* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl, * and including many others, as listed in the AUTHORS file in the * top-level source directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. */ /*! \file * \brief Define CUDA implementation of nbnxn_gpu.h * * \author Szilard Pall <pall.szilard@gmail.com> */ #include "gmxpre.h" #include "config.h" #include <assert.h> #include <stdlib.h> #include "gromacs/nbnxm/nbnxm_gpu.h" #if defined(_MSVC) #include <limits> #endif #include "nbnxm_cuda.h" #include "gromacs/gpu_utils/cudautils.cuh" #include "gromacs/mdlib/force_flags.h" #include "gromacs/nbnxm/atomdata.h" #include "gromacs/nbnxm/gpu_common.h" #include "gromacs/nbnxm/gpu_common_utils.h" #include "gromacs/nbnxm/gpu_data_mgmt.h" #include "gromacs/nbnxm/grid.h" #include "gromacs/nbnxm/nbnxm.h" #include "gromacs/nbnxm/pairlist.h" #include "gromacs/nbnxm/cuda/nbnxm_buffer_ops_kernels.cuh" #include "gromacs/timing/gpu_timing.h" #include "gromacs/utility/cstringutil.h" #include "gromacs/utility/gmxassert.h" #include "nbnxm_cuda_types.h" /***** The kernel declarations/definitions come here *****/ /* Top-level kernel declaration generation: will generate through multiple * inclusion the following flavors for all kernel declarations: * - force-only output; * - force and energy output; * - force-only with pair list pruning; * - force and energy output with pair list pruning. 
*/ #define FUNCTION_DECLARATION_ONLY /** Force only **/ #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh" /** Force & energy **/ #define CALC_ENERGIES #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh" #undef CALC_ENERGIES /*** Pair-list pruning kernels ***/ /** Force only **/ #define PRUNE_NBL #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh" /** Force & energy **/ #define CALC_ENERGIES #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh" #undef CALC_ENERGIES #undef PRUNE_NBL /* Prune-only kernels */ #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cuh" #undef FUNCTION_DECLARATION_ONLY /* Now generate the function definitions if we are using a single compilation unit. */ #if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_noprune.cu" #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_prune.cu" #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_noprune.cu" #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_prune.cu" #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cu" #endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */ namespace Nbnxm { /*! Nonbonded kernel function pointer type */ typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t, const cu_nbparam_t, const cu_plist_t, bool); /*********************************/ /*! Returns the number of blocks to be used for the nonbonded GPU kernel. */ static inline int calc_nb_kernel_nblock(int nwork_units, const gmx_device_info_t *dinfo) { int max_grid_x_size; assert(dinfo); /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an empty domain) and that case should be handled before this point. */ assert(nwork_units > 0); max_grid_x_size = dinfo->prop.maxGridSize[0]; /* do we exceed the grid x dimension limit? */ if (nwork_units > max_grid_x_size) { gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n" "The number of nonbonded work units (=number of super-clusters) exceeds the" "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size); } return nwork_units; } /* Constant arrays listing all kernel function pointers and enabling selection of a kernel in an elegant manner. */ /*! Pointers to the non-bonded kernels organized in 2-dim arrays by: * electrostatics and VDW type. * * Note that the row- and column-order of function pointers has to match the * order of corresponding enumerated electrostatics and vdw types, resp., * defined in nbnxn_cuda_types.h. */ /*! Force-only kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] = { { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda } }; /*! Force + energy kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] = { { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda } }; /*! Force + pruning kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] = { { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda } }; /*! Force + energy + pruning kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] = { { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda } }; /*! Return a pointer to the kernel version to be executed at the current step. */ static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int eeltype, int evdwtype, bool bDoEne, bool bDoPrune, const gmx_device_info_t gmx_unused *devInfo) { nbnxn_cu_kfunc_ptr_t res; GMX_ASSERT(eeltype < eelCuNR, "The electrostatics type requested is not implemented in the CUDA kernels."); GMX_ASSERT(evdwtype < evdwCuNR, "The VdW type requested is not implemented in the CUDA kernels."); /* assert assumptions made by the kernels */ GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize, "The CUDA kernels require the cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the architecture targeted."); if (bDoEne) { if (bDoPrune) { res = nb_kfunc_ener_prune_ptr[eeltype][evdwtype]; } else { res = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype]; } } else { if (bDoPrune) { res = nb_kfunc_noener_prune_ptr[eeltype][evdwtype]; } else { res = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype]; } } return res; } /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. 
*/ static inline int calc_shmem_required_nonbonded(const int num_threads_z, const gmx_device_info_t gmx_unused *dinfo, const cu_nbparam_t *nbp) { int shmem; assert(dinfo); /* size of shmem (force-buffers/xq/atom type preloading) */ /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */ /* i-atom x+q in shared memory */ shmem = c_numClPerSupercl * c_clSize * sizeof(float4); /* cj in shared memory, for each warp separately */ shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int); if (nbp->vdwtype == evdwCuCUTCOMBGEOM || nbp->vdwtype == evdwCuCUTCOMBLB) { /* i-atom LJ combination parameters in shared memory */ shmem += c_numClPerSupercl * c_clSize * sizeof(float2); } else { /* i-atom types in shared memory */ shmem += c_numClPerSupercl * c_clSize * sizeof(int); } return shmem; } /*! \brief Sync the nonlocal stream with dependent tasks in the local queue. * * As the point where the local stream tasks can be considered complete happens * at the same call point where the nonlocal stream should be synced with the * the local, this function recrds the event if called with the local stream as * argument and inserts in the GPU stream a wait on the event on the nonlocal. */ static void insertNonlocalGpuDependency(const gmx_nbnxn_cuda_t *nb, const InteractionLocality interactionLocality) { hipStream_t stream = nb->stream[interactionLocality]; /* When we get here all misc operations issued in the local stream as well as the local xq H2D are done, so we record that in the local stream and wait for it in the nonlocal one. This wait needs to precede any PP tasks, bonded or nonbonded, that may compute on interactions between local and nonlocal atoms. */ if (nb->bUseTwoStreams) { if (interactionLocality == InteractionLocality::Local) { hipError_t stat = hipEventRecord(nb->misc_ops_and_local_H2D_done, stream); CU_RET_ERR(stat, "hipEventRecord on misc_ops_and_local_H2D_done failed"); } else { hipError_t stat = hipStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0); CU_RET_ERR(stat, "hipStreamWaitEvent on misc_ops_and_local_H2D_done failed"); } } } /*! \brief Launch asynchronously the xq buffer host to device copy. */ void gpu_copy_xq_to_gpu(gmx_nbnxn_cuda_t *nb, const nbnxn_atomdata_t *nbatom, const AtomLocality atomLocality, const bool haveOtherWork) { GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal, "Only local and non-local xq transfers are supported"); const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality); int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */ cu_atomdata_t *adat = nb->atdat; cu_plist_t *plist = nb->plist[iloc]; cu_timers_t *t = nb->timers; hipStream_t stream = nb->stream[iloc]; bool bDoTime = nb->bDoTime; /* Don't launch the non-local H2D copy if there is no dependent work to do: neither non-local nor other (e.g. bonded) work to do that has as input the nbnxn coordaintes. Doing the same for the local kernel is more complicated, since the local part of the force array also depends on the non-local kernel. So to avoid complicating the code and to reduce the risk of bugs, we always call the local local x+q copy (and the rest of the local work in nbnxn_gpu_launch_kernel(). 
*/ if (!haveOtherWork && canSkipWork(*nb, iloc)) { plist->haveFreshList = false; return; } /* calculate the atom data index range based on locality */ if (atomLocality == AtomLocality::Local) { adat_begin = 0; adat_len = adat->natoms_local; } else { adat_begin = adat->natoms_local; adat_len = adat->natoms - adat->natoms_local; } /* HtoD x, q */ /* beginning of timed HtoD section */ if (bDoTime) { t->xf[atomLocality].nb_h2d.openTimingRegion(stream); } cu_copy_H2D_async(adat->xq + adat_begin, static_cast<const void *>(nbatom->x().data() + adat_begin * 4), adat_len * sizeof(*adat->xq), stream); if (bDoTime) { t->xf[atomLocality].nb_h2d.closeTimingRegion(stream); } /* When we get here all misc operations issued in the local stream as well as the local xq H2D are done, so we record that in the local stream and wait for it in the nonlocal one. This wait needs to precede any PP tasks, bonded or nonbonded, that may compute on interactions between local and nonlocal atoms. */ insertNonlocalGpuDependency(nb, iloc); } /*! As we execute nonbonded workload in separate streams, before launching the kernel we need to make sure that he following operations have completed: - atomdata allocation and related H2D transfers (every nstlist step); - pair list H2D transfer (every nstlist step); - shift vector H2D transfer (every nstlist step); - force (+shift force and energy) output clearing (every step). These operations are issued in the local stream at the beginning of the step and therefore always complete before the local kernel launch. The non-local kernel is launched after the local on the same device/context hence it is inherently scheduled after the operations in the local stream (including the above "misc_ops") on pre-GK110 devices with single hardware queue, but on later devices with multiple hardware queues the dependency needs to be enforced. We use the misc_ops_and_local_H2D_done event to record the point where the local x+q H2D (and all preceding) tasks are complete and synchronize with this event in the non-local stream before launching the non-bonded kernel. */ void gpu_launch_kernel(gmx_nbnxn_cuda_t *nb, const int flags, const InteractionLocality iloc) { cu_atomdata_t *adat = nb->atdat; cu_nbparam_t *nbp = nb->nbparam; cu_plist_t *plist = nb->plist[iloc]; cu_timers_t *t = nb->timers; hipStream_t stream = nb->stream[iloc]; bool bCalcEner = flags & GMX_FORCE_ENERGY; bool bCalcFshift = flags & GMX_FORCE_VIRIAL; bool bDoTime = nb->bDoTime; /* Don't launch the non-local kernel if there is no work to do. Doing the same for the local kernel is more complicated, since the local part of the force array also depends on the non-local kernel. So to avoid complicating the code and to reduce the risk of bugs, we always call the local kernel, and later (not in this function) the stream wait, local f copyback and the f buffer clearing. All these operations, except for the local interaction kernel, are needed for the non-local interactions. The skip of the local kernel call is taken care of later in this function. */ if (canSkipWork(*nb, iloc)) { plist->haveFreshList = false; return; } if (nbp->useDynamicPruning && plist->haveFreshList) { /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false (TODO: ATM that's the way the timing accounting can distinguish between separate prune kernel and combined force+prune, maybe we need a better way?). 
*/ gpu_launch_kernel_pruneonly(nb, iloc, 1); } if (plist->nsci == 0) { /* Don't launch an empty local kernel (not allowed with CUDA) */ return; } /* beginning of timed nonbonded calculation section */ if (bDoTime) { t->interaction[iloc].nb_k.openTimingRegion(stream); } /* Kernel launch config: * - The thread block dimensions match the size of i-clusters, j-clusters, * and j-cluster concurrency, in x, y, and z, respectively. * - The 1D block-grid contains as many blocks as super-clusters. */ int num_threads_z = 1; if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7) { num_threads_z = 2; } int nblock = calc_nb_kernel_nblock(plist->nsci, nb->dev_info); KernelLaunchConfig config; config.blockSize[0] = c_clSize; config.blockSize[1] = c_clSize; config.blockSize[2] = num_threads_z; config.gridSize[0] = nblock; config.sharedMemorySize = calc_shmem_required_nonbonded(num_threads_z, nb->dev_info, nbp); config.stream = stream; if (debug) { fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t" "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n" "\tShMem: %zu\n", config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0], config.gridSize[1], plist->nsci*c_numClPerSupercl, c_numClPerSupercl, plist->na_c, config.sharedMemorySize); } auto *timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr; const auto kernel = select_nbnxn_kernel(nbp->eeltype, nbp->vdwtype, bCalcEner, (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune), nb->dev_info); const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &bCalcFshift); launchGpuKernel(kernel, config, timingEvent, "k_calc_nb", kernelArgs); if (bDoTime) { t->interaction[iloc].nb_k.closeTimingRegion(stream); } if (GMX_NATIVE_WINDOWS) { /* Windows: force flushing WDDM queue */ hipStreamQuery(stream); } } /*! Calculates the amount of shared memory required by the CUDA kernel in use. */ static inline int calc_shmem_required_prune(const int num_threads_z) { int shmem; /* i-atom x in shared memory */ shmem = c_numClPerSupercl * c_clSize * sizeof(float4); /* cj in shared memory, for each warp separately */ shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int); return shmem; } void gpu_launch_kernel_pruneonly(gmx_nbnxn_cuda_t *nb, const InteractionLocality iloc, const int numParts) { cu_atomdata_t *adat = nb->atdat; cu_nbparam_t *nbp = nb->nbparam; cu_plist_t *plist = nb->plist[iloc]; cu_timers_t *t = nb->timers; hipStream_t stream = nb->stream[iloc]; bool bDoTime = nb->bDoTime; if (plist->haveFreshList) { GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part"); /* Set rollingPruningNumParts to signal that it is not set */ plist->rollingPruningNumParts = 0; plist->rollingPruningPart = 0; } else { if (plist->rollingPruningNumParts == 0) { plist->rollingPruningNumParts = numParts; } else { GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps"); } } /* Use a local variable for part and update in plist, so we can return here * without duplicating the part increment code. 
*/ int part = plist->rollingPruningPart; plist->rollingPruningPart++; if (plist->rollingPruningPart >= plist->rollingPruningNumParts) { plist->rollingPruningPart = 0; } /* Compute the number of list entries to prune in this pass */ int numSciInPart = (plist->nsci - part)/numParts; /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */ if (numSciInPart <= 0) { plist->haveFreshList = false; return; } GpuRegionTimer *timer = nullptr; if (bDoTime) { timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k); } /* beginning of timed prune calculation section */ if (bDoTime) { timer->openTimingRegion(stream); } /* Kernel launch config: * - The thread block dimensions match the size of i-clusters, j-clusters, * and j-cluster concurrency, in x, y, and z, respectively. * - The 1D block-grid contains as many blocks as super-clusters. */ int num_threads_z = c_cudaPruneKernelJ4Concurrency; int nblock = calc_nb_kernel_nblock(numSciInPart, nb->dev_info); KernelLaunchConfig config; config.blockSize[0] = c_clSize; config.blockSize[1] = c_clSize; config.blockSize[2] = num_threads_z; config.gridSize[0] = nblock; config.sharedMemorySize = calc_shmem_required_prune(num_threads_z); config.stream = stream; if (debug) { fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t" "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n" "\tShMem: %zu\n", config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0], config.gridSize[1], numSciInPart*c_numClPerSupercl, c_numClPerSupercl, plist->na_c, config.sharedMemorySize); } auto *timingEvent = bDoTime ? timer->fetchNextEvent() : nullptr; constexpr char kernelName[] = "k_pruneonly"; const auto kernel = plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>; const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part); launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs); /* TODO: consider a more elegant way to track which kernel has been called (combined or separate 1st pass prune, rolling prune). 
*/ if (plist->haveFreshList) { plist->haveFreshList = false; /* Mark that pruning has been done */ nb->timers->interaction[iloc].didPrune = true; } else { /* Mark that rolling pruning has been done */ nb->timers->interaction[iloc].didRollingPrune = true; } if (bDoTime) { timer->closeTimingRegion(stream); } if (GMX_NATIVE_WINDOWS) { /* Windows: force flushing WDDM queue */ hipStreamQuery(stream); } } void gpu_launch_cpyback(gmx_nbnxn_cuda_t *nb, nbnxn_atomdata_t *nbatom, const int flags, const AtomLocality atomLocality, const bool haveOtherWork) { hipError_t stat; int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */ /* determine interaction locality from atom locality */ const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality); /* extract the data */ cu_atomdata_t *adat = nb->atdat; cu_timers_t *t = nb->timers; bool bDoTime = nb->bDoTime; hipStream_t stream = nb->stream[iloc]; bool bCalcEner = flags & GMX_FORCE_ENERGY; bool bCalcFshift = flags & GMX_FORCE_VIRIAL; /* don't launch non-local copy-back if there was no non-local work to do */ if (!haveOtherWork && canSkipWork(*nb, iloc)) { return; } getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len); /* beginning of timed D2H section */ if (bDoTime) { t->xf[atomLocality].nb_d2h.openTimingRegion(stream); } /* With DD the local D2H transfer can only start after the non-local kernel has finished. */ if (iloc == InteractionLocality::Local && nb->bUseTwoStreams) { stat = hipStreamWaitEvent(stream, nb->nonlocal_done, 0); CU_RET_ERR(stat, "hipStreamWaitEvent on nonlocal_done failed"); } /* DtoH f */ cu_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f + adat_begin, (adat_len)*sizeof(*adat->f), stream); /* After the non-local D2H is launched the nonlocal_done event can be recorded which signals that the local D2H can proceed. This event is not placed after the non-local kernel because we want the non-local data back first. */ if (iloc == InteractionLocality::NonLocal) { stat = hipEventRecord(nb->nonlocal_done, stream); CU_RET_ERR(stat, "hipEventRecord on nonlocal_done failed"); } /* only transfer energies in the local stream */ if (iloc == InteractionLocality::Local) { /* DtoH fshift */ if (bCalcFshift) { cu_copy_D2H_async(nb->nbst.fshift, adat->fshift, SHIFTS * sizeof(*nb->nbst.fshift), stream); } /* DtoH energies */ if (bCalcEner) { cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, sizeof(*nb->nbst.e_lj), stream); cu_copy_D2H_async(nb->nbst.e_el, adat->e_el, sizeof(*nb->nbst.e_el), stream); } } if (bDoTime) { t->xf[atomLocality].nb_d2h.closeTimingRegion(stream); } } void cuda_set_cacheconfig() { hipError_t stat; for (int i = 0; i < eelCuNR; i++) { for (int j = 0; j < evdwCuNR; j++) { /* Default kernel 32/32 kB Shared/L1 */ hipFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], hipFuncCachePreferEqual); hipFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], hipFuncCachePreferEqual); hipFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], hipFuncCachePreferEqual); stat = hipFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], hipFuncCachePreferEqual); CU_RET_ERR(stat, "hipFuncSetCacheConfig failed"); } } } /* X buffer operations on GPU: performs conversion from rvec to nb format. 
*/ void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid &grid, bool setFillerCoords, gmx_nbnxn_gpu_t *nb, void *xPmeDevicePtr, const Nbnxm::AtomLocality locality, const rvec *x) { cu_atomdata_t *adat = nb->atdat; bool bDoTime = nb->bDoTime; const int numColumns = grid.numColumns(); const int cellOffset = grid.cellOffset(); const int numAtomsPerCell = grid.numAtomsPerCell(); // TODO: Document this, one can not infer the interaction locality from the atom locality Nbnxm::InteractionLocality interactionLoc = Nbnxm::InteractionLocality::Local; int nCopyAtoms = grid.srcAtomEnd() - grid.srcAtomBegin(); int copyAtomStart = grid.srcAtomBegin(); if (locality == Nbnxm::AtomLocality::NonLocal) { interactionLoc = Nbnxm::InteractionLocality::NonLocal; } hipStream_t stream = nb->stream[interactionLoc]; // FIXME: need to either let the local stream get to the // insertNonlocalGpuDependency call or call it separately here if (nCopyAtoms == 0) // empty domain { if (interactionLoc == Nbnxm::InteractionLocality::Local) { insertNonlocalGpuDependency(nb, interactionLoc); } return; } const rvec *d_x; // copy of coordinates will be required if null pointer has been // passed to function // TODO improve this mechanism bool copyCoord = (xPmeDevicePtr == nullptr); // copy X-coordinate data to device if (copyCoord) { if (bDoTime) { nb->timers->xf[locality].nb_h2d.openTimingRegion(stream); } rvec *devicePtrDest = reinterpret_cast<rvec *> (nb->xrvec[copyAtomStart]); const rvec *devicePtrSrc = reinterpret_cast<const rvec *> (x[copyAtomStart]); copyToDeviceBuffer(&devicePtrDest, devicePtrSrc, 0, nCopyAtoms, stream, GpuApiCallBehavior::Async, nullptr); if (bDoTime) { nb->timers->xf[locality].nb_h2d.closeTimingRegion(stream); } d_x = nb->xrvec; } else //coordinates have already been copied by PME stream { d_x = (rvec*) xPmeDevicePtr; } /* launch kernel on GPU */ const int threadsPerBlock = 128; KernelLaunchConfig config; config.blockSize[0] = threadsPerBlock; config.blockSize[1] = 1; config.blockSize[2] = 1; config.gridSize[0] = (grid.numCellsColumnMax()*numAtomsPerCell + threadsPerBlock - 1)/threadsPerBlock; config.gridSize[1] = numColumns; config.gridSize[2] = 1; GMX_ASSERT(config.gridSize[0] > 0, "Can not have empty grid, early return above avoids this"); config.sharedMemorySize = 0; config.stream = stream; auto kernelFn = nbnxn_gpu_x_to_nbat_x_kernel; float *xqPtr = &(adat->xq->x); const int *d_atomIndices = nb->atomIndices; const int *d_cxy_na = nb->cxy_na[locality]; const int *d_cxy_ind = nb->cxy_ind[locality]; const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &numColumns, &xqPtr, &setFillerCoords, &d_x, &d_atomIndices, &d_cxy_na, &d_cxy_ind, &cellOffset, &numAtomsPerCell); launchGpuKernel(kernelFn, config, nullptr, "XbufferOps", kernelArgs); insertNonlocalGpuDependency(nb, interactionLoc); } } // namespace Nbnxm
ca63e0cfdf6d0bb677ba457c6e7a5f46862fcc2f.cu
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl, * and including many others, as listed in the AUTHORS file in the * top-level source directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. */ /*! \file * \brief Define CUDA implementation of nbnxn_gpu.h * * \author Szilard Pall <pall.szilard@gmail.com> */ #include "gmxpre.h" #include "config.h" #include <assert.h> #include <stdlib.h> #include "gromacs/nbnxm/nbnxm_gpu.h" #if defined(_MSVC) #include <limits> #endif #include "nbnxm_cuda.h" #include "gromacs/gpu_utils/cudautils.cuh" #include "gromacs/mdlib/force_flags.h" #include "gromacs/nbnxm/atomdata.h" #include "gromacs/nbnxm/gpu_common.h" #include "gromacs/nbnxm/gpu_common_utils.h" #include "gromacs/nbnxm/gpu_data_mgmt.h" #include "gromacs/nbnxm/grid.h" #include "gromacs/nbnxm/nbnxm.h" #include "gromacs/nbnxm/pairlist.h" #include "gromacs/nbnxm/cuda/nbnxm_buffer_ops_kernels.cuh" #include "gromacs/timing/gpu_timing.h" #include "gromacs/utility/cstringutil.h" #include "gromacs/utility/gmxassert.h" #include "nbnxm_cuda_types.h" /***** The kernel declarations/definitions come here *****/ /* Top-level kernel declaration generation: will generate through multiple * inclusion the following flavors for all kernel declarations: * - force-only output; * - force and energy output; * - force-only with pair list pruning; * - force and energy output with pair list pruning. 
*/ #define FUNCTION_DECLARATION_ONLY /** Force only **/ #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh" /** Force & energy **/ #define CALC_ENERGIES #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh" #undef CALC_ENERGIES /*** Pair-list pruning kernels ***/ /** Force only **/ #define PRUNE_NBL #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh" /** Force & energy **/ #define CALC_ENERGIES #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh" #undef CALC_ENERGIES #undef PRUNE_NBL /* Prune-only kernels */ #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cuh" #undef FUNCTION_DECLARATION_ONLY /* Now generate the function definitions if we are using a single compilation unit. */ #if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_noprune.cu" #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_prune.cu" #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_noprune.cu" #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_prune.cu" #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cu" #endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */ namespace Nbnxm { /*! Nonbonded kernel function pointer type */ typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t, const cu_nbparam_t, const cu_plist_t, bool); /*********************************/ /*! Returns the number of blocks to be used for the nonbonded GPU kernel. */ static inline int calc_nb_kernel_nblock(int nwork_units, const gmx_device_info_t *dinfo) { int max_grid_x_size; assert(dinfo); /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an empty domain) and that case should be handled before this point. */ assert(nwork_units > 0); max_grid_x_size = dinfo->prop.maxGridSize[0]; /* do we exceed the grid x dimension limit? */ if (nwork_units > max_grid_x_size) { gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n" "The number of nonbonded work units (=number of super-clusters) exceeds the" "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size); } return nwork_units; } /* Constant arrays listing all kernel function pointers and enabling selection of a kernel in an elegant manner. */ /*! Pointers to the non-bonded kernels organized in 2-dim arrays by: * electrostatics and VDW type. * * Note that the row- and column-order of function pointers has to match the * order of corresponding enumerated electrostatics and vdw types, resp., * defined in nbnxn_cuda_types.h. */ /*! Force-only kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] = { { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda } }; /*! Force + energy kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] = { { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda } }; /*! Force + pruning kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] = { { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda } }; /*! Force + energy + pruning kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] = { { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda } }; /*! Return a pointer to the kernel version to be executed at the current step. */ static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int eeltype, int evdwtype, bool bDoEne, bool bDoPrune, const gmx_device_info_t gmx_unused *devInfo) { nbnxn_cu_kfunc_ptr_t res; GMX_ASSERT(eeltype < eelCuNR, "The electrostatics type requested is not implemented in the CUDA kernels."); GMX_ASSERT(evdwtype < evdwCuNR, "The VdW type requested is not implemented in the CUDA kernels."); /* assert assumptions made by the kernels */ GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize, "The CUDA kernels require the cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the architecture targeted."); if (bDoEne) { if (bDoPrune) { res = nb_kfunc_ener_prune_ptr[eeltype][evdwtype]; } else { res = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype]; } } else { if (bDoPrune) { res = nb_kfunc_noener_prune_ptr[eeltype][evdwtype]; } else { res = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype]; } } return res; } /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. 
*/ static inline int calc_shmem_required_nonbonded(const int num_threads_z, const gmx_device_info_t gmx_unused *dinfo, const cu_nbparam_t *nbp) { int shmem; assert(dinfo); /* size of shmem (force-buffers/xq/atom type preloading) */ /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */ /* i-atom x+q in shared memory */ shmem = c_numClPerSupercl * c_clSize * sizeof(float4); /* cj in shared memory, for each warp separately */ shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int); if (nbp->vdwtype == evdwCuCUTCOMBGEOM || nbp->vdwtype == evdwCuCUTCOMBLB) { /* i-atom LJ combination parameters in shared memory */ shmem += c_numClPerSupercl * c_clSize * sizeof(float2); } else { /* i-atom types in shared memory */ shmem += c_numClPerSupercl * c_clSize * sizeof(int); } return shmem; } /*! \brief Sync the nonlocal stream with dependent tasks in the local queue. * * As the point where the local stream tasks can be considered complete happens * at the same call point where the nonlocal stream should be synced with the * the local, this function recrds the event if called with the local stream as * argument and inserts in the GPU stream a wait on the event on the nonlocal. */ static void insertNonlocalGpuDependency(const gmx_nbnxn_cuda_t *nb, const InteractionLocality interactionLocality) { cudaStream_t stream = nb->stream[interactionLocality]; /* When we get here all misc operations issued in the local stream as well as the local xq H2D are done, so we record that in the local stream and wait for it in the nonlocal one. This wait needs to precede any PP tasks, bonded or nonbonded, that may compute on interactions between local and nonlocal atoms. */ if (nb->bUseTwoStreams) { if (interactionLocality == InteractionLocality::Local) { cudaError_t stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream); CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed"); } else { cudaError_t stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0); CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed"); } } } /*! \brief Launch asynchronously the xq buffer host to device copy. */ void gpu_copy_xq_to_gpu(gmx_nbnxn_cuda_t *nb, const nbnxn_atomdata_t *nbatom, const AtomLocality atomLocality, const bool haveOtherWork) { GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal, "Only local and non-local xq transfers are supported"); const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality); int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */ cu_atomdata_t *adat = nb->atdat; cu_plist_t *plist = nb->plist[iloc]; cu_timers_t *t = nb->timers; cudaStream_t stream = nb->stream[iloc]; bool bDoTime = nb->bDoTime; /* Don't launch the non-local H2D copy if there is no dependent work to do: neither non-local nor other (e.g. bonded) work to do that has as input the nbnxn coordaintes. Doing the same for the local kernel is more complicated, since the local part of the force array also depends on the non-local kernel. So to avoid complicating the code and to reduce the risk of bugs, we always call the local local x+q copy (and the rest of the local work in nbnxn_gpu_launch_kernel(). 
*/ if (!haveOtherWork && canSkipWork(*nb, iloc)) { plist->haveFreshList = false; return; } /* calculate the atom data index range based on locality */ if (atomLocality == AtomLocality::Local) { adat_begin = 0; adat_len = adat->natoms_local; } else { adat_begin = adat->natoms_local; adat_len = adat->natoms - adat->natoms_local; } /* HtoD x, q */ /* beginning of timed HtoD section */ if (bDoTime) { t->xf[atomLocality].nb_h2d.openTimingRegion(stream); } cu_copy_H2D_async(adat->xq + adat_begin, static_cast<const void *>(nbatom->x().data() + adat_begin * 4), adat_len * sizeof(*adat->xq), stream); if (bDoTime) { t->xf[atomLocality].nb_h2d.closeTimingRegion(stream); } /* When we get here all misc operations issued in the local stream as well as the local xq H2D are done, so we record that in the local stream and wait for it in the nonlocal one. This wait needs to precede any PP tasks, bonded or nonbonded, that may compute on interactions between local and nonlocal atoms. */ insertNonlocalGpuDependency(nb, iloc); } /*! As we execute nonbonded workload in separate streams, before launching the kernel we need to make sure that he following operations have completed: - atomdata allocation and related H2D transfers (every nstlist step); - pair list H2D transfer (every nstlist step); - shift vector H2D transfer (every nstlist step); - force (+shift force and energy) output clearing (every step). These operations are issued in the local stream at the beginning of the step and therefore always complete before the local kernel launch. The non-local kernel is launched after the local on the same device/context hence it is inherently scheduled after the operations in the local stream (including the above "misc_ops") on pre-GK110 devices with single hardware queue, but on later devices with multiple hardware queues the dependency needs to be enforced. We use the misc_ops_and_local_H2D_done event to record the point where the local x+q H2D (and all preceding) tasks are complete and synchronize with this event in the non-local stream before launching the non-bonded kernel. */ void gpu_launch_kernel(gmx_nbnxn_cuda_t *nb, const int flags, const InteractionLocality iloc) { cu_atomdata_t *adat = nb->atdat; cu_nbparam_t *nbp = nb->nbparam; cu_plist_t *plist = nb->plist[iloc]; cu_timers_t *t = nb->timers; cudaStream_t stream = nb->stream[iloc]; bool bCalcEner = flags & GMX_FORCE_ENERGY; bool bCalcFshift = flags & GMX_FORCE_VIRIAL; bool bDoTime = nb->bDoTime; /* Don't launch the non-local kernel if there is no work to do. Doing the same for the local kernel is more complicated, since the local part of the force array also depends on the non-local kernel. So to avoid complicating the code and to reduce the risk of bugs, we always call the local kernel, and later (not in this function) the stream wait, local f copyback and the f buffer clearing. All these operations, except for the local interaction kernel, are needed for the non-local interactions. The skip of the local kernel call is taken care of later in this function. */ if (canSkipWork(*nb, iloc)) { plist->haveFreshList = false; return; } if (nbp->useDynamicPruning && plist->haveFreshList) { /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false (TODO: ATM that's the way the timing accounting can distinguish between separate prune kernel and combined force+prune, maybe we need a better way?). 
*/ gpu_launch_kernel_pruneonly(nb, iloc, 1); } if (plist->nsci == 0) { /* Don't launch an empty local kernel (not allowed with CUDA) */ return; } /* beginning of timed nonbonded calculation section */ if (bDoTime) { t->interaction[iloc].nb_k.openTimingRegion(stream); } /* Kernel launch config: * - The thread block dimensions match the size of i-clusters, j-clusters, * and j-cluster concurrency, in x, y, and z, respectively. * - The 1D block-grid contains as many blocks as super-clusters. */ int num_threads_z = 1; if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7) { num_threads_z = 2; } int nblock = calc_nb_kernel_nblock(plist->nsci, nb->dev_info); KernelLaunchConfig config; config.blockSize[0] = c_clSize; config.blockSize[1] = c_clSize; config.blockSize[2] = num_threads_z; config.gridSize[0] = nblock; config.sharedMemorySize = calc_shmem_required_nonbonded(num_threads_z, nb->dev_info, nbp); config.stream = stream; if (debug) { fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t" "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n" "\tShMem: %zu\n", config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0], config.gridSize[1], plist->nsci*c_numClPerSupercl, c_numClPerSupercl, plist->na_c, config.sharedMemorySize); } auto *timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr; const auto kernel = select_nbnxn_kernel(nbp->eeltype, nbp->vdwtype, bCalcEner, (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune), nb->dev_info); const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &bCalcFshift); launchGpuKernel(kernel, config, timingEvent, "k_calc_nb", kernelArgs); if (bDoTime) { t->interaction[iloc].nb_k.closeTimingRegion(stream); } if (GMX_NATIVE_WINDOWS) { /* Windows: force flushing WDDM queue */ cudaStreamQuery(stream); } } /*! Calculates the amount of shared memory required by the CUDA kernel in use. */ static inline int calc_shmem_required_prune(const int num_threads_z) { int shmem; /* i-atom x in shared memory */ shmem = c_numClPerSupercl * c_clSize * sizeof(float4); /* cj in shared memory, for each warp separately */ shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int); return shmem; } void gpu_launch_kernel_pruneonly(gmx_nbnxn_cuda_t *nb, const InteractionLocality iloc, const int numParts) { cu_atomdata_t *adat = nb->atdat; cu_nbparam_t *nbp = nb->nbparam; cu_plist_t *plist = nb->plist[iloc]; cu_timers_t *t = nb->timers; cudaStream_t stream = nb->stream[iloc]; bool bDoTime = nb->bDoTime; if (plist->haveFreshList) { GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part"); /* Set rollingPruningNumParts to signal that it is not set */ plist->rollingPruningNumParts = 0; plist->rollingPruningPart = 0; } else { if (plist->rollingPruningNumParts == 0) { plist->rollingPruningNumParts = numParts; } else { GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps"); } } /* Use a local variable for part and update in plist, so we can return here * without duplicating the part increment code. 
*/ int part = plist->rollingPruningPart; plist->rollingPruningPart++; if (plist->rollingPruningPart >= plist->rollingPruningNumParts) { plist->rollingPruningPart = 0; } /* Compute the number of list entries to prune in this pass */ int numSciInPart = (plist->nsci - part)/numParts; /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */ if (numSciInPart <= 0) { plist->haveFreshList = false; return; } GpuRegionTimer *timer = nullptr; if (bDoTime) { timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k); } /* beginning of timed prune calculation section */ if (bDoTime) { timer->openTimingRegion(stream); } /* Kernel launch config: * - The thread block dimensions match the size of i-clusters, j-clusters, * and j-cluster concurrency, in x, y, and z, respectively. * - The 1D block-grid contains as many blocks as super-clusters. */ int num_threads_z = c_cudaPruneKernelJ4Concurrency; int nblock = calc_nb_kernel_nblock(numSciInPart, nb->dev_info); KernelLaunchConfig config; config.blockSize[0] = c_clSize; config.blockSize[1] = c_clSize; config.blockSize[2] = num_threads_z; config.gridSize[0] = nblock; config.sharedMemorySize = calc_shmem_required_prune(num_threads_z); config.stream = stream; if (debug) { fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t" "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n" "\tShMem: %zu\n", config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0], config.gridSize[1], numSciInPart*c_numClPerSupercl, c_numClPerSupercl, plist->na_c, config.sharedMemorySize); } auto *timingEvent = bDoTime ? timer->fetchNextEvent() : nullptr; constexpr char kernelName[] = "k_pruneonly"; const auto kernel = plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>; const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part); launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs); /* TODO: consider a more elegant way to track which kernel has been called (combined or separate 1st pass prune, rolling prune). 
*/ if (plist->haveFreshList) { plist->haveFreshList = false; /* Mark that pruning has been done */ nb->timers->interaction[iloc].didPrune = true; } else { /* Mark that rolling pruning has been done */ nb->timers->interaction[iloc].didRollingPrune = true; } if (bDoTime) { timer->closeTimingRegion(stream); } if (GMX_NATIVE_WINDOWS) { /* Windows: force flushing WDDM queue */ cudaStreamQuery(stream); } } void gpu_launch_cpyback(gmx_nbnxn_cuda_t *nb, nbnxn_atomdata_t *nbatom, const int flags, const AtomLocality atomLocality, const bool haveOtherWork) { cudaError_t stat; int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */ /* determine interaction locality from atom locality */ const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality); /* extract the data */ cu_atomdata_t *adat = nb->atdat; cu_timers_t *t = nb->timers; bool bDoTime = nb->bDoTime; cudaStream_t stream = nb->stream[iloc]; bool bCalcEner = flags & GMX_FORCE_ENERGY; bool bCalcFshift = flags & GMX_FORCE_VIRIAL; /* don't launch non-local copy-back if there was no non-local work to do */ if (!haveOtherWork && canSkipWork(*nb, iloc)) { return; } getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len); /* beginning of timed D2H section */ if (bDoTime) { t->xf[atomLocality].nb_d2h.openTimingRegion(stream); } /* With DD the local D2H transfer can only start after the non-local kernel has finished. */ if (iloc == InteractionLocality::Local && nb->bUseTwoStreams) { stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0); CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed"); } /* DtoH f */ cu_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f + adat_begin, (adat_len)*sizeof(*adat->f), stream); /* After the non-local D2H is launched the nonlocal_done event can be recorded which signals that the local D2H can proceed. This event is not placed after the non-local kernel because we want the non-local data back first. */ if (iloc == InteractionLocality::NonLocal) { stat = cudaEventRecord(nb->nonlocal_done, stream); CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed"); } /* only transfer energies in the local stream */ if (iloc == InteractionLocality::Local) { /* DtoH fshift */ if (bCalcFshift) { cu_copy_D2H_async(nb->nbst.fshift, adat->fshift, SHIFTS * sizeof(*nb->nbst.fshift), stream); } /* DtoH energies */ if (bCalcEner) { cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, sizeof(*nb->nbst.e_lj), stream); cu_copy_D2H_async(nb->nbst.e_el, adat->e_el, sizeof(*nb->nbst.e_el), stream); } } if (bDoTime) { t->xf[atomLocality].nb_d2h.closeTimingRegion(stream); } } void cuda_set_cacheconfig() { cudaError_t stat; for (int i = 0; i < eelCuNR; i++) { for (int j = 0; j < evdwCuNR; j++) { /* Default kernel 32/32 kB Shared/L1 */ cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual); cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual); cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual); stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual); CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed"); } } } /* X buffer operations on GPU: performs conversion from rvec to nb format. 
*/ void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid &grid, bool setFillerCoords, gmx_nbnxn_gpu_t *nb, void *xPmeDevicePtr, const Nbnxm::AtomLocality locality, const rvec *x) { cu_atomdata_t *adat = nb->atdat; bool bDoTime = nb->bDoTime; const int numColumns = grid.numColumns(); const int cellOffset = grid.cellOffset(); const int numAtomsPerCell = grid.numAtomsPerCell(); // TODO: Document this, one can not infer the interaction locality from the atom locality Nbnxm::InteractionLocality interactionLoc = Nbnxm::InteractionLocality::Local; int nCopyAtoms = grid.srcAtomEnd() - grid.srcAtomBegin(); int copyAtomStart = grid.srcAtomBegin(); if (locality == Nbnxm::AtomLocality::NonLocal) { interactionLoc = Nbnxm::InteractionLocality::NonLocal; } cudaStream_t stream = nb->stream[interactionLoc]; // FIXME: need to either let the local stream get to the // insertNonlocalGpuDependency call or call it separately here if (nCopyAtoms == 0) // empty domain { if (interactionLoc == Nbnxm::InteractionLocality::Local) { insertNonlocalGpuDependency(nb, interactionLoc); } return; } const rvec *d_x; // copy of coordinates will be required if null pointer has been // passed to function // TODO improve this mechanism bool copyCoord = (xPmeDevicePtr == nullptr); // copy X-coordinate data to device if (copyCoord) { if (bDoTime) { nb->timers->xf[locality].nb_h2d.openTimingRegion(stream); } rvec *devicePtrDest = reinterpret_cast<rvec *> (nb->xrvec[copyAtomStart]); const rvec *devicePtrSrc = reinterpret_cast<const rvec *> (x[copyAtomStart]); copyToDeviceBuffer(&devicePtrDest, devicePtrSrc, 0, nCopyAtoms, stream, GpuApiCallBehavior::Async, nullptr); if (bDoTime) { nb->timers->xf[locality].nb_h2d.closeTimingRegion(stream); } d_x = nb->xrvec; } else //coordinates have already been copied by PME stream { d_x = (rvec*) xPmeDevicePtr; } /* launch kernel on GPU */ const int threadsPerBlock = 128; KernelLaunchConfig config; config.blockSize[0] = threadsPerBlock; config.blockSize[1] = 1; config.blockSize[2] = 1; config.gridSize[0] = (grid.numCellsColumnMax()*numAtomsPerCell + threadsPerBlock - 1)/threadsPerBlock; config.gridSize[1] = numColumns; config.gridSize[2] = 1; GMX_ASSERT(config.gridSize[0] > 0, "Can not have empty grid, early return above avoids this"); config.sharedMemorySize = 0; config.stream = stream; auto kernelFn = nbnxn_gpu_x_to_nbat_x_kernel; float *xqPtr = &(adat->xq->x); const int *d_atomIndices = nb->atomIndices; const int *d_cxy_na = nb->cxy_na[locality]; const int *d_cxy_ind = nb->cxy_ind[locality]; const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config, &numColumns, &xqPtr, &setFillerCoords, &d_x, &d_atomIndices, &d_cxy_na, &d_cxy_ind, &cellOffset, &numAtomsPerCell); launchGpuKernel(kernelFn, config, nullptr, "XbufferOps", kernelArgs); insertNonlocalGpuDependency(nb, interactionLoc); } } // namespace Nbnxm
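The nonbonded launch path above orders the non-local stream after the local one with a recorded event rather than a device-wide synchronization. The standalone sketch below (placeholder kernels and buffer sizes, not GROMACS code) shows just that pattern as used by insertNonlocalGpuDependency(): record an event in the producing stream, make the consuming stream wait on it, and keep the host free of blocking calls.

// Minimal sketch of the event-based cross-stream dependency: work in streamNonlocal
// must not start before the copies and kernels issued in streamLocal have finished.
// Kernel names and sizes are placeholders.
#include <cuda_runtime.h>

__global__ void localWork(float *x)    { x[threadIdx.x] += 1.0f; }
__global__ void nonlocalWork(float *x) { x[threadIdx.x] *= 2.0f; }

int main()
{
    cudaStream_t streamLocal, streamNonlocal;
    cudaStreamCreate(&streamLocal);
    cudaStreamCreate(&streamNonlocal);

    // Timing disabled: the event is used only for ordering,
    // like misc_ops_and_local_H2D_done in the file above.
    cudaEvent_t localDone;
    cudaEventCreateWithFlags(&localDone, cudaEventDisableTiming);

    float *d_x;
    cudaMalloc(&d_x, 64 * sizeof(float));
    cudaMemsetAsync(d_x, 0, 64 * sizeof(float), streamLocal);

    localWork<<<1, 64, 0, streamLocal>>>(d_x);
    cudaEventRecord(localDone, streamLocal);            // mark "local work done"
    cudaStreamWaitEvent(streamNonlocal, localDone, 0);  // GPU-side wait, host not blocked
    nonlocalWork<<<1, 64, 0, streamNonlocal>>>(d_x);

    cudaDeviceSynchronize();
    cudaFree(d_x);
    cudaEventDestroy(localDone);
    cudaStreamDestroy(streamLocal);
    cudaStreamDestroy(streamNonlocal);
    return 0;
}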
cdac1193072d517a2fa75d4b865f586f9c02f9fc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <time.h> #include <unistd.h> #include <string> #include <sys/time.h> #include <float.h> #include <random> //#include <rocblas.h> #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" // helper functions #include "helper_string.h" #include "helper_cuda.h" #define DEFAULT_INPUT_SIZE 8192 #define MAX_SVALUE_NO_TENSOR ((float)sqrt(FLT_MAX / DEFAULT_INPUT_SIZE)) #define MAX_SVALUE_TENSOR 65503.0 int k = 0; int size; float *A, *B, *GOLD; bool host_check = false; bool generator_debug = false; char *gold_matrix_path, *a_matrix_path, *b_matrix_path; void usage() { printf( "Usage: generateMatricesSingle -size=N [-generator_debug] [-host_check] [-input_a=<path>] [-input_b=<path>] [-gold=<path>] [-use_tensors=<0 or 1>]\n"); } void generateInputMatrices(unsigned char use_tersor_cores) { float *h_A, *h_B; FILE *f_A, *f_B; float MAX_SVALUE = MAX_SVALUE_NO_TENSOR; if (use_tersor_cores == 1) { MAX_SVALUE = MAX_SVALUE_TENSOR; } h_A = (float*) malloc( sizeof(float) * DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE); h_B = (float*) malloc( sizeof(float) * DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE); printf("Max value: %f Min: %f\n", MAX_SVALUE, -MAX_SVALUE); std::random_device rd; //Will be used to obtain a seed for the random number engine std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd() std::uniform_real_distribution<float> dis(-MAX_SVALUE, MAX_SVALUE); // srand(time(NULL)); if (!generator_debug) { for (int i = 0; i < DEFAULT_INPUT_SIZE; i++) { for (int j = 0; j < DEFAULT_INPUT_SIZE; j++) { // h_A[i * DEFAULT_INPUT_SIZE + j] = (rand() // / ((float) (RAND_MAX) + 1) * (-4.06e16 - 4.4e16)) // + 4.1e16; h_A[i * DEFAULT_INPUT_SIZE + j] = dis(gen); h_B[i * DEFAULT_INPUT_SIZE + j] = dis(gen); } } } else { for (int i = 0; i < DEFAULT_INPUT_SIZE; i++) { for (int j = 0; j < DEFAULT_INPUT_SIZE; j++) { h_A[i * DEFAULT_INPUT_SIZE + j] = float(2.0); h_B[i * DEFAULT_INPUT_SIZE + j] = float(2.0); } } } int numZeros; int numNans; int numInfs; // printf("Write\n"); f_A = fopen(a_matrix_path, "wb"); f_B = fopen(b_matrix_path, "wb"); float val; numZeros = 0; numNans = 0; numInfs = 0; for (int i = 0; i < DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE; i++) { val = h_A[i]; if (val == 0) numZeros++; if (isnan(val)) numNans++; if (isinf(val)) numInfs++; } printf("Number of zeros/NaNs/INFs on matrix A: %d/%d/%d\n", numZeros, numNans, numInfs); numZeros = 0; numNans = 0; numInfs = 0; for (int i = 0; i < DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE; i++) { val = h_B[i]; if (val == 0) numZeros++; if (isnan(val)) numNans++; if (isinf(val)) numInfs++; } printf("Number of zeros/NaNs/INFs on matrix B: %d/%d/%d\n", numZeros, numNans, numInfs); for (int i = 0; i < DEFAULT_INPUT_SIZE; i++) { fwrite(&(h_A[i * DEFAULT_INPUT_SIZE]), sizeof(float) * DEFAULT_INPUT_SIZE, 1, f_A); } printf("Element 32 of matrix A: %f\n", (float) h_A[32]); printf("Element 50 of matrix B: %f\n", (float) h_B[50]); for (int i = 0; i < DEFAULT_INPUT_SIZE; i++) { fwrite(&(h_B[i * DEFAULT_INPUT_SIZE]), sizeof(float) * DEFAULT_INPUT_SIZE, 1, f_B); } printf("Done\n"); fclose(f_A); fclose(f_B); free(h_A); free(h_B); return; } void ReadMatrixFromFile() { int i; FILE *f_A, *f_B; f_A = fopen(a_matrix_path, "rb"); f_B = fopen(b_matrix_path, "rb"); if (!(f_A && f_B)) { printf("Error opening matrices A, B.\n"); printf("exit on line: %d", __LINE__); exit(-1); } size_t ret_value[2]; for (i = 0; i < k; i++) { ret_value[0] 
= fread(&A[k * i], sizeof(float) * k, 1, f_A); ret_value[1] = fread(&B[k * i], sizeof(float) * k, 1, f_B); if (ret_value[0] != 1 || ret_value[1] != 1) { printf("Bad input/gold formatting: %lu ; %lu .\n", ret_value[0], ret_value[1]); } } printf("Done reading matrices\n"); fclose(f_A); fclose(f_B); } void GetDevice() { hipDeviceProp_t prop; hipError_t teste; int count = 0; teste = hipGetDeviceCount(&count); printf("Get Device Test: %s\n", hipGetErrorString(teste)); for (int i = 0; i < count; i++) { hipGetDeviceProperties(&prop, i); printf("Name: %s\n", prop.name); } int *ndevice; int dev = 0; ndevice = &dev; hipGetDevice(ndevice); hipSetDevice(0); hipGetDeviceProperties(&prop, 0); printf("\ndevice: %d %s\n", *ndevice, prop.name); } double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp, &tzp); return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6); } float* openmpMul(float* a, float* b, size_t size) { double time = mysecond(); float* bT = (float*) malloc(sizeof(float) * size * size); float* c = (float*) calloc(size * size, sizeof(float)); if (c == NULL || bT == NULL) { printf("could not alloc hostGold matrix."); return NULL; } #pragma omp parallel for for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) bT[j * size + i] = b[i * size + j]; #pragma omp parallel for for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { for (int k = 0; k < size; k++) { c[i * size + j] += a[j * size + k] * bT[i * size + k]; } } } printf("host mmul time: %.2f seconds\n", mysecond() - time); return c; } void generateGoldMatrixHalf(unsigned char use_tensor_cores) { //////////////////////////////////////////////////// /////////////CUBLAS GEMM VARS/////////////////////// const float alpha = 1.0; const float beta = 1.0; hipblasOperation_t transa = HIPBLAS_OP_T, transb = HIPBLAS_OP_T; //////////////////////////////////////////////////// // Alloc blas handle hipblasHandle_t blas_handle; checkCudaErrors(hipblasCreate(&blas_handle)); printf("Tensor cores %d, is handle defined? %d\n", use_tensor_cores, (blas_handle && true)); if (use_tensor_cores == 0) { cublasSetMathMode(blas_handle, CUBLAS_DEFAULT_MATH); } else if (use_tensor_cores == 1) { cublasSetMathMode(blas_handle, CUBLAS_TENSOR_OP_MATH); } //////////////////////////////////////////////////// //////////DEVICE VARS/////////////////////////////// float *d_A; float *d_B; float *d_C; //////////////////////////////////////////////////// A = (float*) malloc(size * sizeof(float)); B = (float*) malloc(size * sizeof(float)); GOLD = (float*) malloc(size * sizeof(float)); ReadMatrixFromFile(); if (k <= 16) { printf("\nMatrix A: \n"); for (int i = 0; i < k * k; i++) { printf(" %.2e", (float) A[i]); if ((i + 1) % k == 0) printf("\n"); } printf("\nMatrix B: \n"); for (int i = 0; i < k * k; i++) { printf(" %.2e", (float) B[i]); if ((i + 1) % k == 0) printf("\n"); } } checkCudaErrors(hipMalloc((void** ) &d_A, size * sizeof(float))); checkCudaErrors(hipMalloc((void** ) &d_B, size * sizeof(float))); checkCudaErrors(hipMalloc((void** ) &d_C, size * sizeof(float))); checkCudaErrors(hipMemset(d_C, 0, size * sizeof(float))); // ZERA C checkCudaErrors( hipMemcpy(d_A, A, size * sizeof(float), hipMemcpyHostToDevice)); // PUSH A checkCudaErrors( hipMemcpy(d_B, B, size * sizeof(float), hipMemcpyHostToDevice)); // PUSH B printf("cudaSGEMM... 
k=%d\n", k); double time = mysecond(); hipblasSgemm(blas_handle, transa, transb, k, k, k, &alpha, d_A, k, d_B, k, &beta, d_C, k); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipPeekAtLastError()); time = mysecond() - time; /////////// PERF double flops = 2.0 * (double) k * k * k; double gflops = flops / time; double outputpersec = (double) k * k / time; printf("kernel time: %lf\n", time); printf("SIZE:%d OUTPUT/S:%f FLOPS:%f (GFLOPS:%.2f)\n", k, outputpersec, gflops, gflops / 1000000000); /////////// checkCudaErrors( hipMemcpy(GOLD, d_C, size * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_A); hipFree(d_B); hipFree(d_C); printf("Analysing output on host...\n"); int i, j; FILE *f_GOLD; f_GOLD = fopen(gold_matrix_path, "wb"); float val; int numZeros = 0; int numNans = 0; int numInfs = 0; float maxAbsVal = 0.0; #pragma omp parallel for for (int i = 0; i < k * k; i++) { val = GOLD[i]; if (fabs(val) > maxAbsVal) { #pragma omp critical maxAbsVal = max(fabs(val), maxAbsVal); } if (val == 0) { #pragma omp atomic numZeros++; if (numZeros < 5) printf("Zero in position (%d,%d)\n", (int) floor(i / k), (int) (i - floor(i / k) * k)); } if (isnan(val)) { #pragma omp atomic numNans++; if (numNans < 5) printf("NaN in position (%d,%d)\n", (int) floor(i / k), (int) (i - floor(i / k) * k)); } if (isinf(val)) { #pragma omp atomic numInfs++; if (numInfs < 5) printf("INF in position (%d,%d)\n", (int) floor(i / k), (int) (i - floor(i / k) * k)); } } printf("Number of zeros/NaNs/INFs on gold: %d/%d/%d\n", numZeros, numNans, numInfs); printf("Maximum absolute value on gold: %f\n", maxAbsVal); if (k <= 16) { for (int i = 0; i < k * k; i++) { printf(" %.2e", (float) GOLD[i]); if ((i + 1) % k == 0) printf("\n"); } } if (host_check) { printf("Calculating mMul using OpenMP on Host...\n"); float *hostGold = openmpMul(A, B, k); if (k <= 16) { printf("Host CPU Gold:\n"); for (int i = 0; i < k * k; i++) { printf(" %.2e", (float) hostGold[i]); if ((i + 1) % k == 0) printf("\n"); } } printf("Comparing GPU result with Host result...\n"); float maxDiff = 0.0; float maxAbsDiff = 0.0; for (i = 0; i < k; i++) { for (j = 0; j < k; j++) { register float diff = fabs( (hostGold[i * k + j] - GOLD[i * k + j]) / hostGold[i * k + j]); register float absDiff = hostGold[i * k + j] - GOLD[i * k + j]; if (diff > maxDiff) { maxDiff = max(diff, maxDiff); printf( "New diff! (%d,%d) hostGold!=gpuGold %e != %e (diff: %e)\n", i, j, hostGold[i * k + j], GOLD[i * k + j], diff); } if (absDiff > maxAbsDiff) { maxAbsDiff = max(absDiff, maxAbsDiff); } // if (diff > 0.1) { // printf("Fail! 
(%d,%d) hostGold!=gpuGold %f != %f (diff: %e)\n", i, j, (float)hostGold[i*k+j], (float)GOLD[i*k+j], diff); // fflush(stdout); // exit(-1); // } } } printf( "CPU and GPU match by a relative error of up to %e element difference.\nMaximum element absolute difference: %e (relatively to float representation: %e)\nWriting to file...\n", maxDiff, maxAbsDiff, maxAbsDiff / FLT_MAX); } //printf("-------------------------\n%.10f\n%.10f\n%.10f\n", GOLD[0], GOLD[1], GOLD[2]); for (i = 0; i < k; i++) { fwrite(&(GOLD[i * k]), sizeof(float) * k, 1, f_GOLD); } fclose(f_GOLD); hipblasDestroy(blas_handle); return; } int main(int argc, char** argv) { //==================================== //================== Read parameters unsigned char use_tensor_cores = 0; if (argc < 2) { usage(); exit(-1); } if (checkCmdLineFlag(argc, (const char **) argv, "size")) { k = getCmdLineArgumentInt(argc, (const char **) argv, "size"); if ((k <= 0) || (k % 16 != 0)) { printf("Invalid input size given on the command-line: %d\n", k); printf("exit on line: %d", __LINE__); exit(EXIT_FAILURE); } } else { usage(); printf("exit on line: %d", __LINE__); exit(EXIT_FAILURE); } if (checkCmdLineFlag(argc, (const char **) argv, "input_a")) { getCmdLineArgumentString(argc, (const char **) argv, "input_a", &a_matrix_path); } else { a_matrix_path = new char[100]; snprintf(a_matrix_path, 100, "sgemm_a_%i.matrix", (signed int) DEFAULT_INPUT_SIZE); printf("Using default input_a path: %s\n", a_matrix_path); } if (checkCmdLineFlag(argc, (const char **) argv, "input_b")) { getCmdLineArgumentString(argc, (const char **) argv, "input_b", &b_matrix_path); } else { b_matrix_path = new char[100]; snprintf(b_matrix_path, 100, "sgemm_b_%i.matrix", (signed int) DEFAULT_INPUT_SIZE); printf("Using default input_a path: %s\n", b_matrix_path); } if (checkCmdLineFlag(argc, (const char **) argv, "gold")) { getCmdLineArgumentString(argc, (const char **) argv, "gold", &gold_matrix_path); } else { gold_matrix_path = new char[100]; snprintf(gold_matrix_path, 100, "sgemm_gold_%i.matrix", (signed int) k); printf("Using default gold path: %s\n", gold_matrix_path); } if (checkCmdLineFlag(argc, (const char **) argv, "host_check")) { host_check = true; } if (checkCmdLineFlag(argc, (const char **) argv, "generator_debug")) { generator_debug = true; } //flag for tensor cores if (checkCmdLineFlag(argc, (const char **) argv, "use_tensors")) { use_tensor_cores = getCmdLineArgumentInt(argc, (const char **) argv, "use_tensors"); } //==================================== GetDevice(); size = k * k; printf("Each input matrix size: %.4fGB\n", (float) sizeof(float) * DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE / (1024 * 1024 * 1024)); FILE *test_file; test_file = fopen(a_matrix_path, "rb"); if (!test_file) { printf("Generating input matrices...\n"); generateInputMatrices(use_tensor_cores); } else { printf("Input matrices already exist...\n"); } generateGoldMatrixHalf(use_tensor_cores); return 0; }
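The bound MAX_SVALUE_NO_TENSOR = sqrt(FLT_MAX / DEFAULT_INPUT_SIZE) used by the generator above is what keeps the single-precision GEMM free of infinities: if every input magnitude is at most S = sqrt(FLT_MAX / 8192), each product is at most FLT_MAX / 8192, so an 8192-term dot product cannot exceed FLT_MAX even in the worst case. The tensor-core limit of 65503 appears to track the largest finite half-precision value (65504), though that is an inference, not something stated in the file. A small host-only check of the arithmetic (illustrative values, not taken from the generator):

// Sketch: why sqrt(FLT_MAX / 8192) is a safe bound for the non-tensor path.
#include <cfloat>
#include <cmath>
#include <cstdio>

int main()
{
    const int    n = 8192;                    // DEFAULT_INPUT_SIZE
    const float  s = sqrtf(FLT_MAX / n);      // MAX_SVALUE_NO_TENSOR
    const float  worst_product = s * s;       // ~ FLT_MAX / n
    const double worst_dot = (double)n * worst_product;

    printf("S = %e, worst |a*b| = %e, worst dot product = %e (FLT_MAX = %e)\n",
           s, worst_product, worst_dot, FLT_MAX);
    // Even the absolute worst case stays representable; random signs make the
    // typical accumulated value far smaller than this bound.
    return 0;
}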
cdac1193072d517a2fa75d4b865f586f9c02f9fc.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <time.h> #include <unistd.h> #include <string> #include <sys/time.h> #include <float.h> #include <random> //#include <cublas.h> #include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" // helper functions #include "helper_string.h" #include "helper_cuda.h" #define DEFAULT_INPUT_SIZE 8192 #define MAX_SVALUE_NO_TENSOR ((float)sqrt(FLT_MAX / DEFAULT_INPUT_SIZE)) #define MAX_SVALUE_TENSOR 65503.0 int k = 0; int size; float *A, *B, *GOLD; bool host_check = false; bool generator_debug = false; char *gold_matrix_path, *a_matrix_path, *b_matrix_path; void usage() { printf( "Usage: generateMatricesSingle -size=N [-generator_debug] [-host_check] [-input_a=<path>] [-input_b=<path>] [-gold=<path>] [-use_tensors=<0 or 1>]\n"); } void generateInputMatrices(unsigned char use_tersor_cores) { float *h_A, *h_B; FILE *f_A, *f_B; float MAX_SVALUE = MAX_SVALUE_NO_TENSOR; if (use_tersor_cores == 1) { MAX_SVALUE = MAX_SVALUE_TENSOR; } h_A = (float*) malloc( sizeof(float) * DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE); h_B = (float*) malloc( sizeof(float) * DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE); printf("Max value: %f Min: %f\n", MAX_SVALUE, -MAX_SVALUE); std::random_device rd; //Will be used to obtain a seed for the random number engine std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd() std::uniform_real_distribution<float> dis(-MAX_SVALUE, MAX_SVALUE); // srand(time(NULL)); if (!generator_debug) { for (int i = 0; i < DEFAULT_INPUT_SIZE; i++) { for (int j = 0; j < DEFAULT_INPUT_SIZE; j++) { // h_A[i * DEFAULT_INPUT_SIZE + j] = (rand() // / ((float) (RAND_MAX) + 1) * (-4.06e16 - 4.4e16)) // + 4.1e16; h_A[i * DEFAULT_INPUT_SIZE + j] = dis(gen); h_B[i * DEFAULT_INPUT_SIZE + j] = dis(gen); } } } else { for (int i = 0; i < DEFAULT_INPUT_SIZE; i++) { for (int j = 0; j < DEFAULT_INPUT_SIZE; j++) { h_A[i * DEFAULT_INPUT_SIZE + j] = float(2.0); h_B[i * DEFAULT_INPUT_SIZE + j] = float(2.0); } } } int numZeros; int numNans; int numInfs; // printf("Write\n"); f_A = fopen(a_matrix_path, "wb"); f_B = fopen(b_matrix_path, "wb"); float val; numZeros = 0; numNans = 0; numInfs = 0; for (int i = 0; i < DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE; i++) { val = h_A[i]; if (val == 0) numZeros++; if (isnan(val)) numNans++; if (isinf(val)) numInfs++; } printf("Number of zeros/NaNs/INFs on matrix A: %d/%d/%d\n", numZeros, numNans, numInfs); numZeros = 0; numNans = 0; numInfs = 0; for (int i = 0; i < DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE; i++) { val = h_B[i]; if (val == 0) numZeros++; if (isnan(val)) numNans++; if (isinf(val)) numInfs++; } printf("Number of zeros/NaNs/INFs on matrix B: %d/%d/%d\n", numZeros, numNans, numInfs); for (int i = 0; i < DEFAULT_INPUT_SIZE; i++) { fwrite(&(h_A[i * DEFAULT_INPUT_SIZE]), sizeof(float) * DEFAULT_INPUT_SIZE, 1, f_A); } printf("Element 32 of matrix A: %f\n", (float) h_A[32]); printf("Element 50 of matrix B: %f\n", (float) h_B[50]); for (int i = 0; i < DEFAULT_INPUT_SIZE; i++) { fwrite(&(h_B[i * DEFAULT_INPUT_SIZE]), sizeof(float) * DEFAULT_INPUT_SIZE, 1, f_B); } printf("Done\n"); fclose(f_A); fclose(f_B); free(h_A); free(h_B); return; } void ReadMatrixFromFile() { int i; FILE *f_A, *f_B; f_A = fopen(a_matrix_path, "rb"); f_B = fopen(b_matrix_path, "rb"); if (!(f_A && f_B)) { printf("Error opening matrices A, B.\n"); printf("exit on line: %d", __LINE__); exit(-1); } size_t ret_value[2]; for (i = 0; i < k; i++) { ret_value[0] = fread(&A[k * i], sizeof(float) * k, 1, f_A); ret_value[1] = fread(&B[k * i], 
sizeof(float) * k, 1, f_B); if (ret_value[0] != 1 || ret_value[1] != 1) { printf("Bad input/gold formatting: %lu ; %lu .\n", ret_value[0], ret_value[1]); } } printf("Done reading matrices\n"); fclose(f_A); fclose(f_B); } void GetDevice() { cudaDeviceProp prop; cudaError_t teste; int count = 0; teste = cudaGetDeviceCount(&count); printf("Get Device Test: %s\n", cudaGetErrorString(teste)); for (int i = 0; i < count; i++) { cudaGetDeviceProperties(&prop, i); printf("Name: %s\n", prop.name); } int *ndevice; int dev = 0; ndevice = &dev; cudaGetDevice(ndevice); cudaSetDevice(0); cudaGetDeviceProperties(&prop, 0); printf("\ndevice: %d %s\n", *ndevice, prop.name); } double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp, &tzp); return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6); } float* openmpMul(float* a, float* b, size_t size) { double time = mysecond(); float* bT = (float*) malloc(sizeof(float) * size * size); float* c = (float*) calloc(size * size, sizeof(float)); if (c == NULL || bT == NULL) { printf("could not alloc hostGold matrix."); return NULL; } #pragma omp parallel for for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) bT[j * size + i] = b[i * size + j]; #pragma omp parallel for for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { for (int k = 0; k < size; k++) { c[i * size + j] += a[j * size + k] * bT[i * size + k]; } } } printf("host mmul time: %.2f seconds\n", mysecond() - time); return c; } void generateGoldMatrixHalf(unsigned char use_tensor_cores) { //////////////////////////////////////////////////// /////////////CUBLAS GEMM VARS/////////////////////// const float alpha = 1.0; const float beta = 1.0; cublasOperation_t transa = CUBLAS_OP_T, transb = CUBLAS_OP_T; //////////////////////////////////////////////////// // Alloc blas handle cublasHandle_t blas_handle; checkCudaErrors(cublasCreate(&blas_handle)); printf("Tensor cores %d, is handle defined? %d\n", use_tensor_cores, (blas_handle && true)); if (use_tensor_cores == 0) { cublasSetMathMode(blas_handle, CUBLAS_DEFAULT_MATH); } else if (use_tensor_cores == 1) { cublasSetMathMode(blas_handle, CUBLAS_TENSOR_OP_MATH); } //////////////////////////////////////////////////// //////////DEVICE VARS/////////////////////////////// float *d_A; float *d_B; float *d_C; //////////////////////////////////////////////////// A = (float*) malloc(size * sizeof(float)); B = (float*) malloc(size * sizeof(float)); GOLD = (float*) malloc(size * sizeof(float)); ReadMatrixFromFile(); if (k <= 16) { printf("\nMatrix A: \n"); for (int i = 0; i < k * k; i++) { printf(" %.2e", (float) A[i]); if ((i + 1) % k == 0) printf("\n"); } printf("\nMatrix B: \n"); for (int i = 0; i < k * k; i++) { printf(" %.2e", (float) B[i]); if ((i + 1) % k == 0) printf("\n"); } } checkCudaErrors(cudaMalloc((void** ) &d_A, size * sizeof(float))); checkCudaErrors(cudaMalloc((void** ) &d_B, size * sizeof(float))); checkCudaErrors(cudaMalloc((void** ) &d_C, size * sizeof(float))); checkCudaErrors(cudaMemset(d_C, 0, size * sizeof(float))); // ZERA C checkCudaErrors( cudaMemcpy(d_A, A, size * sizeof(float), cudaMemcpyHostToDevice)); // PUSH A checkCudaErrors( cudaMemcpy(d_B, B, size * sizeof(float), cudaMemcpyHostToDevice)); // PUSH B printf("cudaSGEMM... 
k=%d\n", k); double time = mysecond(); cublasSgemm(blas_handle, transa, transb, k, k, k, &alpha, d_A, k, d_B, k, &beta, d_C, k); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaPeekAtLastError()); time = mysecond() - time; /////////// PERF double flops = 2.0 * (double) k * k * k; double gflops = flops / time; double outputpersec = (double) k * k / time; printf("kernel time: %lf\n", time); printf("SIZE:%d OUTPUT/S:%f FLOPS:%f (GFLOPS:%.2f)\n", k, outputpersec, gflops, gflops / 1000000000); /////////// checkCudaErrors( cudaMemcpy(GOLD, d_C, size * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("Analysing output on host...\n"); int i, j; FILE *f_GOLD; f_GOLD = fopen(gold_matrix_path, "wb"); float val; int numZeros = 0; int numNans = 0; int numInfs = 0; float maxAbsVal = 0.0; #pragma omp parallel for for (int i = 0; i < k * k; i++) { val = GOLD[i]; if (fabs(val) > maxAbsVal) { #pragma omp critical maxAbsVal = max(fabs(val), maxAbsVal); } if (val == 0) { #pragma omp atomic numZeros++; if (numZeros < 5) printf("Zero in position (%d,%d)\n", (int) floor(i / k), (int) (i - floor(i / k) * k)); } if (isnan(val)) { #pragma omp atomic numNans++; if (numNans < 5) printf("NaN in position (%d,%d)\n", (int) floor(i / k), (int) (i - floor(i / k) * k)); } if (isinf(val)) { #pragma omp atomic numInfs++; if (numInfs < 5) printf("INF in position (%d,%d)\n", (int) floor(i / k), (int) (i - floor(i / k) * k)); } } printf("Number of zeros/NaNs/INFs on gold: %d/%d/%d\n", numZeros, numNans, numInfs); printf("Maximum absolute value on gold: %f\n", maxAbsVal); if (k <= 16) { for (int i = 0; i < k * k; i++) { printf(" %.2e", (float) GOLD[i]); if ((i + 1) % k == 0) printf("\n"); } } if (host_check) { printf("Calculating mMul using OpenMP on Host...\n"); float *hostGold = openmpMul(A, B, k); if (k <= 16) { printf("Host CPU Gold:\n"); for (int i = 0; i < k * k; i++) { printf(" %.2e", (float) hostGold[i]); if ((i + 1) % k == 0) printf("\n"); } } printf("Comparing GPU result with Host result...\n"); float maxDiff = 0.0; float maxAbsDiff = 0.0; for (i = 0; i < k; i++) { for (j = 0; j < k; j++) { register float diff = fabs( (hostGold[i * k + j] - GOLD[i * k + j]) / hostGold[i * k + j]); register float absDiff = hostGold[i * k + j] - GOLD[i * k + j]; if (diff > maxDiff) { maxDiff = max(diff, maxDiff); printf( "New diff! (%d,%d) hostGold!=gpuGold %e != %e (diff: %e)\n", i, j, hostGold[i * k + j], GOLD[i * k + j], diff); } if (absDiff > maxAbsDiff) { maxAbsDiff = max(absDiff, maxAbsDiff); } // if (diff > 0.1) { // printf("Fail! 
(%d,%d) hostGold!=gpuGold %f != %f (diff: %e)\n", i, j, (float)hostGold[i*k+j], (float)GOLD[i*k+j], diff); // fflush(stdout); // exit(-1); // } } } printf( "CPU and GPU match by a relative error of up to %e element difference.\nMaximum element absolute difference: %e (relatively to float representation: %e)\nWriting to file...\n", maxDiff, maxAbsDiff, maxAbsDiff / FLT_MAX); } //printf("-------------------------\n%.10f\n%.10f\n%.10f\n", GOLD[0], GOLD[1], GOLD[2]); for (i = 0; i < k; i++) { fwrite(&(GOLD[i * k]), sizeof(float) * k, 1, f_GOLD); } fclose(f_GOLD); cublasDestroy(blas_handle); return; } int main(int argc, char** argv) { //==================================== //================== Read parameters unsigned char use_tensor_cores = 0; if (argc < 2) { usage(); exit(-1); } if (checkCmdLineFlag(argc, (const char **) argv, "size")) { k = getCmdLineArgumentInt(argc, (const char **) argv, "size"); if ((k <= 0) || (k % 16 != 0)) { printf("Invalid input size given on the command-line: %d\n", k); printf("exit on line: %d", __LINE__); exit(EXIT_FAILURE); } } else { usage(); printf("exit on line: %d", __LINE__); exit(EXIT_FAILURE); } if (checkCmdLineFlag(argc, (const char **) argv, "input_a")) { getCmdLineArgumentString(argc, (const char **) argv, "input_a", &a_matrix_path); } else { a_matrix_path = new char[100]; snprintf(a_matrix_path, 100, "sgemm_a_%i.matrix", (signed int) DEFAULT_INPUT_SIZE); printf("Using default input_a path: %s\n", a_matrix_path); } if (checkCmdLineFlag(argc, (const char **) argv, "input_b")) { getCmdLineArgumentString(argc, (const char **) argv, "input_b", &b_matrix_path); } else { b_matrix_path = new char[100]; snprintf(b_matrix_path, 100, "sgemm_b_%i.matrix", (signed int) DEFAULT_INPUT_SIZE); printf("Using default input_a path: %s\n", b_matrix_path); } if (checkCmdLineFlag(argc, (const char **) argv, "gold")) { getCmdLineArgumentString(argc, (const char **) argv, "gold", &gold_matrix_path); } else { gold_matrix_path = new char[100]; snprintf(gold_matrix_path, 100, "sgemm_gold_%i.matrix", (signed int) k); printf("Using default gold path: %s\n", gold_matrix_path); } if (checkCmdLineFlag(argc, (const char **) argv, "host_check")) { host_check = true; } if (checkCmdLineFlag(argc, (const char **) argv, "generator_debug")) { generator_debug = true; } //flag for tensor cores if (checkCmdLineFlag(argc, (const char **) argv, "use_tensors")) { use_tensor_cores = getCmdLineArgumentInt(argc, (const char **) argv, "use_tensors"); } //==================================== GetDevice(); size = k * k; printf("Each input matrix size: %.4fGB\n", (float) sizeof(float) * DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE / (1024 * 1024 * 1024)); FILE *test_file; test_file = fopen(a_matrix_path, "rb"); if (!test_file) { printf("Generating input matrices...\n"); generateInputMatrices(use_tensor_cores); } else { printf("Input matrices already exist...\n"); } generateGoldMatrixHalf(use_tensor_cores); return 0; }
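The generator measures the cublasSgemm call with gettimeofday() bracketing a cudaDeviceSynchronize(), which also counts host-side launch overhead. A common alternative is CUDA event timing, with events recorded in the stream around the call. The sketch below is a minimal, self-contained illustration of that approach; it mirrors the call signature used above but is not part of the generator, and its sizes and input data are made up.

// Sketch: timing a GEMM with CUDA events instead of wall-clock time.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main()
{
    const int   k = 1024;                      // small size just for illustration
    const float alpha = 1.0f, beta = 0.0f;

    std::vector<float> h(k * k, 1.0f);
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, k * k * sizeof(float));
    cudaMalloc(&d_B, k * k * sizeof(float));
    cudaMalloc(&d_C, k * k * sizeof(float));
    cudaMemcpy(d_A, h.data(), k * k * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h.data(), k * k * sizeof(float), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                    // recorded in the default stream
    cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T, k, k, k,
                &alpha, d_A, k, d_B, k, &beta, d_C, k);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    double gflops = 2.0 * k * k * k / (ms * 1e6);   // 2*k^3 flops, ms -> GFLOP/s
    printf("k=%d: %.3f ms, %.1f GFLOP/s\n", k, ms, gflops);

    cublasDestroy(handle);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    cudaEventDestroy(start); cudaEventDestroy(stop);
    return 0;
}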
81fe2985ff712ea161a6eee18d09c6f5b650d373.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <cmath> #include <cuml/common/cuml_allocator.hpp> #include <cuml/cuml.hpp> #include <random> #include <vector> #include <common/cudart_utils.h> #include <metrics/batched/information_criterion.cuh> #include "../test_utils.h" namespace MLCommon { namespace Metrics { namespace Batched { template <typename T> void naive_ic(T *h_ic, const T *h_loglike, IC_Type ic_type, int n_params, int batch_size, int n_samples) { T ic_base; T N = static_cast<T>(n_params); T M = static_cast<T>(n_samples); switch (ic_type) { case AIC: ic_base = (T)2 * N; break; case AICc: ic_base = (T)2 * (N + (N * (N + (T)1)) / (M - N - (T)1)); break; case BIC: ic_base = ::log(M) * N; break; } #pragma omp parallel for for (int bid = 0; bid < batch_size; bid++) { h_ic[bid] = ic_base - (T)2.0 * h_loglike[bid]; } } template <typename T> struct BatchedICInputs { int batch_size; int n_params; int n_samples; IC_Type ic_type; T tolerance; }; template <typename T> class BatchedICTest : public ::testing::TestWithParam<BatchedICInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<BatchedICInputs<T>>::GetParam(); // Create stream and allocator CUDA_CHECK(hipStreamCreate(&stream)); allocator = std::make_shared<raft::mr::device::default_allocator>(); // Create arrays std::vector<T> loglike_h = std::vector<T>(params.batch_size); res_h.resize(params.batch_size); T *loglike_d = (T *)allocator->allocate(sizeof(T) * params.batch_size, stream); res_d = (T *)allocator->allocate(sizeof(T) * params.batch_size, stream); // Generate random data std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> udis(0.001, 1.0); // 0 has no log for (int i = 0; i < params.batch_size; i++) loglike_h[i] = ::log(udis(gen)); // Copy the data to the device updateDevice(loglike_d, loglike_h.data(), params.batch_size, stream); // Compute the tested results information_criterion(res_d, loglike_d, params.ic_type, params.n_params, params.batch_size, params.n_samples, stream); // Compute the expected results naive_ic(res_h.data(), loglike_h.data(), params.ic_type, params.n_params, params.batch_size, params.n_samples); allocator->deallocate(loglike_d, sizeof(T) * params.batch_size, stream); } void TearDown() override { allocator->deallocate(res_d, sizeof(T) * params.batch_size, stream); CUDA_CHECK(hipStreamDestroy(stream)); } protected: std::shared_ptr<raft::mr::device::default_allocator> allocator; BatchedICInputs<T> params; T *res_d; std::vector<T> res_h; hipStream_t stream; }; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<double>> inputsd = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}}; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<float>> inputsf = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 
2, 350, BIC, 1e-3}}; using BatchedICTestD = BatchedICTest<double>; using BatchedICTestF = BatchedICTest<float>; TEST_P(BatchedICTestD, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d, params.batch_size, CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedICTestF, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d, params.batch_size, CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace Metrics } // namespace MLCommon
81fe2985ff712ea161a6eee18d09c6f5b650d373.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <cmath> #include <cuml/common/cuml_allocator.hpp> #include <cuml/cuml.hpp> #include <random> #include <vector> #include <common/cudart_utils.h> #include <metrics/batched/information_criterion.cuh> #include "../test_utils.h" namespace MLCommon { namespace Metrics { namespace Batched { template <typename T> void naive_ic(T *h_ic, const T *h_loglike, IC_Type ic_type, int n_params, int batch_size, int n_samples) { T ic_base; T N = static_cast<T>(n_params); T M = static_cast<T>(n_samples); switch (ic_type) { case AIC: ic_base = (T)2 * N; break; case AICc: ic_base = (T)2 * (N + (N * (N + (T)1)) / (M - N - (T)1)); break; case BIC: ic_base = std::log(M) * N; break; } #pragma omp parallel for for (int bid = 0; bid < batch_size; bid++) { h_ic[bid] = ic_base - (T)2.0 * h_loglike[bid]; } } template <typename T> struct BatchedICInputs { int batch_size; int n_params; int n_samples; IC_Type ic_type; T tolerance; }; template <typename T> class BatchedICTest : public ::testing::TestWithParam<BatchedICInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<BatchedICInputs<T>>::GetParam(); // Create stream and allocator CUDA_CHECK(cudaStreamCreate(&stream)); allocator = std::make_shared<raft::mr::device::default_allocator>(); // Create arrays std::vector<T> loglike_h = std::vector<T>(params.batch_size); res_h.resize(params.batch_size); T *loglike_d = (T *)allocator->allocate(sizeof(T) * params.batch_size, stream); res_d = (T *)allocator->allocate(sizeof(T) * params.batch_size, stream); // Generate random data std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> udis(0.001, 1.0); // 0 has no log for (int i = 0; i < params.batch_size; i++) loglike_h[i] = std::log(udis(gen)); // Copy the data to the device updateDevice(loglike_d, loglike_h.data(), params.batch_size, stream); // Compute the tested results information_criterion(res_d, loglike_d, params.ic_type, params.n_params, params.batch_size, params.n_samples, stream); // Compute the expected results naive_ic(res_h.data(), loglike_h.data(), params.ic_type, params.n_params, params.batch_size, params.n_samples); allocator->deallocate(loglike_d, sizeof(T) * params.batch_size, stream); } void TearDown() override { allocator->deallocate(res_d, sizeof(T) * params.batch_size, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: std::shared_ptr<raft::mr::device::default_allocator> allocator; BatchedICInputs<T> params; T *res_d; std::vector<T> res_h; cudaStream_t stream; }; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<double>> inputsd = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}}; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<float>> inputsf = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}}; using BatchedICTestD = 
BatchedICTest<double>; using BatchedICTestF = BatchedICTest<float>; TEST_P(BatchedICTestD, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d, params.batch_size, CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedICTestF, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d, params.batch_size, CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace Metrics } // namespace MLCommon
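naive_ic() above is the host reference for the batched GPU implementation, and it encodes the usual definitions: with N = n_params, M = n_samples and log-likelihood l, AIC = 2N - 2l, AICc adds the small-sample correction 2N(N+1)/(M-N-1), and BIC = N*log(M) - 2l. The short host-only sketch below evaluates all three for one concrete case; N = 5 and M = 52 match the first parameter set in inputsd/inputsf, while the log-likelihood value is made up.

// Sketch: the formulas implemented by naive_ic(), evaluated for one case.
#include <cmath>
#include <cstdio>

int main()
{
    const double N = 5.0, M = 52.0, l = -100.0;

    const double aic  = 2.0 * N - 2.0 * l;                                    // 210.00
    const double aicc = 2.0 * (N + N * (N + 1.0) / (M - N - 1.0)) - 2.0 * l;  // ~211.30
    const double bic  = std::log(M) * N - 2.0 * l;                            // ~219.76

    printf("AIC = %.2f, AICc = %.2f, BIC = %.2f\n", aic, aicc, bic);
    // AICc exceeds AIC because M is small relative to N; BIC penalizes
    // parameters hardest here since log(52) ~ 3.95 > 2.
    return 0;
}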
ba95706a430ff6655840741e39503828d673f7ba.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * usage: nvcc ./stream_test.cu -o ./stream_legacy
 *        nvvp ./stream_legacy ( or as root:
 *        nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_legacy )
 *   ... versus ...
 *        nvcc --default-stream per-thread ./stream_test.cu -o ./stream_per-thread
 *        nvvp ./stream_per-thread ( or as root:
 *        nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_per-thread )
 */

const int N = 1 << 20;

__global__ void kernel(float *x, int n) {
  int tid = threadIdx.x;
  for (int i = tid; i < n; i += blockDim.x) {
    x[i] = sqrt(pow(3.14159, i));
  }
}

int main() {
  const int num_streams = 8;

  hipStream_t streams[num_streams];
  float *data[num_streams];

  for (int i = 0; i < num_streams; i++) {
    hipStreamCreate(&streams[i]);
    hipMalloc(&data[i], N * sizeof(float));

    // launch one worker kernel per stream
    hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, streams[i], data[i], N);

    // launch a dummy kernel on the default stream
    hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, 0, 0);
  }

  hipDeviceReset();

  return 0;
}
ba95706a430ff6655840741e39503828d673f7ba.cu
/*
 * usage: nvcc ./stream_test.cu -o ./stream_legacy
 *        nvvp ./stream_legacy ( or as root:
 *        nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_legacy )
 *   ... versus ...
 *        nvcc --default-stream per-thread ./stream_test.cu -o ./stream_per-thread
 *        nvvp ./stream_per-thread ( or as root:
 *        nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_per-thread )
 */

const int N = 1 << 20;

__global__ void kernel(float *x, int n) {
  int tid = threadIdx.x;
  for (int i = tid; i < n; i += blockDim.x) {
    x[i] = sqrt(pow(3.14159, i));
  }
}

int main() {
  const int num_streams = 8;

  cudaStream_t streams[num_streams];
  float *data[num_streams];

  for (int i = 0; i < num_streams; i++) {
    cudaStreamCreate(&streams[i]);
    cudaMalloc(&data[i], N * sizeof(float));

    // launch one worker kernel per stream
    kernel<<<1, 64, 0, streams[i]>>>(data[i], N);

    // launch a dummy kernel on the default stream
    kernel<<<1, 1>>>(0, 0);
  }

  cudaDeviceReset();

  return 0;
}
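The pair of files above shows how the legacy default stream serializes otherwise independent streams unless the code is built with `--default-stream per-thread`. A source-level alternative, sketched below under the assumption that the goal is simply to avoid that implicit synchronization, is to create the worker streams with the non-blocking flag; this is not part of the original example, just an illustration:

#include <cuda_runtime.h>
#include <math.h>

// Sketch only: a variant of the stream test above in which the worker streams are
// created with cudaStreamNonBlocking, so work queued on them does not synchronize
// with the legacy default stream even without `--default-stream per-thread`.
const int N = 1 << 20;

__global__ void kernel(float *x, int n) {
  int tid = threadIdx.x;
  for (int i = tid; i < n; i += blockDim.x) {
    x[i] = sqrtf(powf(3.14159f, (float)i));
  }
}

int main() {
  const int num_streams = 8;
  cudaStream_t streams[num_streams];
  float *data[num_streams];

  for (int i = 0; i < num_streams; i++) {
    // cudaStreamNonBlocking removes the implicit ordering against stream 0
    cudaStreamCreateWithFlags(&streams[i], cudaStreamNonBlocking);
    cudaMalloc(&data[i], N * sizeof(float));
    kernel<<<1, 64, 0, streams[i]>>>(data[i], N);
  }

  cudaDeviceReset();
  return 0;
}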
20eed8f4034f8968a83d547bdbf095381ee205d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include <chrono> #include <cstdio> #include <cstdlib> #include <cstring> #include <iostream> #include <random> constexpr int TILE_DIM = 32; __global__ void multiplication( int * a, int * b, int * c, int dim) { // create shared memory __shared__ int m[TILE_DIM][TILE_DIM]; __shared__ int n[TILE_DIM][TILE_DIM]; // indices will be stored in registers (automatic variables) int bx = blockIdx.x, by = blockIdx.y; int tx = threadIdx.x, ty = threadIdx.y; // compute the columns and row (TILE_DIM == blockDim.x/y) int column = tx + bx * TILE_DIM; int row = ty + by * TILE_DIM; int value = 0; int limit = ::ceil( dim/static_cast< float >( TILE_DIM) ); // loop over the m and n tiles // strip-mining: break a long-running loop into phases // each phase consists of an inner loop // that executes a number of consecutive // steps of the original loop for ( int phase = 0; phase < limit; ++phase) { // load tiles into shared memory m[ty][tx] = a[row * dim + phase * TILE_DIM + tx]; n[ty][tx] = b[(phase * TILE_DIM + ty) * dim + column]; // wait till all threads in the block have finished loading // the tiles into shared memory __syncthreads(); // compute the dot product for ( int k = 0; k < TILE_DIM; ++k) { value += m[ty][k] * n[k][tx]; } // wait till all threads in the block have finished // computing the dot product __syncthreads(); } c[row * dim + column] = value; } void compute_on_device( int dim, int * host_a, int * host_b, int * host_c) { constexpr int tile_dim = 32; // allocate device memory int * device_a, * device_b, * device_c; hipMalloc( & device_a, dim * dim * sizeof( int) ); hipMalloc( & device_b, dim * dim * sizeof( int) ); hipMalloc( & device_c, dim * dim * sizeof( int) ); // copy input matrices from host to device memory hipMemcpy( device_a, host_a, dim * dim * sizeof( int), hipMemcpyHostToDevice); hipMemcpy( device_b, host_b, dim * dim * sizeof( int), hipMemcpyHostToDevice); dim3 block_dim{ tile_dim, tile_dim }; dim3 grid_dim{ static_cast< unsigned int >( ::ceil( dim/static_cast< float >( block_dim.x) ) ), static_cast< unsigned int >( ::ceil( dim/static_cast< float >( block_dim.y) ) ) }; auto start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( multiplication), dim3(grid_dim), dim3(block_dim) , 0, 0, device_a, device_b, device_c, dim); hipDeviceSynchronize(); auto duration = std::chrono::high_resolution_clock::now() - start; std::cout << "device: " << std::chrono::duration_cast< std::chrono::microseconds >( duration).count() << " ms\n"; hipMemcpy( host_c, device_c, dim * dim * sizeof( int), hipMemcpyDeviceToHost); hipFree( device_a); hipFree( device_b); hipFree( device_c); } void compute_on_host( int dim, int * a, int * b, int * c) { auto start = std::chrono::high_resolution_clock::now(); for ( int row = 0; row < dim; ++row) { for ( int column = 0; column < dim; ++column) { for ( int k = 0; k < dim; ++k) { c[row * dim + column] += a[row * dim + k] * b[k * dim + column]; } } } auto duration = std::chrono::high_resolution_clock::now() - start; std::cout << "host: " << std::chrono::duration_cast< std::chrono::microseconds >( duration).count() << " ms\n"; } bool equal( int dim, int * host, int * device) { for ( int row = 0; row < dim; ++row) { for ( int column = 0; column < dim; ++column) { if ( host[row * dim + column] != device[row * dim + column]) { return false; } } } return true; } int main() { constexpr int dim = 1024; // allocate host memory int * host_a = static_cast< 
int * >( std::malloc( dim * dim * sizeof( int) ) );
    int * host_b = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) );

    // initialize input matrices
    std::minstd_rand generator;
    std::uniform_int_distribution<> distribution{ 0, 255 };
    for ( unsigned int i = 0; i < dim*dim; ++i) {
        host_a[i] = distribution( generator);
        host_b[i] = host_a[i];
    }

    // multiplication on host
    // host_c must be zero-initialized: compute_on_host() accumulates into it with `+=`
    int * host_c = static_cast< int * >( std::calloc( dim * dim, sizeof( int) ) );
    compute_on_host( dim, host_a, host_b, host_c);

    // multiplication on device
    int * device_c = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) );
    compute_on_device( dim, host_a, host_b, device_c);

    if ( ! equal( dim, host_c, device_c) ) {
        std::cout << "matrices are not equal" << std::endl;
    }

    std::free( host_a);
    std::free( host_b);
    std::free( host_c);
    std::free( device_c);

    return EXIT_SUCCESS;
}
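The HIP version above ignores the error codes returned by hipMalloc, hipMemcpy and the kernel launch. A common hardening step is a checking macro around every runtime call; the sketch below is illustrative only (the name HIP_CHECK is a convention, not part of the file above):

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

// Illustrative error-checking macro; wraps any call returning hipError_t.
#define HIP_CHECK(expr)                                                 \
  do {                                                                  \
    hipError_t err_ = (expr);                                           \
    if (err_ != hipSuccess) {                                           \
      std::fprintf(stderr, "HIP error %s at %s:%d\n",                   \
                   hipGetErrorString(err_), __FILE__, __LINE__);        \
      std::exit(EXIT_FAILURE);                                          \
    }                                                                   \
  } while (0)

// usage (e.g. inside compute_on_device above):
//   HIP_CHECK( hipMalloc( & device_a, dim * dim * sizeof( int) ) );
//   HIP_CHECK( hipGetLastError() );        // after the kernel launch
//   HIP_CHECK( hipDeviceSynchronize() );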
20eed8f4034f8968a83d547bdbf095381ee205d2.cu
#include <cassert> #include <chrono> #include <cstdio> #include <cstdlib> #include <cstring> #include <iostream> #include <random> constexpr int TILE_DIM = 32; __global__ void multiplication( int * a, int * b, int * c, int dim) { // create shared memory __shared__ int m[TILE_DIM][TILE_DIM]; __shared__ int n[TILE_DIM][TILE_DIM]; // indices will be stored in registers (automatic variables) int bx = blockIdx.x, by = blockIdx.y; int tx = threadIdx.x, ty = threadIdx.y; // compute the columns and row (TILE_DIM == blockDim.x/y) int column = tx + bx * TILE_DIM; int row = ty + by * TILE_DIM; int value = 0; int limit = std::ceil( dim/static_cast< float >( TILE_DIM) ); // loop over the m and n tiles // strip-mining: break a long-running loop into phases // each phase consists of an inner loop // that executes a number of consecutive // steps of the original loop for ( int phase = 0; phase < limit; ++phase) { // load tiles into shared memory m[ty][tx] = a[row * dim + phase * TILE_DIM + tx]; n[ty][tx] = b[(phase * TILE_DIM + ty) * dim + column]; // wait till all threads in the block have finished loading // the tiles into shared memory __syncthreads(); // compute the dot product for ( int k = 0; k < TILE_DIM; ++k) { value += m[ty][k] * n[k][tx]; } // wait till all threads in the block have finished // computing the dot product __syncthreads(); } c[row * dim + column] = value; } void compute_on_device( int dim, int * host_a, int * host_b, int * host_c) { constexpr int tile_dim = 32; // allocate device memory int * device_a, * device_b, * device_c; cudaMalloc( & device_a, dim * dim * sizeof( int) ); cudaMalloc( & device_b, dim * dim * sizeof( int) ); cudaMalloc( & device_c, dim * dim * sizeof( int) ); // copy input matrices from host to device memory cudaMemcpy( device_a, host_a, dim * dim * sizeof( int), cudaMemcpyHostToDevice); cudaMemcpy( device_b, host_b, dim * dim * sizeof( int), cudaMemcpyHostToDevice); dim3 block_dim{ tile_dim, tile_dim }; dim3 grid_dim{ static_cast< unsigned int >( std::ceil( dim/static_cast< float >( block_dim.x) ) ), static_cast< unsigned int >( std::ceil( dim/static_cast< float >( block_dim.y) ) ) }; auto start = std::chrono::high_resolution_clock::now(); multiplication<<< grid_dim, block_dim >>>( device_a, device_b, device_c, dim); cudaDeviceSynchronize(); auto duration = std::chrono::high_resolution_clock::now() - start; std::cout << "device: " << std::chrono::duration_cast< std::chrono::microseconds >( duration).count() << " ms\n"; cudaMemcpy( host_c, device_c, dim * dim * sizeof( int), cudaMemcpyDeviceToHost); cudaFree( device_a); cudaFree( device_b); cudaFree( device_c); } void compute_on_host( int dim, int * a, int * b, int * c) { auto start = std::chrono::high_resolution_clock::now(); for ( int row = 0; row < dim; ++row) { for ( int column = 0; column < dim; ++column) { for ( int k = 0; k < dim; ++k) { c[row * dim + column] += a[row * dim + k] * b[k * dim + column]; } } } auto duration = std::chrono::high_resolution_clock::now() - start; std::cout << "host: " << std::chrono::duration_cast< std::chrono::microseconds >( duration).count() << " ms\n"; } bool equal( int dim, int * host, int * device) { for ( int row = 0; row < dim; ++row) { for ( int column = 0; column < dim; ++column) { if ( host[row * dim + column] != device[row * dim + column]) { return false; } } } return true; } int main() { constexpr int dim = 1024; // allocate host memory int * host_a = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) ); int * host_b = static_cast< int * >( std::malloc( 
dim * dim * sizeof( int) ) );

    // initialize input matrices
    std::minstd_rand generator;
    std::uniform_int_distribution<> distribution{ 0, 255 };
    for ( unsigned int i = 0; i < dim*dim; ++i) {
        host_a[i] = distribution( generator);
        host_b[i] = host_a[i];
    }

    // multiplication on host
    // host_c must be zero-initialized: compute_on_host() accumulates into it with `+=`
    int * host_c = static_cast< int * >( std::calloc( dim * dim, sizeof( int) ) );
    compute_on_host( dim, host_a, host_b, host_c);

    // multiplication on device
    int * device_c = static_cast< int * >( std::malloc( dim * dim * sizeof( int) ) );
    compute_on_device( dim, host_a, host_b, device_c);

    if ( ! equal( dim, host_c, device_c) ) {
        std::cout << "matrices are not equal" << std::endl;
    }

    std::free( host_a);
    std::free( host_b);
    std::free( host_c);
    std::free( device_c);

    return EXIT_SUCCESS;
}
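The tiled kernel above assumes dim is an exact multiple of TILE_DIM (true for dim = 1024); otherwise the tile loads and the final store would go out of bounds. The sketch below, which reuses the TILE_DIM constant from the file above, shows a boundary-guarded variant; it is an illustration, not a drop-in change to the original file:

// Sketch: a boundary-safe variant of the tiled kernel above, for the case where dim
// may not be a multiple of TILE_DIM. Out-of-range tile elements are loaded as 0 so
// they do not contribute to the dot product, and the final store is guarded.
__global__ void multiplication_guarded(int *a, int *b, int *c, int dim) {
  __shared__ int m[TILE_DIM][TILE_DIM];
  __shared__ int n[TILE_DIM][TILE_DIM];

  int column = threadIdx.x + blockIdx.x * TILE_DIM;
  int row    = threadIdx.y + blockIdx.y * TILE_DIM;

  int value = 0;
  int limit = (dim + TILE_DIM - 1) / TILE_DIM;  // integer ceiling, avoids float rounding

  for (int phase = 0; phase < limit; ++phase) {
    int a_col = phase * TILE_DIM + threadIdx.x;
    int b_row = phase * TILE_DIM + threadIdx.y;

    m[threadIdx.y][threadIdx.x] = (row < dim && a_col < dim) ? a[row * dim + a_col] : 0;
    n[threadIdx.y][threadIdx.x] = (b_row < dim && column < dim) ? b[b_row * dim + column] : 0;
    __syncthreads();

    for (int k = 0; k < TILE_DIM; ++k) {
      value += m[threadIdx.y][k] * n[k][threadIdx.x];
    }
    __syncthreads();
  }

  if (row < dim && column < dim) {
    c[row * dim + column] = value;
  }
}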
639f6c3e23dd0692c4795a22b9a9103905239492.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/PinnedMemoryAllocator.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/hip/MiscUtils.h> #include <THH/THH.h> // for USE_MAGMA #ifdef USE_MAGMA #include <magma.h> #include <magma_types.h> #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLu( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info); template<class scalar_t> void magmaLuBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLuNoPiv( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaLuNoPivBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n); template<class scalar_t> void magmaGetri( magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork, magma_int_t lwork, magma_int_t* info); template<class scalar_t> void magmaGetriBatched( magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholeskySolve( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaCholeskySolveBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholesky( magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaTriangularSolve( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb); template<class scalar_t> void magmaTriangularSolveBatched( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n); 
template<class scalar_t> void magmaGeqrf( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2); template<class scalar_t> void magmaOrgqr( magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info); template<class scalar_t> void magmaSymeig( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info); template<class scalar_t> void magmaSvd( magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A, magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu, scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info); template<class scalar_t> void magmaLuSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaLuSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<> void magmaSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); } template<> void magmaSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); } template<> void magmaSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); } template<> void magmaSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); } template<> void magmaLu<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info); } template<> void magmaLu<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info); } template<> void magmaLuBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaLuBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); } 
template<> void magmaLuNoPiv<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info); } template<> void magmaLuNoPiv<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info); } template<> void magmaLuNoPivBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaLuNoPivBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) { return magma_get_dgetri_nb(n); } template<> inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) { return magma_get_sgetri_nb(n); } template<> void magmaGetri<double>( magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork, magma_int_t lwork, magma_int_t* info) { magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); } template<> void magmaGetri<float>( magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork, magma_int_t lwork, magma_int_t* info) { magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); } template<> void magmaGetriBatched<double>( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaGetriBatched<float>( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskySolve<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb, magma_int_t* info) { magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); } template<> void magmaCholeskySolve<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t* info) { magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); } template<> void magmaCholeskySolveBatched<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskySolveBatched<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void 
magmaCholesky<double>( magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { magma_dpotrf_gpu(uplo, n, dA, ldda, info); } template<> void magmaCholesky<float>( magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { magma_spotrf_gpu(uplo, n, dA, ldda, info); } template<> void magmaCholeskyBatched<double>( magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskyBatched<float>( magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaTriangularSolve<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) { magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb); } template<> void magmaTriangularSolve<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) { magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb); } template<> void magmaTriangularSolveBatched<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaTriangularSolveBatched<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) { return magma_get_dgeqrf_nb(m, n); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) { return magma_get_sgeqrf_nb(m, n); } template<> void magmaGeqrf<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t* info, bool is_v2) { if (!is_v2) { magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info); } } template<> void magmaGeqrf<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t* info, bool is_v2) { if (!is_v2) { magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info); } } template<> void magmaOrgqr<double>( magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t nb, magma_int_t* info) { magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); } template<> void magmaOrgqr<float>( magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t nb, magma_int_t* info) { magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); } template<> void 
magmaSymeig<double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); } template<> void magmaSymeig<float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); } template<> void magmaSvd<double>( magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A, magma_int_t lda, double* s, double* U, magma_int_t ldu, double* VT, magma_int_t ldvt, double* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info) { magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); } template<> void magmaSvd<float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A, magma_int_t lda, float* s, float* U, magma_int_t ldu, float* VT, magma_int_t ldvt, float* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info) { magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); } template<> void magmaLuSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); } template<> void magmaLuSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); } template<> void magmaLuSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaLuSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); } #endif #define ALLOCATE_ARRAY(name, type, size) \ auto storage_##name = pin_memory<type>(size); \ name = static_cast<type*>(storage_##name.data()); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("solve: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); if (b.dim() == 2) { auto ipiv = at::empty({n}, at::kInt); magma_int_t info = 0; magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(), b_data, n, &info); infos[0] = info; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaSolveBatched<scalar_t>( n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaSolveBatched<scalar_t>( n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } } #endif } std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) { auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); std::vector<int64_t> infos(batchCount(self), 0); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{ apply_solve<scalar_t>(self_working_copy, A_working_copy, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "solve_cuda"); } else { singleCheckErrors(infos[0], "solve_cuda"); } return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** self_array; scalar_t** self_inv_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(self.get_device()); magmaLuBatched<scalar_t>( n, n, self_array, n, ipiv_array, info_array, batch_size, magma_queue); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; scalar_t** self_inv_array_cur = &self_inv_array[mini_idx]; magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaGetriBatched<scalar_t>( n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaGetriBatched<scalar_t>( n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } template <typename scalar_t> static void apply_single_inverse(Tensor& self, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n); magma_int_t info_tmp = 0; Tensor ipiv = at::empty({n}, at::kInt); Tensor dwork = at::empty({lwork}, self.options()); magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp); if (info_tmp != 0) { info = info_tmp; return; } magmaGetri<scalar_t>( n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp); info = info_tmp; #endif } Tensor _inverse_helper_cuda(const Tensor& self) { auto self_inv_working_copy = cloneBatchedColumnMajor(self); if (self.dim() > 2) { std::vector<int64_t> infos(batchCount(self), 0); auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse_cuda"); } else { int64_t info = 0; AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse<scalar_t>(self_inv_working_copy, info); }); singleCheckErrors(info, "inverse_cuda"); } return self_inv_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("cholesky_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); int info_tmp = 0; if (b.dim() == 2) { magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n, b_data, n, &info_tmp); info = info_tmp; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, A_array_cur, n, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) { int64_t info = 0; auto 
self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{ apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info); }); TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("cholesky: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); if (self.dim() == 2) { magma_int_t info = 0; magmaCholesky<scalar_t>(uplo, n, self_data, n, &info); infos[0] = info; } else { auto self_mat_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t* info_array; scalar_t** self_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; } MAGMAQueue magma_queue(self.get_device()); constexpr int64_t batch_limit = 262140; // Compute as many batches of 262140 possible // 262140 is the size of the largest batch of matrices that can be run with // violating maximum kernel configuration // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaCholeskyBatched<scalar_t>( uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaCholeskyBatched<scalar_t>( uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } } #endif } Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); Tensor self_working_copy; if (upper) { self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2)); } else { self_working_copy = cloneBatchedColumnMajor(self); } AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{ apply_cholesky<scalar_t>(self_working_copy, false, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "cholesky_cuda"); } else { singleCheckErrors(infos[0], "cholesky_cuda"); } if (upper) { return self_working_copy.transpose(-1, -2); } else { return self_working_copy; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) { #ifndef USE_MAGMA AT_ERROR("lu: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_int_t k = ::min(m, n); if (self.dim() == 2) { // If `pivots` is defined, then we have to compute them. // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute // the partially-pivoted LU decomposition with / without pivots. // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots. // The data is later copied back to the appropriate output tensor. Tensor info_tmp = at::zeros({}, at::kInt); if (get_pivots) { Tensor piv_tmp = at::empty({k}, at::kInt); magmaLu<scalar_t>( m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>()); pivots.copy_(piv_tmp); } else { magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>()); } infos.copy_(info_tmp); } else { auto self_matrix_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); scalar_t** self_array; ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_matrix_stride]; } MAGMAQueue magma_queue(self.get_device()); // Same comment as in the case of single matrix above. if (get_pivots) { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto pivots_matrix_stride = pivots.size(-1); magma_int_t** pivots_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_matrix_stride]; } magmaLuBatched<scalar_t>( m, n, self_array, m, pivots_array, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } else { magmaLuNoPivBatched<scalar_t>( m, n, self_array, m, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } } #endif } std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) { TORCH_CHECK(self.dim() >= 2, "expected tensor with 2 or more dimensions, got size: ", self.sizes(), " instead"); auto m = self.size(-2); auto n = self.size(-1); auto k = ::min(m, n); auto req_size = self.sizes().vec(); req_size.pop_back(); req_size.back() = k; Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous(); req_size.pop_back(); auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt)); Tensor self_working_copy; if (self.numel() == 0) { self_working_copy = at::empty_like(self); } else { self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{ apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot); if (self.dim() > 2 && pivot && m == n && m <= 32) { /* The magma implementation of small singular square batch matrices has a bug that results nan values in the LU factorization results, see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on TODO: disable this block for magma versions that implement a bug fix */ auto batch_size = infos_tensor.numel(); auto infos_array = infos_tensor.view({batch_size}); auto infos_cpu = infos_array.to(at::kCPU); auto infos_data = infos_cpu.data_ptr<int>(); auto input_array = self.view({batch_size, m, n}); auto working_array = self_working_copy.view({batch_size, m, n}); auto pivots_array = pivots_tensor.view({batch_size, k}); for (int64_t i = 0; i < batch_size; i++) { auto info = infos_data[i]; if (info > 0) { /* 
We'll recompute LU factorization of singular matrices using the non-batch implementation to workaround the magma bug (magma issue 13). */ working_array[i].copy_(input_array[i]); auto matrix = working_array[i]; auto pivots = pivots_array[i]; auto infos = infos_array[i]; apply_lu<scalar_t>(matrix, pivots, infos, pivot); } } } }); } if (check_errors) { if (self.dim() == 2) { singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true); } else { batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true); } } return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) { #ifndef USE_MAGMA AT_ERROR("triangular_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans; magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); // batch_size == 1 implies that: // 1. the RHS and LHS tensors have 2 dimensions, or // 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1 if (batch_size == 1) { magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n); } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, A_array_cur, n, b_array_cur, n, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue); } } #endif } std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper, bool transpose, bool unitriangular) { auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular); }); return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("qr: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto q_data = Q.data_ptr<scalar_t>(); auto r_data = R.data_ptr<scalar_t>(); auto q_matrix_stride = matrixStride(Q); auto r_matrix_stride = matrixStride(R); magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)"); magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)"); magma_int_t k = m < n ? m : n; magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n); int64_t batch_size = batchCount(R); // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors. // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors. Tensor tau = at::empty({k}, Q.options().device(at::kCPU)); Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options()); scalar_t* tau_data = tau.data_ptr<scalar_t>(); scalar_t* work_data = work.data_ptr<scalar_t>(); // This phase computes R (the raw version) // This uses MAGMA's ?geqrf2_gpu function magma_int_t info = 0; for (int64_t i = 0; i < batch_size; i++) { scalar_t* r_working_ptr = &r_data[i * r_matrix_stride]; magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true); infos[i] = info; if (info != 0) { return; } } // This phase computes Q (the raw version) // We require to perform ?geqrf_gpu again due to this bug in MAGMA: // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly. // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu // Refer to the below link for more details: // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 for (int64_t i = 0; i < batch_size; i++) { scalar_t* q_working_ptr = &q_data[i * q_matrix_stride]; magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false); infos[i] = info; if (info != 0) { return; } magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) { std::vector<int64_t> infos(batchCount(self), 0); // Setup input geometry and inputs for apply_qr std::vector<int64_t> q_sizes, q_strides; int64_t n_columns_q; std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some); Tensor q_working_copy, r_working_copy; // If there are no elements, then we simply return a pair of tensors of required dimensions if (self.numel() == 0) { // Fix the number of columns of q_working_copy appropriately q_sizes[self.dim() - 1] = n_columns_q; q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options()); q_working_copy = q_working_copy.expand_as(q_working_copy); // We repurpose the same q_sizes for r_working_copy // Fix the number of rows and columns of q_working_copy appropriately q_sizes[self.dim() - 1] = self.size(-1); q_sizes[self.dim() - 2] = n_columns_q; r_working_copy = at::empty(q_sizes, self.options()); return std::make_tuple(q_working_copy, r_working_copy); } q_working_copy = at::empty_strided(q_sizes, q_strides, self.options()); q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self); r_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{ 
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "qr_cuda"); } else { singleCheckErrors(infos[0], "qr_cuda"); } return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q), r_working_copy.narrow(-2, 0, n_columns_q).triu()); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("symeig: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto eigvals_data = eigvals.data_ptr<scalar_t>(); auto self_matrix_stride = matrixStride(self); auto eigvals_stride = eigvals.size(-1); int64_t batch_size = batchCount(self); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec; scalar_t* wA; ALLOCATE_ARRAY(wA, scalar_t, n * n); magma_int_t info; // Run once, first to get the optimum work sizes. // Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magma_int_t liwork = -1; magma_int_t iwkopt; magmaSymeig<scalar_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &iwkopt, liwork, &info); scalar_t* work; magma_int_t* iwork; lwork = magma_int_cast(wkopt, "work_size"); liwork = magma_int_cast(iwkopt, "iwork_size"); ALLOCATE_ARRAY(work, scalar_t, lwork); ALLOCATE_ARRAY(iwork, magma_int_t, liwork); for (int64_t i = 0; i < batch_size; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; scalar_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride]; magmaSymeig<scalar_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr, wA, n, work, lwork, iwork, liwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); auto self_sizes = self.sizes().vec(); self_sizes.pop_back(); // magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors. // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues. // The data is later moved to the appropriate device. // In the case where self.numel() == 0, we just return an empty tensor of // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)") auto eigvals_working_copy = self.numel() == 0 ? 
at::empty(self_sizes, self.options()) : at::empty(self_sizes, self.options().device(at::kCPU)); if (self.numel() == 0) { return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self)); } auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "symeig_cuda", [&]{ apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "symeig_cuda"); } else { singleCheckErrors(infos[0], "symeig_cuda"); } if (eigenvectors) { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy); } else { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options())); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<typename scalar_t> static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT, char jobchar, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("svd: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<scalar_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); auto batchsize = batchCount(self); magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); auto k = ::min(m, n); magma_int_t info = 0; // Run once, first to get the optimum work size. // Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magma_int_t* iwork; ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k); magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info); lwork = magma_int_cast(wkopt, "work_size"); scalar_t* work; ALLOCATE_ARRAY(work, scalar_t, lwork); for (int64_t i = 0; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_stride]; scalar_t* S_working_ptr = &S_data[i * S_stride]; scalar_t* U_working_ptr = &U_data[i * U_stride]; scalar_t* VT_working_ptr = &VT_data[i * VT_stride]; // Compute S, U (optionally), VT (optionally) magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m, S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) { std::vector<int64_t> infos(batchCount(self), 0); int64_t m = self.size(-2), n = self.size(-1); int64_t k = ::min(m, n); char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N'; Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv); if (self.numel() > 0) { // The input matrix, U, S and VT have to reside in pinned memory. // Additionally, the input and U have to be in column major format. // _create_U_S_VT takes care of a part of these requirements (for U, S and VT) // For the input matrix, this requirements are being taken care of below. 
// Specify strides auto self_col_major_strides = at::detail::defaultStrides(self.sizes()); self_col_major_strides[self.dim() - 2] = 1; self_col_major_strides[self.dim() - 1] = m; // Create strided tensor in pinned memory auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides, at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true)); self_working_copy.copy_(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{ apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "svd_cuda"); } else { singleCheckErrors(infos[0], "svd_cuda"); } U_working_copy = same_stride_to(U_working_copy, self.options()); S_working_copy = same_stride_to(S_working_copy, self.options()); VT_working_copy = same_stride_to(VT_working_copy, self.options()); if (compute_uv) { if (some) { VT_working_copy = VT_working_copy.narrow(-1, 0, k); } } else { VT_working_copy.zero_(); U_working_copy.zero_(); } } else { U_working_copy = same_stride_to(U_working_copy, self.options()).zero_(); S_working_copy = same_stride_to(S_working_copy, self.options()); VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_(); } return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("lu_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto b_data = b.data_ptr<scalar_t>(); auto lu_data = lu.data_ptr<scalar_t>(); auto n = lu.size(-2); auto nrhs = b.size(-1); int info_tmp = 0; if (b.dim() == 2) { magma_int_t info = 0; Tensor pivots_tmp = pivots.cpu(); magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp); info = info_tmp; } else { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto b_stride = matrixStride(b); auto lu_stride = matrixStride(lu); auto pivots_stride = pivots.size(-1); magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount"); magma_int_t** pivots_array; scalar_t** lu_array; scalar_t** b_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_stride]; b_array[i] = &b_data[i * b_stride]; lu_array[i] = &lu_data[i * lu_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** lu_array_cur = &lu_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** pivots_array_cur = &pivots_array[mini_idx]; magmaLuSolveBatched<scalar_t>( n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { 
magmaLuSolveBatched<scalar_t>( n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data); auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous(); if (self.numel() == 0 || LU_data.numel() == 0) { return at::zeros_like(self); } AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{ apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info); }); TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info); return self_working_copy; } }} // namespace at::native #undef ALLOCATE_ARRAY
639f6c3e23dd0692c4795a22b9a9103905239492.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/PinnedMemoryAllocator.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/cuda/MiscUtils.h> #include <THC/THC.h> // for USE_MAGMA #ifdef USE_MAGMA #include <magma.h> #include <magma_types.h> #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLu( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info); template<class scalar_t> void magmaLuBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLuNoPiv( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaLuNoPivBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n); template<class scalar_t> void magmaGetri( magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork, magma_int_t lwork, magma_int_t* info); template<class scalar_t> void magmaGetriBatched( magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholeskySolve( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaCholeskySolveBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholesky( magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaTriangularSolve( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb); template<class scalar_t> void magmaTriangularSolveBatched( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n); template<class scalar_t> void magmaGeqrf( magma_int_t 
m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2); template<class scalar_t> void magmaOrgqr( magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info); template<class scalar_t> void magmaSymeig( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info); template<class scalar_t> void magmaSvd( magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A, magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu, scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info); template<class scalar_t> void magmaLuSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaLuSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<> void magmaSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); } template<> void magmaSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); } template<> void magmaSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); } template<> void magmaSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); } template<> void magmaLu<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info); } template<> void magmaLu<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info); } template<> void magmaLuBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaLuBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaLuNoPiv<double>( magma_int_t m, 
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info); } template<> void magmaLuNoPiv<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info); } template<> void magmaLuNoPivBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaLuNoPivBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) { return magma_get_dgetri_nb(n); } template<> inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) { return magma_get_sgetri_nb(n); } template<> void magmaGetri<double>( magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork, magma_int_t lwork, magma_int_t* info) { magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); } template<> void magmaGetri<float>( magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork, magma_int_t lwork, magma_int_t* info) { magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); } template<> void magmaGetriBatched<double>( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaGetriBatched<float>( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskySolve<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb, magma_int_t* info) { magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); } template<> void magmaCholeskySolve<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t* info) { magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); } template<> void magmaCholeskySolveBatched<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskySolveBatched<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaCholesky<double>( magma_uplo_t uplo, magma_int_t n, double* dA, 
magma_int_t ldda, magma_int_t* info) { magma_dpotrf_gpu(uplo, n, dA, ldda, info); } template<> void magmaCholesky<float>( magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { magma_spotrf_gpu(uplo, n, dA, ldda, info); } template<> void magmaCholeskyBatched<double>( magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskyBatched<float>( magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaTriangularSolve<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) { magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb); } template<> void magmaTriangularSolve<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) { magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb); } template<> void magmaTriangularSolveBatched<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaTriangularSolveBatched<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) { return magma_get_dgeqrf_nb(m, n); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) { return magma_get_sgeqrf_nb(m, n); } template<> void magmaGeqrf<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t* info, bool is_v2) { if (!is_v2) { magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info); } } template<> void magmaGeqrf<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t* info, bool is_v2) { if (!is_v2) { magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info); } } template<> void magmaOrgqr<double>( magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t nb, magma_int_t* info) { magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); } template<> void magmaOrgqr<float>( magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t nb, magma_int_t* info) { magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); } template<> void magmaSymeig<double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* 
dA, magma_int_t ldda, double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); } template<> void magmaSymeig<float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); } template<> void magmaSvd<double>( magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A, magma_int_t lda, double* s, double* U, magma_int_t ldu, double* VT, magma_int_t ldvt, double* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info) { magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); } template<> void magmaSvd<float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A, magma_int_t lda, float* s, float* U, magma_int_t ldu, float* VT, magma_int_t ldvt, float* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info) { magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); } template<> void magmaLuSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); } template<> void magmaLuSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); } template<> void magmaLuSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaLuSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); } #endif #define ALLOCATE_ARRAY(name, type, size) \ auto storage_##name = pin_memory<type>(size); \ name = static_cast<type*>(storage_##name.data()); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("solve: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); if (b.dim() == 2) { auto ipiv = at::empty({n}, at::kInt); magma_int_t info = 0; magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(), b_data, n, &info); infos[0] = info; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaSolveBatched<scalar_t>( n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaSolveBatched<scalar_t>( n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } } #endif } std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) { auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); std::vector<int64_t> infos(batchCount(self), 0); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{ apply_solve<scalar_t>(self_working_copy, A_working_copy, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "solve_cuda"); } else { singleCheckErrors(infos[0], "solve_cuda"); } return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** self_array; scalar_t** self_inv_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(self.get_device()); magmaLuBatched<scalar_t>( n, n, self_array, n, ipiv_array, info_array, batch_size, magma_queue); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; scalar_t** self_inv_array_cur = &self_inv_array[mini_idx]; magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaGetriBatched<scalar_t>( n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaGetriBatched<scalar_t>( n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } template <typename scalar_t> static void apply_single_inverse(Tensor& self, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n); magma_int_t info_tmp = 0; Tensor ipiv = at::empty({n}, at::kInt); Tensor dwork = at::empty({lwork}, self.options()); magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp); if (info_tmp != 0) { info = info_tmp; return; } magmaGetri<scalar_t>( n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp); info = info_tmp; #endif } Tensor _inverse_helper_cuda(const Tensor& self) { auto self_inv_working_copy = cloneBatchedColumnMajor(self); if (self.dim() > 2) { std::vector<int64_t> infos(batchCount(self), 0); auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse_cuda"); } else { int64_t info = 0; AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse<scalar_t>(self_inv_working_copy, info); }); singleCheckErrors(info, "inverse_cuda"); } return self_inv_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("cholesky_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); int info_tmp = 0; if (b.dim() == 2) { magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n, b_data, n, &info_tmp); info = info_tmp; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, A_array_cur, n, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) { int64_t info = 0; auto 
self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{ apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info); }); TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("cholesky: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); if (self.dim() == 2) { magma_int_t info = 0; magmaCholesky<scalar_t>(uplo, n, self_data, n, &info); infos[0] = info; } else { auto self_mat_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t* info_array; scalar_t** self_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; } MAGMAQueue magma_queue(self.get_device()); constexpr int64_t batch_limit = 262140; // Compute as many batches of 262140 possible // 262140 is the size of the largest batch of matrices that can be run with // violating maximum kernel configuration // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaCholeskyBatched<scalar_t>( uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaCholeskyBatched<scalar_t>( uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } } #endif } Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); Tensor self_working_copy; if (upper) { self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2)); } else { self_working_copy = cloneBatchedColumnMajor(self); } AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{ apply_cholesky<scalar_t>(self_working_copy, false, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "cholesky_cuda"); } else { singleCheckErrors(infos[0], "cholesky_cuda"); } if (upper) { return self_working_copy.transpose(-1, -2); } else { return self_working_copy; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) { #ifndef USE_MAGMA AT_ERROR("lu: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_int_t k = std::min(m, n); if (self.dim() == 2) { // If `pivots` is defined, then we have to compute them. // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute // the partially-pivoted LU decomposition with / without pivots. // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots. // The data is later copied back to the appropriate output tensor. Tensor info_tmp = at::zeros({}, at::kInt); if (get_pivots) { Tensor piv_tmp = at::empty({k}, at::kInt); magmaLu<scalar_t>( m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>()); pivots.copy_(piv_tmp); } else { magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>()); } infos.copy_(info_tmp); } else { auto self_matrix_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); scalar_t** self_array; ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_matrix_stride]; } MAGMAQueue magma_queue(self.get_device()); // Same comment as in the case of single matrix above. if (get_pivots) { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto pivots_matrix_stride = pivots.size(-1); magma_int_t** pivots_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_matrix_stride]; } magmaLuBatched<scalar_t>( m, n, self_array, m, pivots_array, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } else { magmaLuNoPivBatched<scalar_t>( m, n, self_array, m, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } } #endif } std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) { TORCH_CHECK(self.dim() >= 2, "expected tensor with 2 or more dimensions, got size: ", self.sizes(), " instead"); auto m = self.size(-2); auto n = self.size(-1); auto k = std::min(m, n); auto req_size = self.sizes().vec(); req_size.pop_back(); req_size.back() = k; Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous(); req_size.pop_back(); auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt)); Tensor self_working_copy; if (self.numel() == 0) { self_working_copy = at::empty_like(self); } else { self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{ apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot); if (self.dim() > 2 && pivot && m == n && m <= 32) { /* The magma implementation of small singular square batch matrices has a bug that results nan values in the LU factorization results, see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on TODO: disable this block for magma versions that implement a bug fix */ auto batch_size = infos_tensor.numel(); auto infos_array = infos_tensor.view({batch_size}); auto infos_cpu = infos_array.to(at::kCPU); auto infos_data = infos_cpu.data_ptr<int>(); auto input_array = self.view({batch_size, m, n}); auto working_array = self_working_copy.view({batch_size, m, n}); auto pivots_array = pivots_tensor.view({batch_size, k}); for (int64_t i = 0; i < batch_size; i++) { auto info = infos_data[i]; if (info > 0) 
{ /* We'll recompute LU factorization of singular matrices using the non-batch implementation to workaround the magma bug (magma issue 13). */ working_array[i].copy_(input_array[i]); auto matrix = working_array[i]; auto pivots = pivots_array[i]; auto infos = infos_array[i]; apply_lu<scalar_t>(matrix, pivots, infos, pivot); } } } }); } if (check_errors) { if (self.dim() == 2) { singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true); } else { batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true); } } return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) { #ifndef USE_MAGMA AT_ERROR("triangular_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans; magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); // batch_size == 1 implies that: // 1. the RHS and LHS tensors have 2 dimensions, or // 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1 if (batch_size == 1) { magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n); } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, A_array_cur, n, b_array_cur, n, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue); } } #endif } std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper, bool transpose, bool unitriangular) { auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular); }); return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("qr: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto q_data = Q.data_ptr<scalar_t>(); auto r_data = R.data_ptr<scalar_t>(); auto q_matrix_stride = matrixStride(Q); auto r_matrix_stride = matrixStride(R); magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)"); magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)"); magma_int_t k = m < n ? m : n; magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n); int64_t batch_size = batchCount(R); // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors. // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors. Tensor tau = at::empty({k}, Q.options().device(at::kCPU)); Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options()); scalar_t* tau_data = tau.data_ptr<scalar_t>(); scalar_t* work_data = work.data_ptr<scalar_t>(); // This phase computes R (the raw version) // This uses MAGMA's ?geqrf2_gpu function magma_int_t info = 0; for (int64_t i = 0; i < batch_size; i++) { scalar_t* r_working_ptr = &r_data[i * r_matrix_stride]; magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true); infos[i] = info; if (info != 0) { return; } } // This phase computes Q (the raw version) // We require to perform ?geqrf_gpu again due to this bug in MAGMA: // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly. // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu // Refer to the below link for more details: // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 for (int64_t i = 0; i < batch_size; i++) { scalar_t* q_working_ptr = &q_data[i * q_matrix_stride]; magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false); infos[i] = info; if (info != 0) { return; } magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) { std::vector<int64_t> infos(batchCount(self), 0); // Setup input geometry and inputs for apply_qr std::vector<int64_t> q_sizes, q_strides; int64_t n_columns_q; std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some); Tensor q_working_copy, r_working_copy; // If there are no elements, then we simply return a pair of tensors of required dimensions if (self.numel() == 0) { // Fix the number of columns of q_working_copy appropriately q_sizes[self.dim() - 1] = n_columns_q; q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options()); q_working_copy = q_working_copy.expand_as(q_working_copy); // We repurpose the same q_sizes for r_working_copy // Fix the number of rows and columns of q_working_copy appropriately q_sizes[self.dim() - 1] = self.size(-1); q_sizes[self.dim() - 2] = n_columns_q; r_working_copy = at::empty(q_sizes, self.options()); return std::make_tuple(q_working_copy, r_working_copy); } q_working_copy = at::empty_strided(q_sizes, q_strides, self.options()); q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self); r_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{ 
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "qr_cuda"); } else { singleCheckErrors(infos[0], "qr_cuda"); } return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q), r_working_copy.narrow(-2, 0, n_columns_q).triu()); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("symeig: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto eigvals_data = eigvals.data_ptr<scalar_t>(); auto self_matrix_stride = matrixStride(self); auto eigvals_stride = eigvals.size(-1); int64_t batch_size = batchCount(self); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec; scalar_t* wA; ALLOCATE_ARRAY(wA, scalar_t, n * n); magma_int_t info; // Run once, first to get the optimum work sizes. // Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magma_int_t liwork = -1; magma_int_t iwkopt; magmaSymeig<scalar_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &iwkopt, liwork, &info); scalar_t* work; magma_int_t* iwork; lwork = magma_int_cast(wkopt, "work_size"); liwork = magma_int_cast(iwkopt, "iwork_size"); ALLOCATE_ARRAY(work, scalar_t, lwork); ALLOCATE_ARRAY(iwork, magma_int_t, liwork); for (int64_t i = 0; i < batch_size; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; scalar_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride]; magmaSymeig<scalar_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr, wA, n, work, lwork, iwork, liwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); auto self_sizes = self.sizes().vec(); self_sizes.pop_back(); // magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors. // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues. // The data is later moved to the appropriate device. // In the case where self.numel() == 0, we just return an empty tensor of // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)") auto eigvals_working_copy = self.numel() == 0 ? 
at::empty(self_sizes, self.options()) : at::empty(self_sizes, self.options().device(at::kCPU)); if (self.numel() == 0) { return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self)); } auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "symeig_cuda", [&]{ apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "symeig_cuda"); } else { singleCheckErrors(infos[0], "symeig_cuda"); } if (eigenvectors) { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy); } else { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options())); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<typename scalar_t> static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT, char jobchar, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("svd: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<scalar_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); auto batchsize = batchCount(self); magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); auto k = std::min(m, n); magma_int_t info = 0; // Run once, first to get the optimum work size. // Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magma_int_t* iwork; ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k); magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info); lwork = magma_int_cast(wkopt, "work_size"); scalar_t* work; ALLOCATE_ARRAY(work, scalar_t, lwork); for (int64_t i = 0; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_stride]; scalar_t* S_working_ptr = &S_data[i * S_stride]; scalar_t* U_working_ptr = &U_data[i * U_stride]; scalar_t* VT_working_ptr = &VT_data[i * VT_stride]; // Compute S, U (optionally), VT (optionally) magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m, S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) { std::vector<int64_t> infos(batchCount(self), 0); int64_t m = self.size(-2), n = self.size(-1); int64_t k = std::min(m, n); char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N'; Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv); if (self.numel() > 0) { // The input matrix, U, S and VT have to reside in pinned memory. // Additionally, the input and U have to be in column major format. // _create_U_S_VT takes care of a part of these requirements (for U, S and VT) // For the input matrix, this requirements are being taken care of below. 
// Specify strides auto self_col_major_strides = at::detail::defaultStrides(self.sizes()); self_col_major_strides[self.dim() - 2] = 1; self_col_major_strides[self.dim() - 1] = m; // Create strided tensor in pinned memory auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides, at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true)); self_working_copy.copy_(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{ apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "svd_cuda"); } else { singleCheckErrors(infos[0], "svd_cuda"); } U_working_copy = same_stride_to(U_working_copy, self.options()); S_working_copy = same_stride_to(S_working_copy, self.options()); VT_working_copy = same_stride_to(VT_working_copy, self.options()); if (compute_uv) { if (some) { VT_working_copy = VT_working_copy.narrow(-1, 0, k); } } else { VT_working_copy.zero_(); U_working_copy.zero_(); } } else { U_working_copy = same_stride_to(U_working_copy, self.options()).zero_(); S_working_copy = same_stride_to(S_working_copy, self.options()); VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_(); } return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("lu_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto b_data = b.data_ptr<scalar_t>(); auto lu_data = lu.data_ptr<scalar_t>(); auto n = lu.size(-2); auto nrhs = b.size(-1); int info_tmp = 0; if (b.dim() == 2) { magma_int_t info = 0; Tensor pivots_tmp = pivots.cpu(); magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp); info = info_tmp; } else { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto b_stride = matrixStride(b); auto lu_stride = matrixStride(lu); auto pivots_stride = pivots.size(-1); magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount"); magma_int_t** pivots_array; scalar_t** lu_array; scalar_t** b_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_stride]; b_array[i] = &b_data[i * b_stride]; lu_array[i] = &lu_data[i * lu_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** lu_array_cur = &lu_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** pivots_array_cur = &pivots_array[mini_idx]; magmaLuSolveBatched<scalar_t>( n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { 
magmaLuSolveBatched<scalar_t>( n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data); auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous(); if (self.numel() == 0 || LU_data.numel() == 0) { return at::zeros_like(self); } AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{ apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info); }); TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info); return self_working_copy; } }} // namespace at::native #undef ALLOCATE_ARRAY
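The batched MAGMA helpers in the file above (apply_solve, apply_cholesky_solve, apply_lu_solve, apply_batched_inverse) all split their work into chunks of at most 65535 matrices before calling the *_batched routines. The following is a minimal, self-contained sketch of that chunking logic only; the for_each_chunk helper, the process_chunk callback, and the example batch size are illustrative placeholders and are not part of the original file.

#include <cstdint>
#include <cstdio>
#include <functional>

// Sketch of the mini-batch loop used above: process floor(batch_size / batch_limit)
// full chunks, then one remainder chunk of batch_size % batch_limit entries.
void for_each_chunk(int64_t batch_size, int64_t batch_limit,
                    const std::function<void(int64_t offset, int64_t count)>& process_chunk) {
  int64_t mini_batches = batch_size / batch_limit;
  int64_t mini_idx = 0;
  for (; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    process_chunk(mini_idx, batch_limit);               // full chunk of batch_limit matrices
  }
  if (batch_size % batch_limit != 0) {
    process_chunk(mini_idx, batch_size % batch_limit);  // leftover matrices
  }
}

int main() {
  // Hypothetical batch size; 65535 matches the batch_limit constant used in the file above.
  for_each_chunk(150000, 65535, [](int64_t offset, int64_t count) {
    std::printf("chunk at offset %lld with %lld matrices\n",
                (long long)offset, (long long)count);
  });
  return 0;
}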
02662036720e734a51190abd8f59d2456b97cf99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> __global__ void fill(int * m, std::size_t w , std::size_t h) { auto idx = blockIdx.x * blockDim.x + threadIdx.x; auto idy = blockIdx.y * blockDim.y + threadIdx.y; if( idx < w && idy <h ) { m [ idy * w + idx ] = idy * w + idx; } } int main() { std::size_t w =10; std::size_t h =10; std::size_t size =w*h; int * m_h = nullptr; int * m_d = nullptr; hipHostMalloc(&m_h, size * sizeof(int)); hipMalloc( &m_d, size * sizeof(int)); dim3 block (32 , 32); dim3 grid ((w-1) / block.x +1, (h-1)/block.y +1); hipLaunchKernelGGL(( fill), dim3(grid), dim3(block) , 0, 0, m_d, w , h ); hipMemcpy ( m_h , m_d, size * sizeof (int) , hipMemcpyDeviceToHost); for (std::size_t j = 0; j < h ;++j ) { for (std::size_t i =0 ; i<w ; ++i) { std::cout << m_h[j*w +i]<< ' '; } std::cout << std:: endl; } hipFree(m_d); hipHostFree(m_h); return 0; }
02662036720e734a51190abd8f59d2456b97cf99.cu
#include<iostream> __global__ void fill(int * m, std::size_t w , std::size_t h) { auto idx = blockIdx.x * blockDim.x + threadIdx.x; auto idy = blockIdx.y * blockDim.y + threadIdx.y; if( idx < w && idy <h ) { m [ idy * w + idx ] = idy * w + idx; } } int main() { std::size_t w =10; std::size_t h =10; std::size_t size =w*h; int * m_h = nullptr; int * m_d = nullptr; cudaMallocHost(&m_h, size * sizeof(int)); cudaMalloc( &m_d, size * sizeof(int)); dim3 block (32 , 32); dim3 grid ((w-1) / block.x +1, (h-1)/block.y +1); fill<<<grid, block >>>(m_d, w , h ); cudaMemcpy ( m_h , m_d, size * sizeof (int) , cudaMemcpyDeviceToHost); for (std::size_t j = 0; j < h ;++j ) { for (std::size_t i =0 ; i<w ; ++i) { std::cout << m_h[j*w +i]<< ' '; } std::cout << std:: endl; } cudaFree(m_d); cudaFreeHost(m_h); return 0; }
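The fill example pair above performs its allocations and copies without checking return codes. A common companion technique, shown here only as a sketch, is to wrap runtime calls in an error-checking macro; the CHECK_CUDA name and the trivial main are illustrative and not part of either file in the pair.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Illustrative helper: abort with a readable message when a CUDA runtime call fails,
// instead of continuing with an invalid pointer or stale data.
#define CHECK_CUDA(call)                                                   \
  do {                                                                     \
    cudaError_t err_ = (call);                                             \
    if (err_ != cudaSuccess) {                                             \
      std::fprintf(stderr, "%s failed: %s\n", #call,                       \
                   cudaGetErrorString(err_));                              \
      std::exit(EXIT_FAILURE);                                             \
    }                                                                      \
  } while (0)

int main() {
  int* m_d = nullptr;
  CHECK_CUDA(cudaMalloc(&m_d, 100 * sizeof(int)));  // same kind of allocation as the fill example
  CHECK_CUDA(cudaFree(m_d));
  return 0;
}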
88e4dab2c4a300433b2adc51dd68f03e37f555e0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "detect_edges.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *input = NULL; hipMalloc(&input, XSIZE*YSIZE); unsigned char *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( detect_edges), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( detect_edges), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( detect_edges), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
88e4dab2c4a300433b2adc51dd68f03e37f555e0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "detect_edges.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); unsigned char *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); detect_edges<<<gridBlock,threadBlock>>>(input,output); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { detect_edges<<<gridBlock,threadBlock>>>(input,output); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { detect_edges<<<gridBlock,threadBlock>>>(input,output); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
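The benchmark above rounds iXSIZE and iYSIZE up to the next multiple of the block dimensions with while loops before dividing. The same grid size can be computed directly with a ceiling division, and the kernel loop can be timed with CUDA events instead of std::chrono. The sketch below is a stand-alone illustration under those assumptions; detect_edges_stub is a placeholder for the real detect_edges kernel.

#include <cuda_runtime.h>

__global__ void detect_edges_stub(unsigned char*, unsigned char*) {}  // stand-in for detect_edges

// Sketch: grid size via ceiling division, timing via CUDA events.
float time_kernel(unsigned char* input, unsigned char* output,
                  int XSIZE, int YSIZE, int BLOCKX, int BLOCKY, int iters) {
    dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);
    dim3 threadBlock(BLOCKX, BLOCKY);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i) {
        detect_edges_stub<<<gridBlock, threadBlock>>>(input, output);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // total milliseconds for all iters launches
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}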
0337584cfd73f6a765e4e83138677affccf8751c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "math.h" #include <stdio.h> #include <tchar.h> #include <vector> #include <map> #include <iostream> //#include <rocblas.h> #include "helper_cuda.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "Common.cuh" #include "Gaussian2d2.cuh" using namespace std; using namespace thrust; namespace Leicester { namespace CudaLib { __global__ void Gaussian2d_CUDA(double *D, double *Dt, double *Dx, double *Dxx, double *TP, int TPx, int TPy, double *CN, int CNx, int CNy, double *A, int Ax, int Ay, double *C, int Cx, int Cy) { int i = blockDim.y * blockIdx.y + threadIdx.y; int j = blockDim.x * blockIdx.x + threadIdx.x; __syncthreads(); if (i == 0 & j == 0) { //printf("start mqd2_CUDA i=%i j =%i \r\n", i, j); //double* D = (double *)malloc(sizeof(double) * TPx *CNy); //double* Dt = (double *)malloc(sizeof(double) * TPx *CNy); //double* Dx = (double *)malloc(sizeof(double) * TPx *CNy); //double* Dxx = (double *)malloc(sizeof(double) * TPx *CNy); //printf("allocated arrays mqd2_CUDA i=%i j =%i \r\n", i, j); dim3 threads(32, 32); //dim3 grid(CNx / threads.x, CNy / threads.y); dim3 grid(1, 1); dim3 dimTP(TPx, TPy); dim3 dimCN(CNx, CNy); dim3 dimA(Ax, Ay); dim3 dimC(Cx, Cy); //printf("TP size=%f", sizeof(TP)); //printMatrix_CUDA << < dim3(1,1), dim3(1,1)>> > (TP, dimTP); //gpuErrchk << <1, 1 >> >(hipPeekAtLastError()); //gpuErrchk << <1, 1 >> >(hipDeviceSynchronize()); //printf("dimTPx=%i dimTPy=%i dimCNx=%i dimCNy=%i dimAx=%i dimAy=%i dimCx=%i dimCy=%i\r\n", dimTP.x, dimTP.y, dimCN.x, dimCN.y, dimA.x, dimA.y, dimC.x, dimC.y); Gaussian2d2_CUDA << <1, dim3(1, CNy) >> > (D, Dt, Dx, Dxx, TP, dimTP, CN, dimCN, A, dimA, C, dimC); gpuAssert << <1, 1 >> > (hipPeekAtLastError(), __FILE__, __LINE__); gpuAssert << <1, 1 >> > (hipDeviceSynchronize(), __FILE__, __LINE__); //printf("D size=%f", sizeof(D)); //printMatrix_CUDA << < dim3(1, 1), dim3(1, 1) >> > (D, dim3(TPy, TPy)); //gpuErrchk << <1, 1 >> >(hipPeekAtLastError()); //gpuErrchk << <1, 1 >> >(hipDeviceSynchronize()); //__syncthreads(); //result[0] = D; //result[1] = Dt; //result[2] = Dx; //result[3] = Dxx; } __syncthreads(); //printf("end mqd2_CUDA"); } } }
0337584cfd73f6a765e4e83138677affccf8751c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "math.h" #include <stdio.h> #include <tchar.h> #include <vector> #include <map> #include <iostream> //#include <cublas_v2.h> #include "helper_cuda.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "Common.cuh" #include "Gaussian2d2.cuh" using namespace std; using namespace thrust; namespace Leicester { namespace CudaLib { __global__ void Gaussian2d_CUDA(double *D, double *Dt, double *Dx, double *Dxx, double *TP, int TPx, int TPy, double *CN, int CNx, int CNy, double *A, int Ax, int Ay, double *C, int Cx, int Cy) { int i = blockDim.y * blockIdx.y + threadIdx.y; int j = blockDim.x * blockIdx.x + threadIdx.x; __syncthreads(); if (i == 0 & j == 0) { //printf("start mqd2_CUDA i=%i j =%i \r\n", i, j); //double* D = (double *)malloc(sizeof(double) * TPx *CNy); //double* Dt = (double *)malloc(sizeof(double) * TPx *CNy); //double* Dx = (double *)malloc(sizeof(double) * TPx *CNy); //double* Dxx = (double *)malloc(sizeof(double) * TPx *CNy); //printf("allocated arrays mqd2_CUDA i=%i j =%i \r\n", i, j); dim3 threads(32, 32); //dim3 grid(CNx / threads.x, CNy / threads.y); dim3 grid(1, 1); dim3 dimTP(TPx, TPy); dim3 dimCN(CNx, CNy); dim3 dimA(Ax, Ay); dim3 dimC(Cx, Cy); //printf("TP size=%f", sizeof(TP)); //printMatrix_CUDA << < dim3(1,1), dim3(1,1)>> > (TP, dimTP); //gpuErrchk << <1, 1 >> >(cudaPeekAtLastError()); //gpuErrchk << <1, 1 >> >(cudaDeviceSynchronize()); //printf("dimTPx=%i dimTPy=%i dimCNx=%i dimCNy=%i dimAx=%i dimAy=%i dimCx=%i dimCy=%i\r\n", dimTP.x, dimTP.y, dimCN.x, dimCN.y, dimA.x, dimA.y, dimC.x, dimC.y); Gaussian2d2_CUDA << <1, dim3(1, CNy) >> > (D, Dt, Dx, Dxx, TP, dimTP, CN, dimCN, A, dimA, C, dimC); gpuAssert << <1, 1 >> > (cudaPeekAtLastError(), __FILE__, __LINE__); gpuAssert << <1, 1 >> > (cudaDeviceSynchronize(), __FILE__, __LINE__); //printf("D size=%f", sizeof(D)); //printMatrix_CUDA << < dim3(1, 1), dim3(1, 1) >> > (D, dim3(TPy, TPy)); //gpuErrchk << <1, 1 >> >(cudaPeekAtLastError()); //gpuErrchk << <1, 1 >> >(cudaDeviceSynchronize()); //__syncthreads(); //result[0] = D; //result[1] = Dt; //result[2] = Dx; //result[3] = Dxx; } __syncthreads(); //printf("end mqd2_CUDA"); } } }
0e0cf7b651ad182fc7f9ded06ab3909921912edc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: joaander #include "TwoStepRATTLENVEGPU.cuh" #include "hoomd/VectorMath.h" #include <assert.h> //! Takes the first half-step forward in the velocity-verlet NVE integration on a group of particles /*! \param d_pos array of particle positions \param d_vel array of particle velocities \param d_accel array of particle accelerations \param d_image array of particle images \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param box Box dimensions for periodic boundary condition handling \param deltaT timestep \param limit If \a limit is true, then the dynamics will be limited so that particles do not move a distance further than \a limit_val in one step. \param limit_val Length to limit particle distance movement to \param zero_force Set to true to always assign an acceleration of 0 to all particles in the group This kernel must be executed with a 1D grid of any block size such that the number of threads is greater than or equal to the number of members in the group. The kernel's implementation simply reads one particle in each thread and updates that particle. <b>Performance notes:</b> Particle properties are read via the texture cache to optimize the bandwidth obtained with sparse groups. The writes in sparse groups will not be coalesced. However, because ParticleGroup sorts the index list the writes will be as contiguous as possible leading to fewer memory transactions on compute 1.3 hardware and more cache hits on Fermi. */ extern "C" __global__ void gpu_rattle_nve_step_one_kernel(Scalar4* d_pos, Scalar4* d_vel, const Scalar3* d_accel, int3* d_image, unsigned int* d_group_members, const unsigned int nwork, const unsigned int offset, BoxDim box, Scalar deltaT, bool limit, Scalar limit_val) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx < nwork) { const unsigned int group_idx = work_idx + offset; unsigned int idx = d_group_members[group_idx]; // do velocity verlet update // r(t+deltaT) = r(t) + v(t)*deltaT + (1/2)a(t)*deltaT^2 // v(t+deltaT/2) = v(t) + (1/2)a*deltaT // read the particle's position (MEM TRANSFER: 16 bytes) Scalar4 postype = d_pos[idx]; Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); // read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes) Scalar4 velmass = d_vel[idx]; Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z); Scalar3 accel = d_accel[idx]; Scalar deltaT_half = Scalar(1.0 / 2.0) * deltaT; Scalar3 half_vel; half_vel = vel + deltaT_half * accel; // update the position (FLOPS: 15) Scalar3 dx = deltaT * half_vel; // limit the movement of the particles if (limit) { Scalar len = sqrtf(dot(dx, dx)); if (len > limit_val) dx = dx / len * limit_val; } // FLOPS: 3 pos += dx; // update the velocity (FLOPS: 3) vel = half_vel; // read in the particle's image (MEM TRANSFER: 16 bytes) int3 image = d_image[idx]; // fix the periodic boundary conditions (FLOPS: 15) box.wrap(pos, image); // write out the results (MEM_TRANSFER: 48 bytes) d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w); d_vel[idx] = make_scalar4(vel.x, vel.y, vel.z, velmass.w); d_image[idx] = image; } } /*! 
\param d_pos array of particle positions \param d_vel array of particle velocities \param d_accel array of particle accelerations \param d_image array of particle images \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param box Box dimensions for periodic boundary condition handling \param deltaT timestep \param limit If \a limit is true, then the dynamics will be limited so that particles do not move a distance further than \a limit_val in one step. \param limit_val Length to limit particle distance movement to \param zero_force Set to true to always assign an acceleration of 0 to all particles in the group See gpu_rattle_nve_step_one_kernel() for full documentation, this function is just a driver. */ hipError_t gpu_rattle_nve_step_one(Scalar4* d_pos, Scalar4* d_vel, const Scalar3* d_accel, int3* d_image, unsigned int* d_group_members, const GPUPartition& gpu_partition, const BoxDim& box, Scalar deltaT, bool limit, Scalar limit_val, unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_rattle_nve_step_one_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; // setup the grid to run the kernel dim3 grid((nwork / run_block_size) + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel hipLaunchKernelGGL((gpu_rattle_nve_step_one_kernel), dim3(grid), dim3(threads), 0, 0, d_pos, d_vel, d_accel, d_image, d_group_members, nwork, range.first, box, deltaT, limit, limit_val); } return hipSuccess; } //! NO_SQUISH angular part of the first half step /*! 
\param d_orientation array of particle orientations \param d_angmom array of particle conjugate quaternions \param d_inertia array of moments of inertia \param d_net_torque array of net torques \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param deltaT timestep */ extern "C" __global__ void gpu_rattle_nve_angular_step_one_kernel(Scalar4* d_orientation, Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_torque, const unsigned int* d_group_members, const unsigned int nwork, const unsigned int offset, Scalar deltaT, Scalar scale) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx < nwork) { const unsigned int group_idx = work_idx + offset; unsigned int idx = d_group_members[group_idx]; // read the particle's orientation, conjugate quaternion, moment of inertia and net torque quat<Scalar> q(d_orientation[idx]); quat<Scalar> p(d_angmom[idx]); vec3<Scalar> t(d_net_torque[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q), t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON)); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = Scalar(0.0); if (y_zero) t.y = Scalar(0.0); if (z_zero) t.z = Scalar(0.0); // advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT) p += deltaT * q * t; p = p * scale; quat<Scalar> p1, p2, p3; // permutated quaternions quat<Scalar> q1, q2, q3; Scalar phi1, cphi1, sphi1; Scalar phi2, cphi2, sphi2; Scalar phi3, cphi3, sphi3; if (!z_zero) { p3 = quat<Scalar>(-p.v.z, vec3<Scalar>(p.v.y, -p.v.x, p.s)); q3 = quat<Scalar>(-q.v.z, vec3<Scalar>(q.v.y, -q.v.x, q.s)); phi3 = Scalar(1. / 4.) / I.z * dot(p, q3); cphi3 = slow::cos(Scalar(1. / 2.) * deltaT * phi3); sphi3 = slow::sin(Scalar(1. / 2.) * deltaT * phi3); p = cphi3 * p + sphi3 * p3; q = cphi3 * q + sphi3 * q3; } if (!y_zero) { p2 = quat<Scalar>(-p.v.y, vec3<Scalar>(-p.v.z, p.s, p.v.x)); q2 = quat<Scalar>(-q.v.y, vec3<Scalar>(-q.v.z, q.s, q.v.x)); phi2 = Scalar(1. / 4.) / I.y * dot(p, q2); cphi2 = slow::cos(Scalar(1. / 2.) * deltaT * phi2); sphi2 = slow::sin(Scalar(1. / 2.) * deltaT * phi2); p = cphi2 * p + sphi2 * p2; q = cphi2 * q + sphi2 * q2; } if (!x_zero) { p1 = quat<Scalar>(-p.v.x, vec3<Scalar>(p.s, p.v.z, -p.v.y)); q1 = quat<Scalar>(-q.v.x, vec3<Scalar>(q.s, q.v.z, -q.v.y)); phi1 = Scalar(1. / 4.) / I.x * dot(p, q1); cphi1 = slow::cos(deltaT * phi1); sphi1 = slow::sin(deltaT * phi1); p = cphi1 * p + sphi1 * p1; q = cphi1 * q + sphi1 * q1; } if (!y_zero) { p2 = quat<Scalar>(-p.v.y, vec3<Scalar>(-p.v.z, p.s, p.v.x)); q2 = quat<Scalar>(-q.v.y, vec3<Scalar>(-q.v.z, q.s, q.v.x)); phi2 = Scalar(1. / 4.) / I.y * dot(p, q2); cphi2 = slow::cos(Scalar(1. / 2.) * deltaT * phi2); sphi2 = slow::sin(Scalar(1. / 2.) * deltaT * phi2); p = cphi2 * p + sphi2 * p2; q = cphi2 * q + sphi2 * q2; } if (!z_zero) { p3 = quat<Scalar>(-p.v.z, vec3<Scalar>(p.v.y, -p.v.x, p.s)); q3 = quat<Scalar>(-q.v.z, vec3<Scalar>(q.v.y, -q.v.x, q.s)); phi3 = Scalar(1. / 4.) / I.z * dot(p, q3); cphi3 = slow::cos(Scalar(1. / 2.) * deltaT * phi3); sphi3 = slow::sin(Scalar(1. / 2.) 
* deltaT * phi3); p = cphi3 * p + sphi3 * p3; q = cphi3 * q + sphi3 * q3; } // renormalize (improves stability) q = q * (Scalar(1.0) / slow::sqrt(norm2(q))); d_orientation[idx] = quat_to_scalar4(q); d_angmom[idx] = quat_to_scalar4(p); } } /*! \param d_orientation array of particle orientations \param d_angmom array of particle conjugate quaternions \param d_inertia array of moments of inertia \param d_net_torque array of net torques \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param deltaT timestep */ hipError_t gpu_rattle_nve_angular_step_one(Scalar4* d_orientation, Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_torque, unsigned int* d_group_members, const GPUPartition& gpu_partition, Scalar deltaT, Scalar scale, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_rattle_nve_angular_step_one_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; // setup the grid to run the kernel dim3 grid((nwork / run_block_size) + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel hipLaunchKernelGGL((gpu_rattle_nve_angular_step_one_kernel), dim3(grid), dim3(threads), 0, 0, d_orientation, d_angmom, d_inertia, d_net_torque, d_group_members, nwork, range.first, deltaT, scale); } return hipSuccess; } //! NO_SQUISH angular part of the second half step /*! \param d_orientation array of particle orientations \param d_angmom array of particle conjugate quaternions \param d_inertia array of moments of inertia \param d_net_torque array of net torques \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param deltaT timestep */ extern "C" __global__ void gpu_rattle_nve_angular_step_two_kernel(const Scalar4* d_orientation, Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_torque, unsigned int* d_group_members, const unsigned int nwork, const unsigned int offset, Scalar deltaT, Scalar scale) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx < nwork) { const unsigned int group_idx = work_idx + offset; unsigned int idx = d_group_members[group_idx]; // read the particle's orientation, conjugate quaternion, moment of inertia and net torque quat<Scalar> q(d_orientation[idx]); quat<Scalar> p(d_angmom[idx]); vec3<Scalar> t(d_net_torque[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q), t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON)); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = Scalar(0.0); if (y_zero) t.y = Scalar(0.0); if (z_zero) t.z = Scalar(0.0); // rescale p = p * scale; // advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT) p += deltaT * q * t; d_angmom[idx] = quat_to_scalar4(p); } } /*! 
\param d_orientation array of particle orientations \param d_angmom array of particle conjugate quaternions \param d_inertia array of moments of inertia \param d_net_torque array of net torques \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param deltaT timestep */ hipError_t gpu_rattle_nve_angular_step_two(const Scalar4* d_orientation, Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_torque, unsigned int* d_group_members, const GPUPartition& gpu_partition, Scalar deltaT, Scalar scale, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_rattle_nve_angular_step_two_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; // setup the grid to run the kernel dim3 grid((nwork / run_block_size) + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel hipLaunchKernelGGL((gpu_rattle_nve_angular_step_two_kernel), dim3(grid), dim3(threads), 0, 0, d_orientation, d_angmom, d_inertia, d_net_torque, d_group_members, nwork, range.first, deltaT, scale); } return hipSuccess; }
0e0cf7b651ad182fc7f9ded06ab3909921912edc.cu
#include "hip/hip_runtime.h" // Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: joaander #include "TwoStepRATTLENVEGPU.cuh" #include "hoomd/VectorMath.h" #include <assert.h> //! Takes the first half-step forward in the velocity-verlet NVE integration on a group of particles /*! \param d_pos array of particle positions \param d_vel array of particle velocities \param d_accel array of particle accelerations \param d_image array of particle images \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param box Box dimensions for periodic boundary condition handling \param deltaT timestep \param limit If \a limit is true, then the dynamics will be limited so that particles do not move a distance further than \a limit_val in one step. \param limit_val Length to limit particle distance movement to \param zero_force Set to true to always assign an acceleration of 0 to all particles in the group This kernel must be executed with a 1D grid of any block size such that the number of threads is greater than or equal to the number of members in the group. The kernel's implementation simply reads one particle in each thread and updates that particle. <b>Performance notes:</b> Particle properties are read via the texture cache to optimize the bandwidth obtained with sparse groups. The writes in sparse groups will not be coalesced. However, because ParticleGroup sorts the index list the writes will be as contiguous as possible leading to fewer memory transactions on compute 1.3 hardware and more cache hits on Fermi. */ extern "C" __global__ void gpu_rattle_nve_step_one_kernel(Scalar4* d_pos, Scalar4* d_vel, const Scalar3* d_accel, int3* d_image, unsigned int* d_group_members, const unsigned int nwork, const unsigned int offset, BoxDim box, Scalar deltaT, bool limit, Scalar limit_val) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx < nwork) { const unsigned int group_idx = work_idx + offset; unsigned int idx = d_group_members[group_idx]; // do velocity verlet update // r(t+deltaT) = r(t) + v(t)*deltaT + (1/2)a(t)*deltaT^2 // v(t+deltaT/2) = v(t) + (1/2)a*deltaT // read the particle's position (MEM TRANSFER: 16 bytes) Scalar4 postype = d_pos[idx]; Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); // read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes) Scalar4 velmass = d_vel[idx]; Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z); Scalar3 accel = d_accel[idx]; Scalar deltaT_half = Scalar(1.0 / 2.0) * deltaT; Scalar3 half_vel; half_vel = vel + deltaT_half * accel; // update the position (FLOPS: 15) Scalar3 dx = deltaT * half_vel; // limit the movement of the particles if (limit) { Scalar len = sqrtf(dot(dx, dx)); if (len > limit_val) dx = dx / len * limit_val; } // FLOPS: 3 pos += dx; // update the velocity (FLOPS: 3) vel = half_vel; // read in the particle's image (MEM TRANSFER: 16 bytes) int3 image = d_image[idx]; // fix the periodic boundary conditions (FLOPS: 15) box.wrap(pos, image); // write out the results (MEM_TRANSFER: 48 bytes) d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w); d_vel[idx] = make_scalar4(vel.x, vel.y, vel.z, velmass.w); d_image[idx] = image; } } /*! 
\param d_pos array of particle positions \param d_vel array of particle velocities \param d_accel array of particle accelerations \param d_image array of particle images \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param box Box dimensions for periodic boundary condition handling \param deltaT timestep \param limit If \a limit is true, then the dynamics will be limited so that particles do not move a distance further than \a limit_val in one step. \param limit_val Length to limit particle distance movement to \param zero_force Set to true to always assign an acceleration of 0 to all particles in the group See gpu_rattle_nve_step_one_kernel() for full documentation, this function is just a driver. */ hipError_t gpu_rattle_nve_step_one(Scalar4* d_pos, Scalar4* d_vel, const Scalar3* d_accel, int3* d_image, unsigned int* d_group_members, const GPUPartition& gpu_partition, const BoxDim& box, Scalar deltaT, bool limit, Scalar limit_val, unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)gpu_rattle_nve_step_one_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; // setup the grid to run the kernel dim3 grid((nwork / run_block_size) + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel hipLaunchKernelGGL((gpu_rattle_nve_step_one_kernel), dim3(grid), dim3(threads), 0, 0, d_pos, d_vel, d_accel, d_image, d_group_members, nwork, range.first, box, deltaT, limit, limit_val); } return hipSuccess; } //! NO_SQUISH angular part of the first half step /*! 
\param d_orientation array of particle orientations \param d_angmom array of particle conjugate quaternions \param d_inertia array of moments of inertia \param d_net_torque array of net torques \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param deltaT timestep */ extern "C" __global__ void gpu_rattle_nve_angular_step_one_kernel(Scalar4* d_orientation, Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_torque, const unsigned int* d_group_members, const unsigned int nwork, const unsigned int offset, Scalar deltaT, Scalar scale) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx < nwork) { const unsigned int group_idx = work_idx + offset; unsigned int idx = d_group_members[group_idx]; // read the particle's orientation, conjugate quaternion, moment of inertia and net torque quat<Scalar> q(d_orientation[idx]); quat<Scalar> p(d_angmom[idx]); vec3<Scalar> t(d_net_torque[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q), t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON)); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = Scalar(0.0); if (y_zero) t.y = Scalar(0.0); if (z_zero) t.z = Scalar(0.0); // advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT) p += deltaT * q * t; p = p * scale; quat<Scalar> p1, p2, p3; // permutated quaternions quat<Scalar> q1, q2, q3; Scalar phi1, cphi1, sphi1; Scalar phi2, cphi2, sphi2; Scalar phi3, cphi3, sphi3; if (!z_zero) { p3 = quat<Scalar>(-p.v.z, vec3<Scalar>(p.v.y, -p.v.x, p.s)); q3 = quat<Scalar>(-q.v.z, vec3<Scalar>(q.v.y, -q.v.x, q.s)); phi3 = Scalar(1. / 4.) / I.z * dot(p, q3); cphi3 = slow::cos(Scalar(1. / 2.) * deltaT * phi3); sphi3 = slow::sin(Scalar(1. / 2.) * deltaT * phi3); p = cphi3 * p + sphi3 * p3; q = cphi3 * q + sphi3 * q3; } if (!y_zero) { p2 = quat<Scalar>(-p.v.y, vec3<Scalar>(-p.v.z, p.s, p.v.x)); q2 = quat<Scalar>(-q.v.y, vec3<Scalar>(-q.v.z, q.s, q.v.x)); phi2 = Scalar(1. / 4.) / I.y * dot(p, q2); cphi2 = slow::cos(Scalar(1. / 2.) * deltaT * phi2); sphi2 = slow::sin(Scalar(1. / 2.) * deltaT * phi2); p = cphi2 * p + sphi2 * p2; q = cphi2 * q + sphi2 * q2; } if (!x_zero) { p1 = quat<Scalar>(-p.v.x, vec3<Scalar>(p.s, p.v.z, -p.v.y)); q1 = quat<Scalar>(-q.v.x, vec3<Scalar>(q.s, q.v.z, -q.v.y)); phi1 = Scalar(1. / 4.) / I.x * dot(p, q1); cphi1 = slow::cos(deltaT * phi1); sphi1 = slow::sin(deltaT * phi1); p = cphi1 * p + sphi1 * p1; q = cphi1 * q + sphi1 * q1; } if (!y_zero) { p2 = quat<Scalar>(-p.v.y, vec3<Scalar>(-p.v.z, p.s, p.v.x)); q2 = quat<Scalar>(-q.v.y, vec3<Scalar>(-q.v.z, q.s, q.v.x)); phi2 = Scalar(1. / 4.) / I.y * dot(p, q2); cphi2 = slow::cos(Scalar(1. / 2.) * deltaT * phi2); sphi2 = slow::sin(Scalar(1. / 2.) * deltaT * phi2); p = cphi2 * p + sphi2 * p2; q = cphi2 * q + sphi2 * q2; } if (!z_zero) { p3 = quat<Scalar>(-p.v.z, vec3<Scalar>(p.v.y, -p.v.x, p.s)); q3 = quat<Scalar>(-q.v.z, vec3<Scalar>(q.v.y, -q.v.x, q.s)); phi3 = Scalar(1. / 4.) / I.z * dot(p, q3); cphi3 = slow::cos(Scalar(1. / 2.) * deltaT * phi3); sphi3 = slow::sin(Scalar(1. / 2.) 
* deltaT * phi3); p = cphi3 * p + sphi3 * p3; q = cphi3 * q + sphi3 * q3; } // renormalize (improves stability) q = q * (Scalar(1.0) / slow::sqrt(norm2(q))); d_orientation[idx] = quat_to_scalar4(q); d_angmom[idx] = quat_to_scalar4(p); } } /*! \param d_orientation array of particle orientations \param d_angmom array of particle conjugate quaternions \param d_inertia array of moments of inertia \param d_net_torque array of net torques \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param deltaT timestep */ hipError_t gpu_rattle_nve_angular_step_one(Scalar4* d_orientation, Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_torque, unsigned int* d_group_members, const GPUPartition& gpu_partition, Scalar deltaT, Scalar scale, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_rattle_nve_angular_step_one_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; // setup the grid to run the kernel dim3 grid((nwork / run_block_size) + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel hipLaunchKernelGGL((gpu_rattle_nve_angular_step_one_kernel), dim3(grid), dim3(threads), 0, 0, d_orientation, d_angmom, d_inertia, d_net_torque, d_group_members, nwork, range.first, deltaT, scale); } return hipSuccess; } //! NO_SQUISH angular part of the second half step /*! \param d_orientation array of particle orientations \param d_angmom array of particle conjugate quaternions \param d_inertia array of moments of inertia \param d_net_torque array of net torques \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param deltaT timestep */ extern "C" __global__ void gpu_rattle_nve_angular_step_two_kernel(const Scalar4* d_orientation, Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_torque, unsigned int* d_group_members, const unsigned int nwork, const unsigned int offset, Scalar deltaT, Scalar scale) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx < nwork) { const unsigned int group_idx = work_idx + offset; unsigned int idx = d_group_members[group_idx]; // read the particle's orientation, conjugate quaternion, moment of inertia and net torque quat<Scalar> q(d_orientation[idx]); quat<Scalar> p(d_angmom[idx]); vec3<Scalar> t(d_net_torque[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q), t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON)); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = Scalar(0.0); if (y_zero) t.y = Scalar(0.0); if (z_zero) t.z = Scalar(0.0); // rescale p = p * scale; // advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT) p += deltaT * q * t; d_angmom[idx] = quat_to_scalar4(p); } } /*! 
\param d_orientation array of particle orientations \param d_angmom array of particle conjugate quaternions \param d_inertia array of moments of inertia \param d_net_torque array of net torques \param d_group_members Device array listing the indices of the members of the group to integrate \param group_size Number of members in the group \param deltaT timestep */ hipError_t gpu_rattle_nve_angular_step_two(const Scalar4* d_orientation, Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_torque, unsigned int* d_group_members, const GPUPartition& gpu_partition, Scalar deltaT, Scalar scale, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_rattle_nve_angular_step_two_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; // setup the grid to run the kernel dim3 grid((nwork / run_block_size) + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // run the kernel hipLaunchKernelGGL((gpu_rattle_nve_angular_step_two_kernel), dim3(grid), dim3(threads), 0, 0, d_orientation, d_angmom, d_inertia, d_net_torque, d_group_members, nwork, range.first, deltaT, scale); } return hipSuccess; }
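The first-half-step kernel in both files implements the update stated in its comment: v(t+deltaT/2) = v(t) + (1/2) a(t) deltaT, then r(t+deltaT) = r(t) + v(t+deltaT/2) deltaT, with the displacement optionally clamped to limit_val. A minimal single-particle, host-side sketch of that arithmetic follows; it is an illustration of the formula, not HOOMD code.

#include <cmath>

struct Vec3 { double x, y, z; };

// One particle, one velocity-verlet half step, mirroring gpu_rattle_nve_step_one_kernel.
void half_step(Vec3& pos, Vec3& vel, const Vec3& accel,
               double deltaT, bool limit, double limit_val) {
    // v(t+deltaT/2) = v(t) + (1/2) a(t) deltaT
    Vec3 half_vel{vel.x + 0.5 * deltaT * accel.x,
                  vel.y + 0.5 * deltaT * accel.y,
                  vel.z + 0.5 * deltaT * accel.z};
    // dx = v(t+deltaT/2) deltaT, i.e. r(t+deltaT) = r(t) + v(t) deltaT + (1/2) a(t) deltaT^2
    Vec3 dx{deltaT * half_vel.x, deltaT * half_vel.y, deltaT * half_vel.z};
    if (limit) {
        double len = std::sqrt(dx.x * dx.x + dx.y * dx.y + dx.z * dx.z);
        if (len > limit_val) {            // clamp the move to limit_val, as in the kernel
            double s = limit_val / len;
            dx.x *= s; dx.y *= s; dx.z *= s;
        }
    }
    pos.x += dx.x; pos.y += dx.y; pos.z += dx.z;
    vel = half_vel;                        // velocity now sits at t + deltaT/2
}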
51ae475051c0242cba45a2567d8cf5b055eaed45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { namespace { __global__ void DynamicLossScaleScheduleGpu(const int64_t increment_period, const float multiplier, const int64_t* count_not_finite, float* loss_scale, int64_t* good_step_counter) { if (*count_not_finite == 0) { int64_t cur_good_step_counter = *good_step_counter + 1; if (cur_good_step_counter >= increment_period) { *loss_scale = static_cast<float>( min(static_cast<double>(*loss_scale) * multiplier, static_cast<double>(FLT_MAX))); cur_good_step_counter = 0; } *good_step_counter = cur_good_step_counter; } else { *good_step_counter = 0; *loss_scale = static_cast<float>(max(static_cast<double>(*loss_scale) / multiplier, 1.0)); } } } // namespace class DynamicLossScaleScheduleGpuKernel final : public user_op::OpKernel { public: DynamicLossScaleScheduleGpuKernel() = default; ~DynamicLossScaleScheduleGpuKernel() override = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* count_not_finite = ctx->Tensor4ArgNameAndIndex("count_not_finite", 0); user_op::Tensor* loss_scale = ctx->Tensor4ArgNameAndIndex("loss_scale", 0); user_op::Tensor* good_step_counter = ctx->Tensor4ArgNameAndIndex("good_step_counter", 0); const auto increment_period = ctx->Attr<int64_t>("increment_period"); const auto multiplier = ctx->Attr<float>("multiplier"); hipLaunchKernelGGL(( DynamicLossScaleScheduleGpu), dim3(1), dim3(1), 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream(), increment_period, multiplier, count_not_finite->dptr<int64_t>(), loss_scale->mut_dptr<float>(), good_step_counter->mut_dptr<int64_t>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return true; } }; REGISTER_USER_KERNEL("dynamic_loss_scale_schedule") .SetCreateFn<DynamicLossScaleScheduleGpuKernel>() .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA)); } // namespace oneflow
51ae475051c0242cba45a2567d8cf5b055eaed45.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { namespace { __global__ void DynamicLossScaleScheduleGpu(const int64_t increment_period, const float multiplier, const int64_t* count_not_finite, float* loss_scale, int64_t* good_step_counter) { if (*count_not_finite == 0) { int64_t cur_good_step_counter = *good_step_counter + 1; if (cur_good_step_counter >= increment_period) { *loss_scale = static_cast<float>( min(static_cast<double>(*loss_scale) * multiplier, static_cast<double>(FLT_MAX))); cur_good_step_counter = 0; } *good_step_counter = cur_good_step_counter; } else { *good_step_counter = 0; *loss_scale = static_cast<float>(max(static_cast<double>(*loss_scale) / multiplier, 1.0)); } } } // namespace class DynamicLossScaleScheduleGpuKernel final : public user_op::OpKernel { public: DynamicLossScaleScheduleGpuKernel() = default; ~DynamicLossScaleScheduleGpuKernel() override = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* count_not_finite = ctx->Tensor4ArgNameAndIndex("count_not_finite", 0); user_op::Tensor* loss_scale = ctx->Tensor4ArgNameAndIndex("loss_scale", 0); user_op::Tensor* good_step_counter = ctx->Tensor4ArgNameAndIndex("good_step_counter", 0); const auto increment_period = ctx->Attr<int64_t>("increment_period"); const auto multiplier = ctx->Attr<float>("multiplier"); DynamicLossScaleScheduleGpu<<<1, 1, 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>( increment_period, multiplier, count_not_finite->dptr<int64_t>(), loss_scale->mut_dptr<float>(), good_step_counter->mut_dptr<int64_t>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return true; } }; REGISTER_USER_KERNEL("dynamic_loss_scale_schedule") .SetCreateFn<DynamicLossScaleScheduleGpuKernel>() .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA)); } // namespace oneflow
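DynamicLossScaleScheduleGpu runs on a single thread and only updates three scalars. For readability, the same schedule written as plain host code is sketched below; this is an illustration of the logic, not part of the OneFlow API.

#include <algorithm>
#include <cfloat>
#include <cstdint>

// Same logic as DynamicLossScaleScheduleGpu, expressed on the host.
void dynamic_loss_scale_step(int64_t count_not_finite, int64_t increment_period,
                             float multiplier, float& loss_scale, int64_t& good_step_counter) {
    if (count_not_finite == 0) {
        int64_t cur = good_step_counter + 1;
        if (cur >= increment_period) {
            // Enough consecutive finite steps: grow the scale, saturating at FLT_MAX.
            loss_scale = static_cast<float>(std::min(
                static_cast<double>(loss_scale) * multiplier, static_cast<double>(FLT_MAX)));
            cur = 0;
        }
        good_step_counter = cur;
    } else {
        // Non-finite gradients seen: shrink the scale (never below 1.0) and reset the counter.
        good_step_counter = 0;
        loss_scale = static_cast<float>(std::max(
            static_cast<double>(loss_scale) / multiplier, 1.0));
    }
}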
3ce06dc841f4f5cde2b76e2bcf95d543056dd64f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<iostream> #include<stdlib.h> #include<vector> #include<bitset> #include<cuda_runtime.h> #include<device_launch_parameters.h> #define DATA_SIZE 2000 #define DOUBLE_DATA_SIZE 4000 static float look_up_table[64]; static float* look_up_table_gpu; struct Entries_gpu0 { int imageId; int idxId; bool HMBM[64]; }; struct inverted_gpu0 { float thresholds[64]; float idf_weight; int indexInmap; // map_begin }; static float *mat_gpu; static int *sum_mat_gpu,*result_gpu,*wordsID_gpu; static inverted_gpu0 *inverted_gpu; static Entries_gpu0 *Entries_gpu; static int *image_index_gpu; static float *descriptor_gpu; static float *d_A; static float *d_B; static float *d_Result; bool InitCUDA() { int count; hipGetDeviceCount(&count); printf("threr are %d device!",count); if(count == 0){ fprintf(stderr,"there is no device.\n"); return false; } int i; for(i = 0;i<count;i++) { hipDeviceProp_t prop; if (hipGetDeviceProperties(&prop, i) == hipSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } __global__ static void gpu_function(float * mat_g, int *sum_mat_g,int *result_g) { int indexId = blockIdx.x; float *mat = mat_g + indexId * DATA_SIZE * DATA_SIZE; int * sum_mat = sum_mat_g + indexId * DOUBLE_DATA_SIZE; __shared__ int smallIndex1[256]; __shared__ int sum_small1[256]; // __shared__ int sum_mat_shared_g[DOUBLE_DATA_SIZE]; // for(int index_query = threadIdx.x; index_query < DOUBLE_DATA_SIZE; index_query += blockDim.x ) // { // sum_mat_shared_g[index_query] = sum_mat_g[indexId * DOUBLE_DATA_SIZE + index_query]; // sum_mat_g[indexId * DOUBLE_DATA_SIZE + index_query] = 0; // } while(true) { __shared__ int sum_small; sum_small = 4000; __shared__ int smallIndex; smallIndex = -1; smallIndex1[threadIdx.x] = -1; sum_small1[threadIdx.x] = 4000; for(int index_query = threadIdx.x; index_query < DOUBLE_DATA_SIZE; index_query += blockDim.x ) { if(sum_mat[index_query] > 0 && sum_mat[index_query] < sum_small1[threadIdx.x]) { smallIndex1[threadIdx.x] = index_query; sum_small1[threadIdx.x] = sum_mat[index_query];// } // if(sum_mat[index_query] == 1) // { // atomicExch(&sum_small, 1); // atomicExch(&smallIndex, index_query); // } } __syncthreads(); if(sum_small1[threadIdx.x] == 1) { atomicExch(&sum_small, 1); atomicExch(&smallIndex, smallIndex1[threadIdx.x]); } __syncthreads(); if(sum_small != 1) { if(threadIdx.x<16) for(int index = threadIdx.x; index < blockDim.x; index += 16) { if(sum_small1[index]>0 && sum_small1[threadIdx.x] > sum_small1[index]){ smallIndex1[threadIdx.x] = smallIndex1[index]; sum_small1[threadIdx.x] = sum_small1[index]; } } __syncthreads(); if(threadIdx.x==0) for(int index = 0; index < 16; index++) { if(sum_small1[index]>0 && sum_small > sum_small1[index]) { smallIndex = smallIndex1[index]; sum_small = sum_small1[index]; } } } __syncthreads(); if(sum_small == 4000) { return; } sum_mat[smallIndex] = 0; int bigDistIndex = -1; float distNumber = 0; __shared__ int bigDistI[256]; __shared__ float distNum[256]; bigDistI[threadIdx.x] = -1; distNum[threadIdx.x] = 0; __syncthreads(); if(smallIndex < DATA_SIZE) { for(int index = threadIdx.x; index < DATA_SIZE; index += blockDim.x) { int temp1 = smallIndex * DATA_SIZE + index; if(mat[temp1] > 0) { if(mat[temp1] > distNum[threadIdx.x]) { bigDistI[threadIdx.x] = index; distNum[threadIdx.x] = mat[temp1]; } mat[temp1] = 0; 
sum_mat[DATA_SIZE+index] --; } } __syncthreads(); if(threadIdx.x<16) for(int index = threadIdx.x; index < blockDim.x ; index += 16) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); #pragma unroll for(int index = 0; index < 16; index++) { if(distNum[index] > distNumber) { bigDistIndex = bigDistI[index]; distNumber = distNum[index]; } } __syncthreads(); sum_mat[bigDistIndex+DATA_SIZE] = 0; result_g[indexId * DATA_SIZE + smallIndex] = bigDistIndex + 1; for(int indexIdx = threadIdx.x; indexIdx < DATA_SIZE; indexIdx += blockDim.x) { int temp2 = indexIdx * DATA_SIZE + bigDistIndex; if (mat[temp2] > 0) { mat[temp2] = 0; sum_mat[indexIdx]--; } } } else { for(int index = threadIdx.x; index < DATA_SIZE; index += blockDim.x) { int temp1 = smallIndex - DATA_SIZE + DATA_SIZE * index; if(mat[temp1] > 0) { if(mat[temp1] > distNum[threadIdx.x]) { bigDistI[threadIdx.x] = index; distNum[threadIdx.x] = mat[temp1]; } mat[temp1] = 0; sum_mat[index] --; } } __syncthreads(); if(threadIdx.x<16) for(int index = threadIdx.x; index < blockDim.x ; index += 16) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); #pragma unroll for(int index = 0; index < 16; index++) { if(distNum[index] > distNumber) { bigDistIndex = bigDistI[index]; distNumber = distNum[index]; } } __syncthreads(); sum_mat[bigDistIndex] = 0; result_g[indexId * DATA_SIZE + bigDistIndex] = smallIndex - DATA_SIZE + 1; for(int indexIm = threadIdx.x; indexIm < DATA_SIZE; indexIm += blockDim.x) { int temp2 = indexIm + bigDistIndex * DATA_SIZE; if (mat[temp2] > 0) { mat[temp2] = 0; sum_mat[indexIm + DATA_SIZE]--; } } } __syncthreads(); } } __global__ static void gpu_functionNew(float * mat_g, int *sum_mat_g,int *result_g) { int indexId = blockIdx.x; if(indexId>99) return; float *mat = mat_g + indexId * DATA_SIZE * DATA_SIZE; int * sum_mat = sum_mat_g + indexId * DOUBLE_DATA_SIZE; __shared__ int smallIndex1[256]; __shared__ int sum_small1[256]; while(true) { __shared__ int sum_small; sum_small = 4000; __shared__ int smallIndex; smallIndex = -1; smallIndex1[threadIdx.x] = -1; sum_small1[threadIdx.x] = 4000; for(int index_query = threadIdx.x; index_query < DOUBLE_DATA_SIZE; index_query += blockDim.x ) { if(sum_mat[index_query] > 0 && sum_mat[index_query] < sum_small1[threadIdx.x] && index_query < DOUBLE_DATA_SIZE) { smallIndex1[threadIdx.x] = index_query; sum_small1[threadIdx.x] = sum_mat[index_query];// } if(sum_mat[index_query] == 1) { sum_small = 1; smallIndex = index_query; } } __syncthreads(); if(sum_small != 1) { if(threadIdx.x<16) for(int index = threadIdx.x; index < blockDim.x; index += 16) { if(sum_small1[index]>0 && sum_small1[threadIdx.x] > sum_small1[index]){ smallIndex1[threadIdx.x] = smallIndex1[index]; sum_small1[threadIdx.x] = sum_small1[index]; } } __syncthreads(); if(threadIdx.x==0) for(int index = 0; index < 16; index++) { if(sum_small1[index]>0 && sum_small > sum_small1[index]) { smallIndex = smallIndex1[index]; sum_small = sum_small1[index]; } } } __syncthreads(); if(sum_small == 4000) return; sum_mat[smallIndex] = 0; int bigDistIndex = -1; float distNumber = 0; __shared__ int bigDistI[256]; __shared__ float distNum[256]; bigDistI[threadIdx.x] = -1; distNum[threadIdx.x] = 0; __syncthreads(); if(smallIndex < DATA_SIZE) { for(int index = threadIdx.x; index < DATA_SIZE; index += blockDim.x) { int temp1 = smallIndex * DATA_SIZE + index; if(mat[temp1] > 0) 
{ if(mat[temp1] > distNum[threadIdx.x]) { bigDistI[threadIdx.x] = index; distNum[threadIdx.x] = mat[temp1]; } mat[temp1] = 0; sum_mat[DATA_SIZE+index] --; } } __syncthreads(); if(threadIdx.x<64) for(int index = threadIdx.x; index < blockDim.x ; index += 64) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); if(threadIdx.x<16) for(int index = threadIdx.x; index < 64 ; index += 16) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); if(threadIdx.x<4) for(int index = threadIdx.x; index < 16 ; index += 4) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); for(int index = 0; index < 4; index++) { if(distNum[index] > distNumber) { bigDistIndex = bigDistI[index]; distNumber = distNum[index]; } } __syncthreads(); sum_mat[bigDistIndex+DATA_SIZE] = 0; result_g[indexId * DATA_SIZE + smallIndex] = bigDistIndex + 1; for(int indexIdx = threadIdx.x; indexIdx < DATA_SIZE; indexIdx += blockDim.x) { int temp2 = indexIdx * DATA_SIZE + bigDistIndex; if (mat[temp2] > 0) { mat[temp2] = 0; sum_mat[indexIdx]--; } } } else { for(int index = threadIdx.x; index < DATA_SIZE; index += blockDim.x) { int temp1 = smallIndex - DATA_SIZE + DATA_SIZE * index; if(mat[temp1] > 0) { if(mat[temp1] > distNum[threadIdx.x]) { bigDistI[threadIdx.x] = index; distNum[threadIdx.x] = mat[temp1]; } mat[temp1] = 0; sum_mat[index] --; } } __syncthreads(); if(threadIdx.x<64) for(int index = threadIdx.x; index < blockDim.x ; index += 64) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); if(threadIdx.x<16) for(int index = threadIdx.x; index < 64 ; index += 16) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); if(threadIdx.x<4) for(int index = threadIdx.x; index < 16 ; index += 4) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); for(int index = 0; index < 4; index++) { if(distNum[index] > distNumber) { bigDistIndex = bigDistI[index]; distNumber = distNum[index]; } } __syncthreads(); sum_mat[bigDistIndex] = 0; result_g[indexId * DATA_SIZE + bigDistIndex] = smallIndex - DATA_SIZE + 1; for(int indexIm = threadIdx.x; indexIm < DATA_SIZE; indexIm += blockDim.x) { int temp2 = indexIm + bigDistIndex * DATA_SIZE; if (mat[temp2] > 0) { mat[temp2] = 0; sum_mat[indexIm + DATA_SIZE]--; } } } __syncthreads(); } } void function1(int *result) { // hipStream_t stream, stream1; // hipStreamCreate(&stream); // hipStreamCreate(&stream1); dim3 blockdim(256,1,1); dim3 griddim(64,1,1); float time_GPU; hipEvent_t start_GPU, stop_GPU; hipEventCreate(&start_GPU); hipEventCreate(&stop_GPU); hipEventRecord(start_GPU, 0); hipMemset(result_gpu, 0, sizeof(int) * DATA_SIZE * 100); gpu_function<< <griddim ,blockdim, 0 >> >(mat_gpu, sum_mat_gpu, result_gpu); hipEventRecord(stop_GPU, 0); hipEventSynchronize(start_GPU); hipEventSynchronize(stop_GPU); hipEventElapsedTime(&time_GPU, start_GPU, stop_GPU); printf("The time for function1:\t%f(ms)\n", time_GPU); hipEventDestroy(start_GPU); hipEventDestroy(stop_GPU); //gpu_function<< <griddim ,blockdim, 0 >> >(mat_gpu+DATA_SIZE*DATA_SIZE*50, sum_mat_gpu + 
DOUBLE_DATA_SIZE*50, result_gpu+ DATA_SIZE*50); // hipStreamSynchronize(stream); // hipStreamSynchronize(stream1); // hipStreamDestroy(stream); // hipStreamDestroy(stream1); // static float *mat_cpu = new float[DATA_SIZE*DATA_SIZE * 100]; // static int *sum_mat_cpu = new int[DATA_SIZE*200]; // hipMemcpy(mat_cpu, mat_gpu, sizeof(float) * DATA_SIZE*DATA_SIZE * 100, hipMemcpyDeviceToHost); // hipMemcpy(sum_mat_cpu, sum_mat_gpu, sizeof(int) * DATA_SIZE * 200, hipMemcpyDeviceToHost); // int a = 0; // int b = 0; // for(int i = 0;i<DATA_SIZE*DATA_SIZE * 100;i++) // if(mat_cpu[i]>0) a++; // for(int i = 0; i<2000*200;i++) // if(sum_mat_cpu[i]!=0) // b++; // printf("a:%d\t",a);printf("b:%d\n",b); hipMemcpy(result, result_gpu, sizeof(int) * DATA_SIZE * 100, hipMemcpyDeviceToHost); } __global__ static void gpu_words1(int* wordsId,Entries_gpu0* Entries_gpu, inverted_gpu0* inverted_gpu,float * mat_g, int *sum_mat_g,int * image_index, float * LUTGpu,float *descriptor,int size) { for(int blockIndex = blockIdx.x;blockIndex<size;blockIndex += gridDim.x) { int word_id ; //= wordsId[blockIndex*blockDim.x+threadIdx.x]; for(int i = 0; i<5; i++) { word_id = wordsId[blockIndex*5+i]; if(word_id <1000000) { inverted_gpu0 temp = inverted_gpu[word_id]; __shared__ bool des[64]; for(int i = threadIdx.y; i<64; i += blockDim.y) { des[i] = (descriptor[blockIndex * 64 + i] > inverted_gpu[word_id].thresholds[i]); } float idf_weight = temp.idf_weight; float squared_idf_weight = idf_weight * idf_weight; int match_begin = temp.indexInmap; int match_end = inverted_gpu[word_id+1].indexInmap; __syncthreads(); // if(match_end - match_begin > 2000 ) // { // printf("run-%d\t",match_end - match_begin); // break; // match_end = match_begin +2000; // } for(int index = match_begin + threadIdx.y; index < match_end; index += blockDim.y) { Entries_gpu0 temp_entrie = Entries_gpu[index]; if(image_index[temp_entrie.imageId] < 0) continue; size_t hamming_dist = 0; for(int j = 0; j < 64; j++) { if(des[j] ^ temp_entrie.HMBM[j]) { hamming_dist ++;//= (des[j] ^ temp_entrie.HMBM[j]); } } if (hamming_dist <= 24) { const float dist = LUTGpu[hamming_dist] * squared_idf_weight; int image_id = temp_entrie.imageId; int i = blockIndex; int feature_idx = temp_entrie.idxId; if (i < DATA_SIZE && feature_idx < DATA_SIZE) { int index_match = image_index[image_id] * DATA_SIZE * DATA_SIZE + i * DATA_SIZE + feature_idx; if (mat_g[index_match] == 0) { atomicAdd(&sum_mat_g[image_index[image_id] * DOUBLE_DATA_SIZE + i], 1); atomicAdd(&sum_mat_g[image_index[image_id] * DOUBLE_DATA_SIZE + DATA_SIZE + feature_idx], 1); mat_g[index_match] = float(dist + 1); } else if (mat_g[index_match] < float(dist + 1)) { mat_g[index_match] = float(dist + 1); } } } } } __syncthreads(); } } } __global__ static void gpu_words2(float * mat_g, int *sum_mat_g) { int index_image = blockIdx.x; int sum = 0; for(int index_idx = threadIdx.x; index_idx<DATA_SIZE; index_idx+=blockDim.x) { for(int index = 0; index < DATA_SIZE; index++) if(mat_g[index_image*DATA_SIZE*DATA_SIZE + index_idx*DATA_SIZE + index]) sum++; sum_mat_g[index_image*DOUBLE_DATA_SIZE + index_idx] = sum; sum = 0; } for(int index_quere = threadIdx.x; index_quere<DATA_SIZE; index_quere+=blockDim.x) { for(int index = 0; index < DATA_SIZE; index++) if(mat_g[index_image*DATA_SIZE*DATA_SIZE + index_quere + DATA_SIZE*index]) sum++; sum_mat_g[index_image*DOUBLE_DATA_SIZE + DATA_SIZE + index_quere] = sum; sum = 0; } } void function_words(int * imageIndex, float *descriptor, int * wordsId, int size) { float time_GPU; hipEvent_t start_GPU, 
stop_GPU; hipEventCreate(&start_GPU); hipEventCreate(&stop_GPU); hipEventRecord(start_GPU, 0); // hipStream_t stream, stream1; // hipStreamCreate(&stream); // hipStreamCreate(&stream1); dim3 blockdim(1,64,1); int size_mall; if(size >DATA_SIZE) size_mall = DATA_SIZE; else size_mall = size; dim3 griddim(size_mall,1,1); hipMemcpy(image_index_gpu, imageIndex, sizeof(int)*10000, hipMemcpyHostToDevice); hipMemcpy(wordsID_gpu, wordsId, sizeof(int)*size_mall*5, hipMemcpyHostToDevice); gpu_words1<< <griddim ,blockdim, 0 >> >(wordsID_gpu,Entries_gpu, inverted_gpu,mat_gpu, sum_mat_gpu, image_index_gpu,look_up_table_gpu,descriptor_gpu,size_mall); //gpu_words2<<<100,size/4+1>>>(mat_gpu,sum_mat_gpu); // gpu_words1<< <griddim ,blockdim, 0 ,stream1 >> >(wordsID_gpu + 500,Entries_gpu, // inverted_gpu,mat_gpu, sum_mat_gpu, // image_index_gpu,look_up_table_gpu,descriptor_gpu,size_mall - 500); // hipStreamSynchronize(stream); // hipStreamSynchronize(stream1); // hipStreamDestroy(stream); // hipStreamDestroy(stream1); hipEventRecord(stop_GPU, 0); hipEventSynchronize(start_GPU); hipEventSynchronize(stop_GPU); hipEventElapsedTime(&time_GPU, start_GPU, stop_GPU); printf("The time for function_words memcpy:\t%f(ms)\n", time_GPU); hipEventDestroy(start_GPU); hipEventDestroy(stop_GPU); } __global__ static void Multiplication(float * d_A, float * d_B, float *result, int B) { float sum = 0; for(int i = 0;i<B;i++) { sum += d_A[blockIdx.x*B+i]*d_B[i*blockDim.x+threadIdx.x]; } result[blockIdx.x*blockDim.x+threadIdx.x] = sum; } void function_descriptors(const float *h_A, float *h_B, float *result,int A, int B, int C) { float time_GPU; hipEvent_t start_GPU, stop_GPU; hipEventCreate(&start_GPU); hipEventCreate(&stop_GPU); hipEventRecord(start_GPU, 0); hipMemcpy(d_A, h_A, sizeof(float) * A * B, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, sizeof(float) * B * C, hipMemcpyHostToDevice); hipLaunchKernelGGL(( Multiplication), dim3(A),dim3(C),0, 0, d_A,d_B,descriptor_gpu,B); hipEventRecord(stop_GPU, 0); hipEventSynchronize(start_GPU); hipEventSynchronize(stop_GPU); hipEventElapsedTime(&time_GPU, start_GPU, stop_GPU); printf("The time for function_descriptors memcpy:\t%f(ms)\n", time_GPU); hipEventDestroy(start_GPU); hipEventDestroy(stop_GPU); } void gpuInit(char* inverted,char* Entries) { if (!InitCUDA()) { printf("initcuda error"); return; } for(int i = 0; i < 64; i++) { look_up_table[i] = ::exp(-1.0 * i * i / (24.0*24.0)); } hipMalloc((void**)&d_A, sizeof(float) * DATA_SIZE * 128); hipMalloc((void**)&d_B, sizeof(float) * 128*64);// hipMalloc((void**)&d_Result, sizeof(int) * DATA_SIZE * 64);// hipMalloc((void**)&result_gpu, sizeof(int) * DATA_SIZE * 100); hipMalloc((void**)&mat_gpu, sizeof(float) * DATA_SIZE*DATA_SIZE * 100);// hipMalloc((void**)&sum_mat_gpu, sizeof(int) * DOUBLE_DATA_SIZE * 100);// hipMalloc((void**)&descriptor_gpu, sizeof(float) * DATA_SIZE*64); hipMalloc((void**)&image_index_gpu, sizeof(int) * 10000); hipMalloc((void**)&look_up_table_gpu, sizeof(float) * 64); hipMalloc((void**)&wordsID_gpu, sizeof(int) * DATA_SIZE * 5 ); hipMalloc((void**)&inverted_gpu, sizeof(inverted_gpu0) * 32768); hipMalloc((void**)&Entries_gpu, sizeof(Entries_gpu0) * 2000*2000);// hipMemcpy(look_up_table_gpu, look_up_table, sizeof(float)*64, hipMemcpyHostToDevice); hipMemcpy(inverted_gpu, inverted, sizeof(inverted_gpu0) * 32768, hipMemcpyHostToDevice); hipMemcpy(Entries_gpu, Entries, sizeof(Entries_gpu0) * 2000*2000, hipMemcpyHostToDevice); hipMemset(mat_gpu, 0, sizeof(float) * DATA_SIZE*DATA_SIZE * 100); hipMemset(sum_mat_gpu, 0, 
sizeof(int) * DOUBLE_DATA_SIZE * 100); } void gpuRelese() { hipFree(d_A); hipFree(d_B); hipFree(d_Result); hipFree(wordsID_gpu); hipFree(inverted_gpu); hipFree(Entries_gpu); hipFree(mat_gpu); hipFree(sum_mat_gpu); hipFree(result_gpu); }
3ce06dc841f4f5cde2b76e2bcf95d543056dd64f.cu
#include<stdio.h> #include<iostream> #include<stdlib.h> #include<vector> #include<bitset> #include<cuda_runtime.h> #include<device_launch_parameters.h> #define DATA_SIZE 2000 #define DOUBLE_DATA_SIZE 4000 static float look_up_table[64]; static float* look_up_table_gpu; struct Entries_gpu0 { int imageId; int idxId; bool HMBM[64]; }; struct inverted_gpu0 { float thresholds[64]; float idf_weight; int indexInmap; // map_begin }; static float *mat_gpu; static int *sum_mat_gpu,*result_gpu,*wordsID_gpu; static inverted_gpu0 *inverted_gpu; static Entries_gpu0 *Entries_gpu; static int *image_index_gpu; static float *descriptor_gpu; static float *d_A; static float *d_B; static float *d_Result; bool InitCUDA() { int count; cudaGetDeviceCount(&count); printf("threr are %d device!",count); if(count == 0){ fprintf(stderr,"there is no device.\n"); return false; } int i; for(i = 0;i<count;i++) { cudaDeviceProp prop; if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } __global__ static void gpu_function(float * mat_g, int *sum_mat_g,int *result_g) { int indexId = blockIdx.x; float *mat = mat_g + indexId * DATA_SIZE * DATA_SIZE; int * sum_mat = sum_mat_g + indexId * DOUBLE_DATA_SIZE; __shared__ int smallIndex1[256]; __shared__ int sum_small1[256]; // __shared__ int sum_mat_shared_g[DOUBLE_DATA_SIZE]; // for(int index_query = threadIdx.x; index_query < DOUBLE_DATA_SIZE; index_query += blockDim.x ) // { // sum_mat_shared_g[index_query] = sum_mat_g[indexId * DOUBLE_DATA_SIZE + index_query]; // sum_mat_g[indexId * DOUBLE_DATA_SIZE + index_query] = 0; // } while(true) { __shared__ int sum_small; sum_small = 4000; __shared__ int smallIndex; smallIndex = -1; smallIndex1[threadIdx.x] = -1; sum_small1[threadIdx.x] = 4000; for(int index_query = threadIdx.x; index_query < DOUBLE_DATA_SIZE; index_query += blockDim.x ) { if(sum_mat[index_query] > 0 && sum_mat[index_query] < sum_small1[threadIdx.x]) { smallIndex1[threadIdx.x] = index_query; sum_small1[threadIdx.x] = sum_mat[index_query];//最小个数 } // if(sum_mat[index_query] == 1) // { // atomicExch(&sum_small, 1); // atomicExch(&smallIndex, index_query); // } } __syncthreads(); if(sum_small1[threadIdx.x] == 1) { atomicExch(&sum_small, 1); atomicExch(&smallIndex, smallIndex1[threadIdx.x]); } __syncthreads(); if(sum_small != 1) { if(threadIdx.x<16) for(int index = threadIdx.x; index < blockDim.x; index += 16) { if(sum_small1[index]>0 && sum_small1[threadIdx.x] > sum_small1[index]){ smallIndex1[threadIdx.x] = smallIndex1[index]; sum_small1[threadIdx.x] = sum_small1[index]; } } __syncthreads(); if(threadIdx.x==0) for(int index = 0; index < 16; index++) { if(sum_small1[index]>0 && sum_small > sum_small1[index]) { smallIndex = smallIndex1[index]; sum_small = sum_small1[index]; } } } __syncthreads(); if(sum_small == 4000) { return; } sum_mat[smallIndex] = 0; int bigDistIndex = -1; float distNumber = 0; __shared__ int bigDistI[256]; __shared__ float distNum[256]; bigDistI[threadIdx.x] = -1; distNum[threadIdx.x] = 0; __syncthreads(); if(smallIndex < DATA_SIZE) { for(int index = threadIdx.x; index < DATA_SIZE; index += blockDim.x) { int temp1 = smallIndex * DATA_SIZE + index; if(mat[temp1] > 0) { if(mat[temp1] > distNum[threadIdx.x]) { bigDistI[threadIdx.x] = index; distNum[threadIdx.x] = mat[temp1]; } mat[temp1] = 0; sum_mat[DATA_SIZE+index] --; } } __syncthreads(); if(threadIdx.x<16) for(int index = threadIdx.x; 
index < blockDim.x ; index += 16) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); #pragma unroll for(int index = 0; index < 16; index++) { if(distNum[index] > distNumber) { bigDistIndex = bigDistI[index]; distNumber = distNum[index]; } } __syncthreads(); sum_mat[bigDistIndex+DATA_SIZE] = 0; result_g[indexId * DATA_SIZE + smallIndex] = bigDistIndex + 1; for(int indexIdx = threadIdx.x; indexIdx < DATA_SIZE; indexIdx += blockDim.x) { int temp2 = indexIdx * DATA_SIZE + bigDistIndex; if (mat[temp2] > 0) { mat[temp2] = 0; sum_mat[indexIdx]--; } } } else { for(int index = threadIdx.x; index < DATA_SIZE; index += blockDim.x) { int temp1 = smallIndex - DATA_SIZE + DATA_SIZE * index; if(mat[temp1] > 0) { if(mat[temp1] > distNum[threadIdx.x]) { bigDistI[threadIdx.x] = index; distNum[threadIdx.x] = mat[temp1]; } mat[temp1] = 0; sum_mat[index] --; } } __syncthreads(); if(threadIdx.x<16) for(int index = threadIdx.x; index < blockDim.x ; index += 16) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); #pragma unroll for(int index = 0; index < 16; index++) { if(distNum[index] > distNumber) { bigDistIndex = bigDistI[index]; distNumber = distNum[index]; } } __syncthreads(); sum_mat[bigDistIndex] = 0; result_g[indexId * DATA_SIZE + bigDistIndex] = smallIndex - DATA_SIZE + 1; for(int indexIm = threadIdx.x; indexIm < DATA_SIZE; indexIm += blockDim.x) { int temp2 = indexIm + bigDistIndex * DATA_SIZE; if (mat[temp2] > 0) { mat[temp2] = 0; sum_mat[indexIm + DATA_SIZE]--; } } } __syncthreads(); } } __global__ static void gpu_functionNew(float * mat_g, int *sum_mat_g,int *result_g) { int indexId = blockIdx.x; if(indexId>99) return; float *mat = mat_g + indexId * DATA_SIZE * DATA_SIZE; int * sum_mat = sum_mat_g + indexId * DOUBLE_DATA_SIZE; __shared__ int smallIndex1[256]; __shared__ int sum_small1[256]; while(true) { __shared__ int sum_small; sum_small = 4000; __shared__ int smallIndex; smallIndex = -1; smallIndex1[threadIdx.x] = -1; sum_small1[threadIdx.x] = 4000; for(int index_query = threadIdx.x; index_query < DOUBLE_DATA_SIZE; index_query += blockDim.x ) { if(sum_mat[index_query] > 0 && sum_mat[index_query] < sum_small1[threadIdx.x] && index_query < DOUBLE_DATA_SIZE) { smallIndex1[threadIdx.x] = index_query; sum_small1[threadIdx.x] = sum_mat[index_query];//最小个数 } if(sum_mat[index_query] == 1) { sum_small = 1; smallIndex = index_query; } } __syncthreads(); if(sum_small != 1) { if(threadIdx.x<16) for(int index = threadIdx.x; index < blockDim.x; index += 16) { if(sum_small1[index]>0 && sum_small1[threadIdx.x] > sum_small1[index]){ smallIndex1[threadIdx.x] = smallIndex1[index]; sum_small1[threadIdx.x] = sum_small1[index]; } } __syncthreads(); if(threadIdx.x==0) for(int index = 0; index < 16; index++) { if(sum_small1[index]>0 && sum_small > sum_small1[index]) { smallIndex = smallIndex1[index]; sum_small = sum_small1[index]; } } } __syncthreads(); if(sum_small == 4000) return; sum_mat[smallIndex] = 0; int bigDistIndex = -1; float distNumber = 0; __shared__ int bigDistI[256]; __shared__ float distNum[256]; bigDistI[threadIdx.x] = -1; distNum[threadIdx.x] = 0; __syncthreads(); if(smallIndex < DATA_SIZE) { for(int index = threadIdx.x; index < DATA_SIZE; index += blockDim.x) { int temp1 = smallIndex * DATA_SIZE + index; if(mat[temp1] > 0) { if(mat[temp1] > distNum[threadIdx.x]) { bigDistI[threadIdx.x] = index; distNum[threadIdx.x] 
= mat[temp1]; } mat[temp1] = 0; sum_mat[DATA_SIZE+index] --; } } __syncthreads(); if(threadIdx.x<64) for(int index = threadIdx.x; index < blockDim.x ; index += 64) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); if(threadIdx.x<16) for(int index = threadIdx.x; index < 64 ; index += 16) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); if(threadIdx.x<4) for(int index = threadIdx.x; index < 16 ; index += 4) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); for(int index = 0; index < 4; index++) { if(distNum[index] > distNumber) { bigDistIndex = bigDistI[index]; distNumber = distNum[index]; } } __syncthreads(); sum_mat[bigDistIndex+DATA_SIZE] = 0; result_g[indexId * DATA_SIZE + smallIndex] = bigDistIndex + 1; for(int indexIdx = threadIdx.x; indexIdx < DATA_SIZE; indexIdx += blockDim.x) { int temp2 = indexIdx * DATA_SIZE + bigDistIndex; if (mat[temp2] > 0) { mat[temp2] = 0; sum_mat[indexIdx]--; } } } else { for(int index = threadIdx.x; index < DATA_SIZE; index += blockDim.x) { int temp1 = smallIndex - DATA_SIZE + DATA_SIZE * index; if(mat[temp1] > 0) { if(mat[temp1] > distNum[threadIdx.x]) { bigDistI[threadIdx.x] = index; distNum[threadIdx.x] = mat[temp1]; } mat[temp1] = 0; sum_mat[index] --; } } __syncthreads(); if(threadIdx.x<64) for(int index = threadIdx.x; index < blockDim.x ; index += 64) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); if(threadIdx.x<16) for(int index = threadIdx.x; index < 64 ; index += 16) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); if(threadIdx.x<4) for(int index = threadIdx.x; index < 16 ; index += 4) { if(distNum[index] > distNum[threadIdx.x]) { distNum[threadIdx.x] = distNum[index]; bigDistI[threadIdx.x] = bigDistI[index]; } } __syncthreads(); for(int index = 0; index < 4; index++) { if(distNum[index] > distNumber) { bigDistIndex = bigDistI[index]; distNumber = distNum[index]; } } __syncthreads(); sum_mat[bigDistIndex] = 0; result_g[indexId * DATA_SIZE + bigDistIndex] = smallIndex - DATA_SIZE + 1; for(int indexIm = threadIdx.x; indexIm < DATA_SIZE; indexIm += blockDim.x) { int temp2 = indexIm + bigDistIndex * DATA_SIZE; if (mat[temp2] > 0) { mat[temp2] = 0; sum_mat[indexIm + DATA_SIZE]--; } } } __syncthreads(); } } void function1(int *result) { // cudaStream_t stream, stream1; // cudaStreamCreate(&stream); // cudaStreamCreate(&stream1); dim3 blockdim(256,1,1); dim3 griddim(64,1,1); float time_GPU; cudaEvent_t start_GPU, stop_GPU; cudaEventCreate(&start_GPU); cudaEventCreate(&stop_GPU); cudaEventRecord(start_GPU, 0); cudaMemset(result_gpu, 0, sizeof(int) * DATA_SIZE * 100); gpu_function<< <griddim ,blockdim, 0 >> >(mat_gpu, sum_mat_gpu, result_gpu); cudaEventRecord(stop_GPU, 0); cudaEventSynchronize(start_GPU); cudaEventSynchronize(stop_GPU); cudaEventElapsedTime(&time_GPU, start_GPU, stop_GPU); printf("The time for function1:\t%f(ms)\n", time_GPU); cudaEventDestroy(start_GPU); cudaEventDestroy(stop_GPU); //gpu_function<< <griddim ,blockdim, 0 >> >(mat_gpu+DATA_SIZE*DATA_SIZE*50, sum_mat_gpu + DOUBLE_DATA_SIZE*50, result_gpu+ DATA_SIZE*50); // cudaStreamSynchronize(stream); // 
cudaStreamSynchronize(stream1); // cudaStreamDestroy(stream); // cudaStreamDestroy(stream1); // static float *mat_cpu = new float[DATA_SIZE*DATA_SIZE * 100]; // static int *sum_mat_cpu = new int[DATA_SIZE*200]; // cudaMemcpy(mat_cpu, mat_gpu, sizeof(float) * DATA_SIZE*DATA_SIZE * 100, cudaMemcpyDeviceToHost); // cudaMemcpy(sum_mat_cpu, sum_mat_gpu, sizeof(int) * DATA_SIZE * 200, cudaMemcpyDeviceToHost); // int a = 0; // int b = 0; // for(int i = 0;i<DATA_SIZE*DATA_SIZE * 100;i++) // if(mat_cpu[i]>0) a++; // for(int i = 0; i<2000*200;i++) // if(sum_mat_cpu[i]!=0) // b++; // printf("a:%d\t",a);printf("b:%d\n",b); cudaMemcpy(result, result_gpu, sizeof(int) * DATA_SIZE * 100, cudaMemcpyDeviceToHost); } __global__ static void gpu_words1(int* wordsId,Entries_gpu0* Entries_gpu, inverted_gpu0* inverted_gpu,float * mat_g, int *sum_mat_g,int * image_index, float * LUTGpu,float *descriptor,int size) { for(int blockIndex = blockIdx.x;blockIndex<size;blockIndex += gridDim.x) { int word_id ; //= wordsId[blockIndex*blockDim.x+threadIdx.x]; for(int i = 0; i<5; i++) { word_id = wordsId[blockIndex*5+i]; if(word_id <1000000) { inverted_gpu0 temp = inverted_gpu[word_id]; __shared__ bool des[64]; for(int i = threadIdx.y; i<64; i += blockDim.y) { des[i] = (descriptor[blockIndex * 64 + i] > inverted_gpu[word_id].thresholds[i]); } float idf_weight = temp.idf_weight; float squared_idf_weight = idf_weight * idf_weight; int match_begin = temp.indexInmap; int match_end = inverted_gpu[word_id+1].indexInmap; __syncthreads(); // if(match_end - match_begin > 2000 ) // { // printf("run-%d\t",match_end - match_begin); // break; // match_end = match_begin +2000; // } for(int index = match_begin + threadIdx.y; index < match_end; index += blockDim.y) { Entries_gpu0 temp_entrie = Entries_gpu[index]; if(image_index[temp_entrie.imageId] < 0) continue; size_t hamming_dist = 0; for(int j = 0; j < 64; j++) { if(des[j] ^ temp_entrie.HMBM[j]) { hamming_dist ++;//= (des[j] ^ temp_entrie.HMBM[j]); } } if (hamming_dist <= 24) { const float dist = LUTGpu[hamming_dist] * squared_idf_weight; int image_id = temp_entrie.imageId; int i = blockIndex; int feature_idx = temp_entrie.idxId; if (i < DATA_SIZE && feature_idx < DATA_SIZE) { int index_match = image_index[image_id] * DATA_SIZE * DATA_SIZE + i * DATA_SIZE + feature_idx; if (mat_g[index_match] == 0) { atomicAdd(&sum_mat_g[image_index[image_id] * DOUBLE_DATA_SIZE + i], 1); atomicAdd(&sum_mat_g[image_index[image_id] * DOUBLE_DATA_SIZE + DATA_SIZE + feature_idx], 1); mat_g[index_match] = float(dist + 1); } else if (mat_g[index_match] < float(dist + 1)) { mat_g[index_match] = float(dist + 1); } } } } } __syncthreads(); } } } __global__ static void gpu_words2(float * mat_g, int *sum_mat_g) { int index_image = blockIdx.x; int sum = 0; for(int index_idx = threadIdx.x; index_idx<DATA_SIZE; index_idx+=blockDim.x) { for(int index = 0; index < DATA_SIZE; index++) if(mat_g[index_image*DATA_SIZE*DATA_SIZE + index_idx*DATA_SIZE + index]) sum++; sum_mat_g[index_image*DOUBLE_DATA_SIZE + index_idx] = sum; sum = 0; } for(int index_quere = threadIdx.x; index_quere<DATA_SIZE; index_quere+=blockDim.x) { for(int index = 0; index < DATA_SIZE; index++) if(mat_g[index_image*DATA_SIZE*DATA_SIZE + index_quere + DATA_SIZE*index]) sum++; sum_mat_g[index_image*DOUBLE_DATA_SIZE + DATA_SIZE + index_quere] = sum; sum = 0; } } void function_words(int * imageIndex, float *descriptor, int * wordsId, int size) { float time_GPU; cudaEvent_t start_GPU, stop_GPU; cudaEventCreate(&start_GPU); cudaEventCreate(&stop_GPU); 
cudaEventRecord(start_GPU, 0); // cudaStream_t stream, stream1; // cudaStreamCreate(&stream); // cudaStreamCreate(&stream1); dim3 blockdim(1,64,1); int size_mall; if(size >DATA_SIZE) size_mall = DATA_SIZE; else size_mall = size; dim3 griddim(size_mall,1,1); cudaMemcpy(image_index_gpu, imageIndex, sizeof(int)*10000, cudaMemcpyHostToDevice); cudaMemcpy(wordsID_gpu, wordsId, sizeof(int)*size_mall*5, cudaMemcpyHostToDevice); gpu_words1<< <griddim ,blockdim, 0 >> >(wordsID_gpu,Entries_gpu, inverted_gpu,mat_gpu, sum_mat_gpu, image_index_gpu,look_up_table_gpu,descriptor_gpu,size_mall); //gpu_words2<<<100,size/4+1>>>(mat_gpu,sum_mat_gpu); // gpu_words1<< <griddim ,blockdim, 0 ,stream1 >> >(wordsID_gpu + 500,Entries_gpu, // inverted_gpu,mat_gpu, sum_mat_gpu, // image_index_gpu,look_up_table_gpu,descriptor_gpu,size_mall - 500); // cudaStreamSynchronize(stream); // cudaStreamSynchronize(stream1); // cudaStreamDestroy(stream); // cudaStreamDestroy(stream1); cudaEventRecord(stop_GPU, 0); cudaEventSynchronize(start_GPU); cudaEventSynchronize(stop_GPU); cudaEventElapsedTime(&time_GPU, start_GPU, stop_GPU); printf("The time for function_words memcpy:\t%f(ms)\n", time_GPU); cudaEventDestroy(start_GPU); cudaEventDestroy(stop_GPU); } __global__ static void Multiplication(float * d_A, float * d_B, float *result, int B) { float sum = 0; for(int i = 0;i<B;i++) { sum += d_A[blockIdx.x*B+i]*d_B[i*blockDim.x+threadIdx.x]; } result[blockIdx.x*blockDim.x+threadIdx.x] = sum; } void function_descriptors(const float *h_A, float *h_B, float *result,int A, int B, int C) { float time_GPU; cudaEvent_t start_GPU, stop_GPU; cudaEventCreate(&start_GPU); cudaEventCreate(&stop_GPU); cudaEventRecord(start_GPU, 0); cudaMemcpy(d_A, h_A, sizeof(float) * A * B, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, sizeof(float) * B * C, cudaMemcpyHostToDevice); Multiplication<<<A,C,0>>>(d_A,d_B,descriptor_gpu,B); cudaEventRecord(stop_GPU, 0); cudaEventSynchronize(start_GPU); cudaEventSynchronize(stop_GPU); cudaEventElapsedTime(&time_GPU, start_GPU, stop_GPU); printf("The time for function_descriptors memcpy:\t%f(ms)\n", time_GPU); cudaEventDestroy(start_GPU); cudaEventDestroy(stop_GPU); } void gpuInit(char* inverted,char* Entries) { if (!InitCUDA()) { printf("initcuda error"); return; } for(int i = 0; i < 64; i++) { look_up_table[i] = std::exp(-1.0 * i * i / (24.0*24.0)); } cudaMalloc((void**)&d_A, sizeof(float) * DATA_SIZE * 128); cudaMalloc((void**)&d_B, sizeof(float) * 128*64);//邻接矩阵 cudaMalloc((void**)&d_Result, sizeof(int) * DATA_SIZE * 64);//单对多个数 cudaMalloc((void**)&result_gpu, sizeof(int) * DATA_SIZE * 100); cudaMalloc((void**)&mat_gpu, sizeof(float) * DATA_SIZE*DATA_SIZE * 100);//邻接矩阵 cudaMalloc((void**)&sum_mat_gpu, sizeof(int) * DOUBLE_DATA_SIZE * 100);//单对多个数 cudaMalloc((void**)&descriptor_gpu, sizeof(float) * DATA_SIZE*64); cudaMalloc((void**)&image_index_gpu, sizeof(int) * 10000); cudaMalloc((void**)&look_up_table_gpu, sizeof(float) * 64); cudaMalloc((void**)&wordsID_gpu, sizeof(int) * DATA_SIZE * 5 ); cudaMalloc((void**)&inverted_gpu, sizeof(inverted_gpu0) * 32768); cudaMalloc((void**)&Entries_gpu, sizeof(Entries_gpu0) * 2000*2000);//邻接矩阵 cudaMemcpy(look_up_table_gpu, look_up_table, sizeof(float)*64, cudaMemcpyHostToDevice); cudaMemcpy(inverted_gpu, inverted, sizeof(inverted_gpu0) * 32768, cudaMemcpyHostToDevice); cudaMemcpy(Entries_gpu, Entries, sizeof(Entries_gpu0) * 2000*2000, cudaMemcpyHostToDevice); cudaMemset(mat_gpu, 0, sizeof(float) * DATA_SIZE*DATA_SIZE * 100); cudaMemset(sum_mat_gpu, 0, sizeof(int) * 
DOUBLE_DATA_SIZE * 100); } void gpuRelese() { cudaFree(d_A); cudaFree(d_B); cudaFree(d_Result); cudaFree(wordsID_gpu); cudaFree(inverted_gpu); cudaFree(Entries_gpu); cudaFree(mat_gpu); cudaFree(sum_mat_gpu); cudaFree(result_gpu); }
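/*
 * A minimal, hypothetical sketch (not part of the file pair above): none of the
 * launches in function1, function_words or function_descriptors check for launch
 * or runtime errors. A small helper like the one below is one common way to
 * surface failures early; the macro name CHECK_CUDA is an assumption, not an API
 * of this project.
 */
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
        }                                                                   \
    } while (0)

// Usage after a launch such as gpu_function<<<griddim, blockdim>>>(...):
//     CHECK_CUDA(cudaGetLastError());       // reports launch-time errors
//     CHECK_CUDA(cudaDeviceSynchronize());  // reports errors raised while the kernel ran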
ff36e8e13b1ef7555c81c3f962dbb2fd49571139.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "projektcuda.h" #include "project_comm.h" //#include "mex.h" /* Kernel to square elements of the array on the GPU */ /* typedef struct{ int width; int height; int stride; float * elements; } Matrix; /// __device__ float GetVectorElement(const Matrix A, int row, int offset){ return A.elements[row * VECTOR_BLOCK_SIZE + offset]; } ///?????????????????? __device__ void setVectorElement(Matrix A, int row, int offset, float value){ A.elements[row * VECTOR_BLOCK_SIZE + offset] = value; } __device__ Matrix GetSubVector(Matrix A, int row){ Matrix Asub; Asub.width = 1; Asub.height = VECTOR_BLOCK_SIZE; Asub.stride = 1; Asub.elements = & A.elements[row * VECTOR_BLOCK_SIZE] } */ /* N size of Vector */ __global__ void device_dotMul(t_ve* in1, t_ve* in2,t_ve* out, unsigned int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx > N) return; if(idx == 0)out[blockIdx.x] = 0; __syncthreads(); //block index int blockRow = blockIdx.x; // thread index int row = threadIdx.x; int aBegin = blockRow*VECTOR_BLOCK_SIZE; int aEnd = aBegin + VECTOR_BLOCK_SIZE - 1; int aStep = VECTOR_BLOCK_SIZE; // // comupted by the thread t_ve outValue = 0; //for (int a = aBegin;(a <= aEnd)&&(a <= N);a += aStep){ for (int a = aBegin;(a <= aEnd);a += aStep){ // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[VECTOR_BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[VECTOR_BLOCK_SIZE]; __shared__ float Cs[VECTOR_BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix AS(row) = in1[a + row]; BS(row) = in2[a + row]; // Synchronize to make sure the matrices are loaded __syncthreads(); Cs[row] = AS(row) * BS(row); /* // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; (k < VECTOR_BLOCK_SIZE)&&(k < N); ++k) //for (int k = 0; (k < VECTOR_BLOCK_SIZE); ++k) outValue += AS(k) * BS(k); */ // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); if (row == 0) { for (int k = 0; (k < VECTOR_BLOCK_SIZE)&&(idx < N); k++) out[blockIdx.x] += Cs[k]; //out[0] += 1; //outValue += 1; } __syncthreads(); } //__syncthreads(); if(idx==0){ for(int k = 1; k <= gridDim.x; k++)out[0] += out[k]; } //out[0] = outValue; // __syncthreads(); }
ff36e8e13b1ef7555c81c3f962dbb2fd49571139.cu
#include "cuda.h" #include <stdio.h> #include "projektcuda.h" #include "project_comm.h" //#include "mex.h" /* Kernel to square elements of the array on the GPU */ /* typedef struct{ int width; int height; int stride; float * elements; } Matrix; /// __device__ float GetVectorElement(const Matrix A, int row, int offset){ return A.elements[row * VECTOR_BLOCK_SIZE + offset]; } ///?????????????????? __device__ void setVectorElement(Matrix A, int row, int offset, float value){ A.elements[row * VECTOR_BLOCK_SIZE + offset] = value; } __device__ Matrix GetSubVector(Matrix A, int row){ Matrix Asub; Asub.width = 1; Asub.height = VECTOR_BLOCK_SIZE; Asub.stride = 1; Asub.elements = & A.elements[row * VECTOR_BLOCK_SIZE] } */ /* N size of Vector */ __global__ void device_dotMul(t_ve* in1, t_ve* in2,t_ve* out, unsigned int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx > N) return; if(idx == 0)out[blockIdx.x] = 0; __syncthreads(); //block index int blockRow = blockIdx.x; // thread index int row = threadIdx.x; int aBegin = blockRow*VECTOR_BLOCK_SIZE; int aEnd = aBegin + VECTOR_BLOCK_SIZE - 1; int aStep = VECTOR_BLOCK_SIZE; // // comupted by the thread t_ve outValue = 0; //for (int a = aBegin;(a <= aEnd)&&(a <= N);a += aStep){ for (int a = aBegin;(a <= aEnd);a += aStep){ // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[VECTOR_BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[VECTOR_BLOCK_SIZE]; __shared__ float Cs[VECTOR_BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix AS(row) = in1[a + row]; BS(row) = in2[a + row]; // Synchronize to make sure the matrices are loaded __syncthreads(); Cs[row] = AS(row) * BS(row); /* // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; (k < VECTOR_BLOCK_SIZE)&&(k < N); ++k) //for (int k = 0; (k < VECTOR_BLOCK_SIZE); ++k) outValue += AS(k) * BS(k); */ // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); if (row == 0) { for (int k = 0; (k < VECTOR_BLOCK_SIZE)&&(idx < N); k++) out[blockIdx.x] += Cs[k]; //out[0] += 1; //outValue += 1; } __syncthreads(); } //__syncthreads(); if(idx==0){ for(int k = 1; k <= gridDim.x; k++)out[0] += out[k]; } //out[0] = outValue; // __syncthreads(); }
bccdc2497493d3eae5d6e1540b8841fc63fd4c2e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__global__ void aKernel()
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int r1, r2, res_diff;
    __shared__ int arr[512];

    arr[idx] = idx;
    r1 = arr[idx];
    printf("A: Thread %5d, value %5d\n", idx, arr[idx]);

    if (idx < 511)
        arr[idx] = arr[idx + 1];
    r2 = arr[idx];
    res_diff = r2 - r1;
    printf("B: Thread %5d, value %5d, d=%5d\n", idx, arr[idx], res_diff);
}

int main()
{
    hipLaunchKernelGGL(( aKernel) , dim3(1), dim3(512), 0, 0, );
    return 0;
}
bccdc2497493d3eae5d6e1540b8841fc63fd4c2e.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void aKernel() { int idx = blockDim.x * blockIdx.x + threadIdx.x; int r1, r2, res_diff; __shared__ int arr[512]; arr[idx] = idx; r1 = arr[idx]; printf("A: Thread %5d, value %5d\n", idx, arr[idx]); if (idx < 511) arr[idx] = arr[idx + 1]; r2 = arr[idx]; res_diff = r2 - r1; printf("B: Thread %5d, value %5d, d=%5d\n", idx, arr[idx], res_diff); } int main() { aKernel <<<1, 512>>> (); return 0; }
e0cf867cd2b027529f0938431cff4132ab25e1d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> s d c */ #include "magma_internal.h" /******************************************************************************/ /* * Swap diagonal blocks of two matrices. * Each thread block swaps one diagonal block. * Each thread iterates across one row of the block. */ __global__ void zswapdblk_kernel( int nb, magmaDoubleComplex *dA, int ldda, int inca, magmaDoubleComplex *dB, int lddb, int incb ) { const int tx = threadIdx.x; const int bx = blockIdx.x; dA += tx + bx * nb * (ldda + inca); dB += tx + bx * nb * (lddb + incb); magmaDoubleComplex tmp; #pragma unroll for( int i = 0; i < nb; i++ ) { tmp = dA[i*ldda]; dA[i*ldda] = dB[i*lddb]; dB[i*lddb] = tmp; } } /***************************************************************************//** Purpose ------- zswapdblk swaps diagonal blocks of size nb x nb between matrices dA and dB on the GPU. It swaps nblocks = n/nb blocks. For i = 1 .. nblocks, submatrices dA( i*nb*inca, i*nb ) and dB( i*nb*incb, i*nb ) are swapped. Arguments --------- @param[in] n INTEGER The number of columns of the matrices dA and dB. N >= 0. @param[in] nb INTEGER The size of diagonal blocks. NB > 0 and NB <= maximum threads per CUDA block (512 or 1024). @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= (nblocks - 1)*nb*inca + nb. @param[in] inca INTEGER The row increment between diagonal blocks of dA. inca >= 0. For example, inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb), inca = 0 means blocks are stored side-by-side at dA(0, i*nb). @param[in,out] dB COMPLEX_16 array, dimension (LDDB,N) The matrix dB. @param[in] lddb INTEGER The leading dimension of the array db. LDDB >= (nblocks - 1)*nb*incb + nb. @param[in] incb INTEGER The row increment between diagonal blocks of dB. incb >= 0. See inca. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_swapdblk *******************************************************************************/ extern "C" void magmablas_zswapdblk_q( magma_int_t n, magma_int_t nb, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t inca, magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_int_t incb, magma_queue_t queue ) { magma_int_t nblocks = n / nb; magma_int_t info = 0; if (n < 0) { info = -1; } else if (nb < 1 || nb > 1024) { info = -2; } else if (ldda < (nblocks-1)*nb*inca + nb) { info = -4; } else if (inca < 0) { info = -5; } else if (lddb < (nblocks-1)*nb*incb + nb) { info = -7; } else if (incb < 0) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( nblocks > 0 ) { hipLaunchKernelGGL(( zswapdblk_kernel), dim3(nblocks), dim3(nb), 0, queue->cuda_stream() , nb, dA, ldda, inca, dB, lddb, incb ); } }
e0cf867cd2b027529f0938431cff4132ab25e1d4.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> s d c */ #include "magma_internal.h" /******************************************************************************/ /* * Swap diagonal blocks of two matrices. * Each thread block swaps one diagonal block. * Each thread iterates across one row of the block. */ __global__ void zswapdblk_kernel( int nb, magmaDoubleComplex *dA, int ldda, int inca, magmaDoubleComplex *dB, int lddb, int incb ) { const int tx = threadIdx.x; const int bx = blockIdx.x; dA += tx + bx * nb * (ldda + inca); dB += tx + bx * nb * (lddb + incb); magmaDoubleComplex tmp; #pragma unroll for( int i = 0; i < nb; i++ ) { tmp = dA[i*ldda]; dA[i*ldda] = dB[i*lddb]; dB[i*lddb] = tmp; } } /***************************************************************************//** Purpose ------- zswapdblk swaps diagonal blocks of size nb x nb between matrices dA and dB on the GPU. It swaps nblocks = n/nb blocks. For i = 1 .. nblocks, submatrices dA( i*nb*inca, i*nb ) and dB( i*nb*incb, i*nb ) are swapped. Arguments --------- @param[in] n INTEGER The number of columns of the matrices dA and dB. N >= 0. @param[in] nb INTEGER The size of diagonal blocks. NB > 0 and NB <= maximum threads per CUDA block (512 or 1024). @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= (nblocks - 1)*nb*inca + nb. @param[in] inca INTEGER The row increment between diagonal blocks of dA. inca >= 0. For example, inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb), inca = 0 means blocks are stored side-by-side at dA(0, i*nb). @param[in,out] dB COMPLEX_16 array, dimension (LDDB,N) The matrix dB. @param[in] lddb INTEGER The leading dimension of the array db. LDDB >= (nblocks - 1)*nb*incb + nb. @param[in] incb INTEGER The row increment between diagonal blocks of dB. incb >= 0. See inca. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_swapdblk *******************************************************************************/ extern "C" void magmablas_zswapdblk_q( magma_int_t n, magma_int_t nb, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t inca, magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_int_t incb, magma_queue_t queue ) { magma_int_t nblocks = n / nb; magma_int_t info = 0; if (n < 0) { info = -1; } else if (nb < 1 || nb > 1024) { info = -2; } else if (ldda < (nblocks-1)*nb*inca + nb) { info = -4; } else if (inca < 0) { info = -5; } else if (lddb < (nblocks-1)*nb*incb + nb) { info = -7; } else if (incb < 0) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( nblocks > 0 ) { zswapdblk_kernel<<< nblocks, nb, 0, queue->cuda_stream() >>> ( nb, dA, ldda, inca, dB, lddb, incb ); } }
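/*
 * A minimal host-side sketch (illustration only, not part of MAGMA): the pointer
 * arithmetic dA + bx*nb*(ldda + inca) used by zswapdblk_kernel places diagonal
 * block bx at column bx*nb and row bx*nb*inca of a column-major matrix. The
 * reference below performs the same swap on plain float matrices.
 */
#include <utility>

static void swapdblk_ref(int n, int nb,
                         float* A, int lda, int inca,
                         float* B, int ldb, int incb)
{
    int nblocks = n / nb;
    for (int bx = 0; bx < nblocks; ++bx) {
        float* blkA = A + bx * nb * (lda + inca);  // top-left of block bx in A
        float* blkB = B + bx * nb * (ldb + incb);  // top-left of block bx in B
        for (int j = 0; j < nb; ++j)               // column inside the block
            for (int i = 0; i < nb; ++i)           // row inside the block
                std::swap(blkA[i + j * lda], blkB[i + j * ldb]);
    }
}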
f23d3cec353acb1c007b75c3e5a79ff8957cfd8e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/deformable_ps_roi_pooling/kimpl/kern.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/deformable_ps_roi_pooling/kimpl/kern.cuh" #include "src/cuda/query_blocksize.cuh" namespace { using Param = megdnn::cuda::deformable_ps_roi_pooling::Param; __device__ float bilinear_interp( const float* data, const int IH, const int IW, const float h, const float w) { int h1 = floor(h), h2 = ceil(h); int w1 = floor(w), w2 = ceil(w); float dist_h = (float)(h - h1); float dist_w = (float)(w - w1); float value11 = data[h1 * IW + w1]; float value12 = data[h2 * IW + w1]; float value21 = data[h1 * IW + w2]; float value22 = data[h2 * IW + w2]; float value = (1 - dist_w) * (1 - dist_h) * value11 + (1 - dist_w) * dist_h * value12 + dist_w * (1 - dist_h) * value21 + dist_w * dist_h * value22; return value; } __global__ void DeformablePSROIPoolForwardKern( Param p, const float* data, const float* rois, const float* trans, float* out_data, float* out_count) { const int loops = p.nr_bbox * p.IC * p.pool_h * p.pool_w; const int icpcls = p.IC / p.nr_cls; KERN_FOR(idx, loops) { const int pw = idx % p.pool_w; const int ph = (idx / p.pool_w) % p.pool_h; const int ic = (idx / p.pool_w / p.pool_h) % p.IC; const int n = (idx / p.pool_w / p.pool_h / p.IC); const float* rois_ptr = &rois[n * 5]; int roi_batch_idx = rois_ptr[0]; float roi_w_l = static_cast<float>(round(rois_ptr[1])) * p.scale - 0.5; float roi_h_l = static_cast<float>(round(rois_ptr[2])) * p.scale - 0.5; float roi_w_r = static_cast<float>(round(rois_ptr[3]) + 1.) * p.scale - 0.5; float roi_h_r = static_cast<float>(round(rois_ptr[4]) + 1.) 
* p.scale - 0.5; // Force too small ROIs to be 1x1 float roi_w = max(roi_w_r - roi_w_l, 0.1); // avoid 0 float roi_h = max(roi_h_r - roi_h_l, 0.1); // Compute w and h at bottom float bin_sz_h = roi_h / static_cast<float>(p.pool_h); float bin_sz_w = roi_w / static_cast<float>(p.pool_w); float sub_bin_sz_h = bin_sz_h / static_cast<float>(p.sample_per_part); float sub_bin_sz_w = bin_sz_w / static_cast<float>(p.sample_per_part); int count = 0; int cls_id = ic / icpcls; float sum = 0, trans_x = 0, trans_y = 0; float hstart = static_cast<float>(ph) * bin_sz_h + roi_h_l; float wstart = static_cast<float>(pw) * bin_sz_w + roi_w_l; if (!p.no_trans) { int part_h = floor(static_cast<float>(ph) / p.pool_h * p.part_sz); int part_w = floor(static_cast<float>(pw) / p.pool_w * p.part_sz); int x_idx = (((n * p.nr_cls + cls_id) * 2) * p.part_sz + part_h) * p.part_sz + part_w; int y_idx = (((n * p.nr_cls + cls_id) * 2 + 1) * p.part_sz + part_h) * p.part_sz + part_w; trans_x = trans[x_idx] * static_cast<float>(p.trans_std); trans_y = trans[y_idx] * static_cast<float>(p.trans_std); } wstart += trans_x * roi_w; hstart += trans_y * roi_h; const float* data_ptr = data + (roi_batch_idx * p.IC + ic) * p.IH * p.IW; for (int ih = 0; ih < p.sample_per_part; ih++) { for (int iw = 0; iw < p.sample_per_part; iw++) { float w = wstart + iw * sub_bin_sz_w; float h = hstart + ih * sub_bin_sz_h; // bilinear interpolation if (w < -0.5 || w > p.IW - 0.5 || h < -0.5 || h > p.IH - 0.5) continue; w = min(max(w, 0.), p.IW - 1.); h = min(max(h, 0.), p.IH - 1.); float val = bilinear_interp(data_ptr, p.IH, p.IW, h, w); sum += val, count++; } } out_data[idx] = count == 0 ? (float)(0) : sum / count; out_count[idx] = count; } } __global__ void DeformablePSROIPoolBackwardAccKern( Param p, const float* data, const float* rois, const float* trans, const float* out_diff, const float* out_count, float* data_diff, float* trans_diff) { const int loops = p.nr_bbox * p.IC * p.pool_h * p.pool_w; const int icpcls = p.IC / p.nr_cls; KERN_FOR(idx, loops) { const int pw = idx % p.pool_w; const int ph = (idx / p.pool_w) % p.pool_h; const int ic = (idx / p.pool_w / p.pool_h) % p.IC; const int n = (idx / p.pool_w / p.pool_h / p.IC); const float* rois_ptr = &rois[n * 5]; int roi_batch_idx = rois_ptr[0]; float roi_w_l = static_cast<float>(round(rois_ptr[1])) * p.scale - 0.5; float roi_h_l = static_cast<float>(round(rois_ptr[2])) * p.scale - 0.5; float roi_w_r = static_cast<float>(round(rois_ptr[3]) + 1.) * p.scale - 0.5; float roi_h_r = static_cast<float>(round(rois_ptr[4]) + 1.) 
* p.scale - 0.5; // Force too small ROIs to be 1x1 float roi_w = max(roi_w_r - roi_w_l, 0.1); // avoid 0 float roi_h = max(roi_h_r - roi_h_l, 0.1); // Compute w and h at bottom float bin_sz_h = roi_h / static_cast<float>(p.pool_h); float bin_sz_w = roi_w / static_cast<float>(p.pool_w); float sub_bin_sz_h = bin_sz_h / static_cast<float>(p.sample_per_part); float sub_bin_sz_w = bin_sz_w / static_cast<float>(p.sample_per_part); int part_h = 0, part_w = 0, cls_id = ic / icpcls; float trans_x = 0, trans_y = 0; float wstart = static_cast<float>(pw) * bin_sz_w + roi_w_l; float hstart = static_cast<float>(ph) * bin_sz_h + roi_h_l; if (!p.no_trans) { part_h = floor(static_cast<float>(ph) / p.pool_h * p.part_sz); part_w = floor(static_cast<float>(pw) / p.pool_w * p.part_sz); int x_idx = (((n * p.nr_cls + cls_id) * 2) * p.part_sz + part_h) * p.part_sz + part_w; int y_idx = (((n * p.nr_cls + cls_id) * 2 + 1) * p.part_sz + part_h) * p.part_sz + part_w; trans_x = trans[x_idx] * static_cast<float>(p.trans_std); trans_y = trans[y_idx] * static_cast<float>(p.trans_std); } wstart += trans_x * roi_w; hstart += trans_y * roi_h; if (out_count[idx] <= 0) continue; float diff_val = out_diff[idx] / out_count[idx]; const int data_idx = (roi_batch_idx * p.IC + ic) * p.IH * p.IW; float* data_diff_ptr; const float* data_ptr; for (int ih = 0; ih < p.sample_per_part; ih++) { for (int iw = 0; iw < p.sample_per_part; iw++) { float w = wstart + iw * sub_bin_sz_w; float h = hstart + ih * sub_bin_sz_h; // bilinear interpolation if (w < -0.5 || w > p.IW - 0.5 || h < -0.5 || h > p.IH - 0.5) continue; w = min(max(w, 0.), p.IW - 1.), h = min(max(h, 0.), p.IH - 1.); // backward on feature int x0 = floor(w), x1 = ceil(w); int y0 = floor(h), y1 = ceil(h); float dist_x = w - x0, dist_y = h - y0; float q00 = (1 - dist_x) * (1 - dist_y); float q01 = (1 - dist_x) * dist_y; float q10 = dist_x * (1 - dist_y); float q11 = dist_x * dist_y; data_diff_ptr = &data_diff[data_idx]; atomicAdd(&data_diff_ptr[y0 * p.IW + x0], q00 * diff_val); atomicAdd(&data_diff_ptr[y1 * p.IW + x0], q01 * diff_val); atomicAdd(&data_diff_ptr[y0 * p.IW + x1], q10 * diff_val); atomicAdd(&data_diff_ptr[y1 * p.IW + x1], q11 * diff_val); if (p.no_trans) continue; data_ptr = &data[data_idx]; float U00 = data_ptr[y0 * p.IW + x0]; float U01 = data_ptr[y1 * p.IW + x0]; float U10 = data_ptr[y0 * p.IW + x1]; float U11 = data_ptr[y1 * p.IW + x1]; float diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * p.trans_std * diff_val; float diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * p.trans_std * diff_val; diff_x *= roi_w, diff_y *= roi_h; int diff_x_idx = (((n * p.nr_cls + cls_id) * 2) * p.part_sz + part_h) * p.part_sz + part_w; int diff_y_idx = (((n * p.nr_cls + cls_id) * 2 + 1) * p.part_sz + part_h) * p.part_sz + part_w; atomicAdd(&trans_diff[diff_x_idx], diff_x); atomicAdd(&trans_diff[diff_y_idx], diff_y); } } } } } // namespace namespace megdnn { namespace cuda { namespace deformable_ps_roi_pooling { void DeformablePSROIPoolForward( const TensorND& data, const TensorND& rois, const TensorND& trans, const TensorND& out_data, const TensorND& out_count, Param& p) { const int loops = p.nr_bbox * p.IC * p.pool_h * p.pool_w; int nr_thds = query_blocksize_for_kernel(DeformablePSROIPoolForwardKern); const int blks = DIVUP(loops, nr_thds); const float* data_ptr = data.ptr<float>(); const float* rois_ptr = rois.ptr<float>(); const float* trans_ptr = p.no_trans ? 
NULL : trans.ptr<float>(); float* out_data_ptr = out_data.ptr<float>(); float* out_count_ptr = out_count.ptr<float>(); auto&& out_data_elems = out_data.layout.total_nr_elems(); auto&& out_count_elems = out_count.layout.total_nr_elems(); size_t out_data_bytes = sizeof(float) * out_data_elems; size_t out_count_bytes = sizeof(float) * out_count_elems; hipMemsetAsync(out_data_ptr, 0, out_data_bytes, p.stream); hipMemsetAsync(out_count_ptr, 0, out_count_bytes, p.stream); hipLaunchKernelGGL(( DeformablePSROIPoolForwardKern), dim3(blks), dim3(nr_thds), 0, p.stream, p, data_ptr, rois_ptr, trans_ptr, out_data_ptr, out_count_ptr); after_kernel_launch(); } void DeformablePSROIPoolBackwardAcc( const TensorND& data, const TensorND& rois, const TensorND& trans, const TensorND& out_diff, const TensorND& out_count, const TensorND& data_diff, const TensorND& trans_diff, Param& p) { const int loops = p.nr_bbox * p.IC * p.pool_h * p.pool_w; int nr_thds = query_blocksize_for_kernel(DeformablePSROIPoolBackwardAccKern); const int blks = DIVUP(loops, nr_thds); const float* data_ptr = data.ptr<float>(); const float* rois_ptr = rois.ptr<float>(); const float* trans_ptr = p.no_trans ? NULL : trans.ptr<float>(); const float* out_diff_ptr = out_diff.ptr<float>(); const float* out_count_ptr = out_count.ptr<float>(); float* data_diff_ptr = data_diff.ptr<float>(); float* trans_diff_ptr = trans_diff.ptr<float>(); auto&& data_diff_elems = data_diff.layout.total_nr_elems(); auto&& trans_diff_elems = trans_diff.layout.total_nr_elems(); size_t data_diff_bytes = sizeof(float) * data_diff_elems; size_t trans_diff_bytes = sizeof(float) * trans_diff_elems; hipMemsetAsync(data_diff_ptr, 0, data_diff_bytes, p.stream); hipMemsetAsync(trans_diff_ptr, 0, trans_diff_bytes, p.stream); hipLaunchKernelGGL(( DeformablePSROIPoolBackwardAccKern), dim3(blks), dim3(nr_thds), 0, p.stream, p, data_ptr, rois_ptr, trans_ptr, out_diff_ptr, out_count_ptr, data_diff_ptr, trans_diff_ptr); after_kernel_launch(); } } // namespace deformable_ps_roi_pooling } // namespace cuda } // namespace megdnn // vim: ft=cuda syntax=cuda.doxygen
f23d3cec353acb1c007b75c3e5a79ff8957cfd8e.cu
/** * \file dnn/src/cuda/deformable_ps_roi_pooling/kimpl/kern.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/deformable_ps_roi_pooling/kimpl/kern.cuh" #include "src/cuda/query_blocksize.cuh" namespace { using Param = megdnn::cuda::deformable_ps_roi_pooling::Param; __device__ float bilinear_interp( const float* data, const int IH, const int IW, const float h, const float w) { int h1 = floor(h), h2 = ceil(h); int w1 = floor(w), w2 = ceil(w); float dist_h = (float)(h - h1); float dist_w = (float)(w - w1); float value11 = data[h1 * IW + w1]; float value12 = data[h2 * IW + w1]; float value21 = data[h1 * IW + w2]; float value22 = data[h2 * IW + w2]; float value = (1 - dist_w) * (1 - dist_h) * value11 + (1 - dist_w) * dist_h * value12 + dist_w * (1 - dist_h) * value21 + dist_w * dist_h * value22; return value; } __global__ void DeformablePSROIPoolForwardKern( Param p, const float* data, const float* rois, const float* trans, float* out_data, float* out_count) { const int loops = p.nr_bbox * p.IC * p.pool_h * p.pool_w; const int icpcls = p.IC / p.nr_cls; KERN_FOR(idx, loops) { const int pw = idx % p.pool_w; const int ph = (idx / p.pool_w) % p.pool_h; const int ic = (idx / p.pool_w / p.pool_h) % p.IC; const int n = (idx / p.pool_w / p.pool_h / p.IC); const float* rois_ptr = &rois[n * 5]; int roi_batch_idx = rois_ptr[0]; float roi_w_l = static_cast<float>(round(rois_ptr[1])) * p.scale - 0.5; float roi_h_l = static_cast<float>(round(rois_ptr[2])) * p.scale - 0.5; float roi_w_r = static_cast<float>(round(rois_ptr[3]) + 1.) * p.scale - 0.5; float roi_h_r = static_cast<float>(round(rois_ptr[4]) + 1.) 
* p.scale - 0.5; // Force too small ROIs to be 1x1 float roi_w = max(roi_w_r - roi_w_l, 0.1); // avoid 0 float roi_h = max(roi_h_r - roi_h_l, 0.1); // Compute w and h at bottom float bin_sz_h = roi_h / static_cast<float>(p.pool_h); float bin_sz_w = roi_w / static_cast<float>(p.pool_w); float sub_bin_sz_h = bin_sz_h / static_cast<float>(p.sample_per_part); float sub_bin_sz_w = bin_sz_w / static_cast<float>(p.sample_per_part); int count = 0; int cls_id = ic / icpcls; float sum = 0, trans_x = 0, trans_y = 0; float hstart = static_cast<float>(ph) * bin_sz_h + roi_h_l; float wstart = static_cast<float>(pw) * bin_sz_w + roi_w_l; if (!p.no_trans) { int part_h = floor(static_cast<float>(ph) / p.pool_h * p.part_sz); int part_w = floor(static_cast<float>(pw) / p.pool_w * p.part_sz); int x_idx = (((n * p.nr_cls + cls_id) * 2) * p.part_sz + part_h) * p.part_sz + part_w; int y_idx = (((n * p.nr_cls + cls_id) * 2 + 1) * p.part_sz + part_h) * p.part_sz + part_w; trans_x = trans[x_idx] * static_cast<float>(p.trans_std); trans_y = trans[y_idx] * static_cast<float>(p.trans_std); } wstart += trans_x * roi_w; hstart += trans_y * roi_h; const float* data_ptr = data + (roi_batch_idx * p.IC + ic) * p.IH * p.IW; for (int ih = 0; ih < p.sample_per_part; ih++) { for (int iw = 0; iw < p.sample_per_part; iw++) { float w = wstart + iw * sub_bin_sz_w; float h = hstart + ih * sub_bin_sz_h; // bilinear interpolation if (w < -0.5 || w > p.IW - 0.5 || h < -0.5 || h > p.IH - 0.5) continue; w = min(max(w, 0.), p.IW - 1.); h = min(max(h, 0.), p.IH - 1.); float val = bilinear_interp(data_ptr, p.IH, p.IW, h, w); sum += val, count++; } } out_data[idx] = count == 0 ? (float)(0) : sum / count; out_count[idx] = count; } } __global__ void DeformablePSROIPoolBackwardAccKern( Param p, const float* data, const float* rois, const float* trans, const float* out_diff, const float* out_count, float* data_diff, float* trans_diff) { const int loops = p.nr_bbox * p.IC * p.pool_h * p.pool_w; const int icpcls = p.IC / p.nr_cls; KERN_FOR(idx, loops) { const int pw = idx % p.pool_w; const int ph = (idx / p.pool_w) % p.pool_h; const int ic = (idx / p.pool_w / p.pool_h) % p.IC; const int n = (idx / p.pool_w / p.pool_h / p.IC); const float* rois_ptr = &rois[n * 5]; int roi_batch_idx = rois_ptr[0]; float roi_w_l = static_cast<float>(round(rois_ptr[1])) * p.scale - 0.5; float roi_h_l = static_cast<float>(round(rois_ptr[2])) * p.scale - 0.5; float roi_w_r = static_cast<float>(round(rois_ptr[3]) + 1.) * p.scale - 0.5; float roi_h_r = static_cast<float>(round(rois_ptr[4]) + 1.) 
* p.scale - 0.5; // Force too small ROIs to be 1x1 float roi_w = max(roi_w_r - roi_w_l, 0.1); // avoid 0 float roi_h = max(roi_h_r - roi_h_l, 0.1); // Compute w and h at bottom float bin_sz_h = roi_h / static_cast<float>(p.pool_h); float bin_sz_w = roi_w / static_cast<float>(p.pool_w); float sub_bin_sz_h = bin_sz_h / static_cast<float>(p.sample_per_part); float sub_bin_sz_w = bin_sz_w / static_cast<float>(p.sample_per_part); int part_h = 0, part_w = 0, cls_id = ic / icpcls; float trans_x = 0, trans_y = 0; float wstart = static_cast<float>(pw) * bin_sz_w + roi_w_l; float hstart = static_cast<float>(ph) * bin_sz_h + roi_h_l; if (!p.no_trans) { part_h = floor(static_cast<float>(ph) / p.pool_h * p.part_sz); part_w = floor(static_cast<float>(pw) / p.pool_w * p.part_sz); int x_idx = (((n * p.nr_cls + cls_id) * 2) * p.part_sz + part_h) * p.part_sz + part_w; int y_idx = (((n * p.nr_cls + cls_id) * 2 + 1) * p.part_sz + part_h) * p.part_sz + part_w; trans_x = trans[x_idx] * static_cast<float>(p.trans_std); trans_y = trans[y_idx] * static_cast<float>(p.trans_std); } wstart += trans_x * roi_w; hstart += trans_y * roi_h; if (out_count[idx] <= 0) continue; float diff_val = out_diff[idx] / out_count[idx]; const int data_idx = (roi_batch_idx * p.IC + ic) * p.IH * p.IW; float* data_diff_ptr; const float* data_ptr; for (int ih = 0; ih < p.sample_per_part; ih++) { for (int iw = 0; iw < p.sample_per_part; iw++) { float w = wstart + iw * sub_bin_sz_w; float h = hstart + ih * sub_bin_sz_h; // bilinear interpolation if (w < -0.5 || w > p.IW - 0.5 || h < -0.5 || h > p.IH - 0.5) continue; w = min(max(w, 0.), p.IW - 1.), h = min(max(h, 0.), p.IH - 1.); // backward on feature int x0 = floor(w), x1 = ceil(w); int y0 = floor(h), y1 = ceil(h); float dist_x = w - x0, dist_y = h - y0; float q00 = (1 - dist_x) * (1 - dist_y); float q01 = (1 - dist_x) * dist_y; float q10 = dist_x * (1 - dist_y); float q11 = dist_x * dist_y; data_diff_ptr = &data_diff[data_idx]; atomicAdd(&data_diff_ptr[y0 * p.IW + x0], q00 * diff_val); atomicAdd(&data_diff_ptr[y1 * p.IW + x0], q01 * diff_val); atomicAdd(&data_diff_ptr[y0 * p.IW + x1], q10 * diff_val); atomicAdd(&data_diff_ptr[y1 * p.IW + x1], q11 * diff_val); if (p.no_trans) continue; data_ptr = &data[data_idx]; float U00 = data_ptr[y0 * p.IW + x0]; float U01 = data_ptr[y1 * p.IW + x0]; float U10 = data_ptr[y0 * p.IW + x1]; float U11 = data_ptr[y1 * p.IW + x1]; float diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * p.trans_std * diff_val; float diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * p.trans_std * diff_val; diff_x *= roi_w, diff_y *= roi_h; int diff_x_idx = (((n * p.nr_cls + cls_id) * 2) * p.part_sz + part_h) * p.part_sz + part_w; int diff_y_idx = (((n * p.nr_cls + cls_id) * 2 + 1) * p.part_sz + part_h) * p.part_sz + part_w; atomicAdd(&trans_diff[diff_x_idx], diff_x); atomicAdd(&trans_diff[diff_y_idx], diff_y); } } } } } // namespace namespace megdnn { namespace cuda { namespace deformable_ps_roi_pooling { void DeformablePSROIPoolForward( const TensorND& data, const TensorND& rois, const TensorND& trans, const TensorND& out_data, const TensorND& out_count, Param& p) { const int loops = p.nr_bbox * p.IC * p.pool_h * p.pool_w; int nr_thds = query_blocksize_for_kernel(DeformablePSROIPoolForwardKern); const int blks = DIVUP(loops, nr_thds); const float* data_ptr = data.ptr<float>(); const float* rois_ptr = rois.ptr<float>(); const float* trans_ptr = p.no_trans ? 
NULL : trans.ptr<float>(); float* out_data_ptr = out_data.ptr<float>(); float* out_count_ptr = out_count.ptr<float>(); auto&& out_data_elems = out_data.layout.total_nr_elems(); auto&& out_count_elems = out_count.layout.total_nr_elems(); size_t out_data_bytes = sizeof(float) * out_data_elems; size_t out_count_bytes = sizeof(float) * out_count_elems; cudaMemsetAsync(out_data_ptr, 0, out_data_bytes, p.stream); cudaMemsetAsync(out_count_ptr, 0, out_count_bytes, p.stream); DeformablePSROIPoolForwardKern<<<blks, nr_thds, 0, p.stream>>>( p, data_ptr, rois_ptr, trans_ptr, out_data_ptr, out_count_ptr); after_kernel_launch(); } void DeformablePSROIPoolBackwardAcc( const TensorND& data, const TensorND& rois, const TensorND& trans, const TensorND& out_diff, const TensorND& out_count, const TensorND& data_diff, const TensorND& trans_diff, Param& p) { const int loops = p.nr_bbox * p.IC * p.pool_h * p.pool_w; int nr_thds = query_blocksize_for_kernel(DeformablePSROIPoolBackwardAccKern); const int blks = DIVUP(loops, nr_thds); const float* data_ptr = data.ptr<float>(); const float* rois_ptr = rois.ptr<float>(); const float* trans_ptr = p.no_trans ? NULL : trans.ptr<float>(); const float* out_diff_ptr = out_diff.ptr<float>(); const float* out_count_ptr = out_count.ptr<float>(); float* data_diff_ptr = data_diff.ptr<float>(); float* trans_diff_ptr = trans_diff.ptr<float>(); auto&& data_diff_elems = data_diff.layout.total_nr_elems(); auto&& trans_diff_elems = trans_diff.layout.total_nr_elems(); size_t data_diff_bytes = sizeof(float) * data_diff_elems; size_t trans_diff_bytes = sizeof(float) * trans_diff_elems; cudaMemsetAsync(data_diff_ptr, 0, data_diff_bytes, p.stream); cudaMemsetAsync(trans_diff_ptr, 0, trans_diff_bytes, p.stream); DeformablePSROIPoolBackwardAccKern<<<blks, nr_thds, 0, p.stream>>>( p, data_ptr, rois_ptr, trans_ptr, out_diff_ptr, out_count_ptr, data_diff_ptr, trans_diff_ptr); after_kernel_launch(); } } // namespace deformable_ps_roi_pooling } // namespace cuda } // namespace megdnn // vim: ft=cuda syntax=cuda.doxygen
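/*
 * A minimal host-side sketch (illustration only): the same four bilinear weights
 * that bilinear_interp above applies on the device, written out for a single
 * sample point; data is a row-major image with row stride IW.
 */
#include <cmath>

static float bilinear_ref(const float* data, int IW, float h, float w)
{
    int h1 = (int)std::floor(h), h2 = (int)std::ceil(h);
    int w1 = (int)std::floor(w), w2 = (int)std::ceil(w);
    float dh = h - h1, dw = w - w1;
    return (1 - dw) * (1 - dh) * data[h1 * IW + w1]    // top-left
         + (1 - dw) * dh       * data[h2 * IW + w1]    // bottom-left
         + dw       * (1 - dh) * data[h1 * IW + w2]    // top-right
         + dw       * dh       * data[h2 * IW + w2];   // bottom-right
}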
ab494ad7f09b27222758000955e03aa88ddf7100.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THHUNN/THHUNN.h> #include <TH/THHalf.h> #include <THHUNN/THHHalfAutoNumerics.cuh> #include <THH/THHTensor.hpp> #include <THH/THHStorage.hpp> #include <THHUNN/common.h> template <typename Dtype, typename Acctype> __global__ void #if __CUDA_ARCH__ >= 320 || defined __HIP_PLATFORM_HCC__ __launch_bounds__(CUDA_NUM_THREADS) #endif LRNFillScale(const int nthreads, const Dtype* const in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const in_off = in + offset; Dtype* const scale_off = scale + offset; int head = 0; const int pre_pad = (size - 1) / 2; const int post_pad = size - pre_pad - 1; Acctype accum_scale = Acctype(0); // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = ScalarConvert<Acctype, Dtype>::to(k + accum_scale * alpha_over_size); ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = ScalarConvert<Acctype, Dtype>::to(k + accum_scale * alpha_over_size); ++head; } } } template <typename Dtype> __global__ void LRNComputeOutput(const int nthreads, const Dtype* in, const Dtype* scale, const Dtype negative_beta, Dtype* out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } template <typename Dtype, typename Acctype> __global__ void LRNComputeDiff(const int nthreads, const Dtype* const bottom_data, const Dtype* const top_data, const Dtype* const scale, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const bottom_off = bottom_data + offset; const Dtype* const top_off = top_data + offset; const Dtype* const scale_off = scale + offset; const Dtype* const top_diff_off = top_diff + offset; Dtype* const bottom_diff_off = bottom_diff + offset; int head = 0; const int pre_pad = size - (size + 1) / 2; const int post_pad = size - pre_pad - 1; Acctype accum_ratio = Acctype(0); // accumulate values while (head < post_pad && head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * 
top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = ScalarConvert<Acctype, Dtype>::to(top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio); ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = ScalarConvert<Acctype, Dtype>::to(top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio); ++head; } } } #include <THHUNN/generic/SpatialCrossMapLRN.hip> #include <THH/THHGenerateFloatTypes.h>
ab494ad7f09b27222758000955e03aa88ddf7100.cu
#include <THCUNN/THCUNN.h>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>
#include <THCUNN/common.h>

template <typename Dtype, typename Acctype>
__global__ void
#if __CUDA_ARCH__ >= 320 || defined __HIP_PLATFORM_HCC__
__launch_bounds__(CUDA_NUM_THREADS)
#endif
LRNFillScale(const int nthreads, const Dtype* const in,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype alpha_over_size,
    const Dtype k, Dtype* const scale) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int n = index / width / height;
    const int offset = (n * channels * height + h) * width + w;
    const int step = height * width;
    const Dtype* const in_off = in + offset;
    Dtype* const scale_off = scale + offset;
    int head = 0;
    const int pre_pad = (size - 1) / 2;
    const int post_pad = size - pre_pad - 1;
    Acctype accum_scale = Acctype(0);
    // fill the scale at [n, :, h, w]
    // accumulate values
    while (head < post_pad && head < channels) {
      accum_scale += in_off[head * step] * in_off[head * step];
      ++head;
    }
    // both add and subtract
    while (head < channels) {
      accum_scale += in_off[head * step] * in_off[head * step];
      if (head - size >= 0) {
        accum_scale -= in_off[(head - size) * step]
                       * in_off[(head - size) * step];
      }
      scale_off[(head - post_pad) * step] =
          ScalarConvert<Acctype, Dtype>::to(k + accum_scale * alpha_over_size);
      ++head;
    }
    // subtract only
    while (head < channels + post_pad) {
      if (head - size >= 0) {
        accum_scale -= in_off[(head - size) * step]
                       * in_off[(head - size) * step];
      }
      scale_off[(head - post_pad) * step] =
          ScalarConvert<Acctype, Dtype>::to(k + accum_scale * alpha_over_size);
      ++head;
    }
  }
}

template <typename Dtype>
__global__ void LRNComputeOutput(const int nthreads, const Dtype* in,
    const Dtype* scale, const Dtype negative_beta, Dtype* out) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    out[index] = in[index] * pow(scale[index], negative_beta);
  }
}

template <typename Dtype, typename Acctype>
__global__ void LRNComputeDiff(const int nthreads,
    const Dtype* const bottom_data, const Dtype* const top_data,
    const Dtype* const scale, const Dtype* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype negative_beta,
    const Dtype cache_ratio, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int n = index / width / height;
    const int offset = (n * channels * height + h) * width + w;
    const int step = height * width;
    const Dtype* const bottom_off = bottom_data + offset;
    const Dtype* const top_off = top_data + offset;
    const Dtype* const scale_off = scale + offset;
    const Dtype* const top_diff_off = top_diff + offset;
    Dtype* const bottom_diff_off = bottom_diff + offset;
    int head = 0;
    const int pre_pad = size - (size + 1) / 2;
    const int post_pad = size - pre_pad - 1;
    Acctype accum_ratio = Acctype(0);
    // accumulate values
    while (head < post_pad && head < channels) {
      accum_ratio += top_diff_off[head * step] * top_off[head * step] /
          scale_off[head * step];
      ++head;
    }
    // both add and subtract
    while (head < channels) {
      accum_ratio += top_diff_off[head * step] * top_off[head * step] /
          scale_off[head * step];
      if (head - size >= 0) {
        accum_ratio -= top_diff_off[(head - size) * step] *
            top_off[(head - size) * step] / scale_off[(head - size) * step];
      }
      bottom_diff_off[(head - post_pad) * step] =
          ScalarConvert<Acctype, Dtype>::to(
              top_diff_off[(head - post_pad) * step] *
                  pow(scale_off[(head - post_pad) * step], negative_beta) -
              cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio);
      ++head;
    }
    // subtract only
    while (head < channels + post_pad) {
      if (head - size >= 0) {
        accum_ratio -= top_diff_off[(head - size) * step] *
            top_off[(head - size) * step] / scale_off[(head - size) * step];
      }
      bottom_diff_off[(head - post_pad) * step] =
          ScalarConvert<Acctype, Dtype>::to(
              top_diff_off[(head - post_pad) * step] *
                  pow(scale_off[(head - post_pad) * step], negative_beta) -
              cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio);
      ++head;
    }
  }
}

#include <THCUNN/generic/SpatialCrossMapLRN.cu>
#include <THC/THCGenerateFloatTypes.h>
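The three while-loops in LRNFillScale (and the mirrored ones in LRNComputeDiff) maintain a running sum over the channel window: grow the window, then slide it (add the new head, drop the element that fell out, emit one output), then drain it. A minimal host-side sketch of that pattern, not part of the original file; the function name lrn_fill_scale_1d and the toy sizes are illustrative only:

// Running-sum window over one channel column, mirroring LRNFillScale's loop structure.
#include <cstdio>
#include <vector>

void lrn_fill_scale_1d(const std::vector<float>& in, std::vector<float>& scale,
                       int size, float alpha_over_size, float k) {
    const int channels = (int)in.size();
    const int pre_pad  = (size - 1) / 2;
    const int post_pad = size - pre_pad - 1;
    float accum = 0.0f;
    int head = 0;
    // accumulate values: grow the window until it is post_pad ahead of the output index
    while (head < post_pad && head < channels) {
        accum += in[head] * in[head];
        ++head;
    }
    // both add and subtract: one element enters, one leaves, one output per step
    while (head < channels) {
        accum += in[head] * in[head];
        if (head - size >= 0) accum -= in[head - size] * in[head - size];
        scale[head - post_pad] = k + accum * alpha_over_size;
        ++head;
    }
    // subtract only: drain the window for the trailing outputs
    while (head < channels + post_pad) {
        if (head - size >= 0) accum -= in[head - size] * in[head - size];
        scale[head - post_pad] = k + accum * alpha_over_size;
        ++head;
    }
}

int main() {
    std::vector<float> in = {1, 2, 3, 4, 5}, scale(5);
    lrn_fill_scale_1d(in, scale, 3, 0.0001f / 3.0f, 2.0f);
    for (float s : scale) printf("%f\n", s);  // each entry is k + alpha/size * sum of squares in its window
    return 0;
}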
91a18098bab099cda9ed1ef2252718301dcc8425.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

float* get_vector(int n_size, float seed = 0.0) {
    // buffer create
    float* p_vector = (float*)malloc(n_size * sizeof(float));

    // initialize vector
    if (seed != 0.0) {
        for (int i = 0; i < n_size; i++) {
            p_vector[i] = seed * i;
        }
    }

    return p_vector;
}

void check_result(float* py, float* py_cuda, int n_size) {
    float compare = 0.0;
    for (int i = 0; i < n_size; i++) {
        compare += py[i] - py_cuda[i];
    }
    printf("Result: %f\n", compare);
}

// CPU
void saxpy(float* py, float* px, float alpha, int n_size) {
    for (int i = 0; i < n_size; i++) {
        py[i] = alpha * px[i] + py[i];
    }
}

// CUDA Kernel function
__global__ void d_saxpy(float* d_y, float* d_x, float alpha, int n_size) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    d_y[idx] = alpha * d_x[idx] + d_y[idx];
}

int main() {
    float *px, *py, *py_cuda;
    int n_size = 65536;

    px = get_vector(n_size, 0.01);
    py = get_vector(n_size, 0.05);
    py_cuda = get_vector(n_size);

    // Step 1. Create GPU memory
    float *d_x, *d_y;
    hipMalloc((void**)&d_x, n_size * sizeof(float));
    hipMalloc((void**)&d_y, n_size * sizeof(float));

    // Step 2. Copy to GPU memory
    hipMemcpy(d_x, px, n_size * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_y, py, n_size * sizeof(float), hipMemcpyHostToDevice);

    // Step 3. Kernel Call
    saxpy(py, px, 2.0, n_size);

    dim3 blockDim(16);
    dim3 gridDim((n_size + blockDim.x - 1) / blockDim.x);
    hipLaunchKernelGGL(( d_saxpy), dim3(gridDim), dim3(blockDim) , 0, 0, d_y, d_x, 2.0, n_size);

    // Step 4. Copy from GPU
    hipMemcpy(py_cuda, d_y, n_size * sizeof(float), hipMemcpyDeviceToHost);

    // Step 5. Check Result
    check_result(py, py_cuda, n_size);

    // Step 6. Finalize GPU memory
    hipFree(d_x);
    hipFree(d_y);

    free(px);
    free(py);
    free(py_cuda);

    return 0;
}
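For reference when reading this hipified file against the CUDA original that follows, a comment-only sketch (not part of either file) of the launch-syntax mapping that hipify applies:

// CUDA triple-chevron launch:
//     d_saxpy<<<gridDim, blockDim>>>(d_y, d_x, 2.0, n_size);
// becomes the HIP macro form:
//     hipLaunchKernelGGL((d_saxpy), dim3(gridDim), dim3(blockDim), 0, 0,
//                        d_y, d_x, 2.0, n_size);
// The two extra arguments (0, 0) are the dynamic shared-memory size in bytes and
// the stream, i.e. the optional third and fourth <<<...>>> launch parameters.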
91a18098bab099cda9ed1ef2252718301dcc8425.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

float* get_vector(int n_size, float seed = 0.0) {
    // buffer create
    float* p_vector = (float*)malloc(n_size * sizeof(float));

    // initialize vector
    if (seed != 0.0) {
        for (int i = 0; i < n_size; i++) {
            p_vector[i] = seed * i;
        }
    }

    return p_vector;
}

void check_result(float* py, float* py_cuda, int n_size) {
    float compare = 0.0;
    for (int i = 0; i < n_size; i++) {
        compare += py[i] - py_cuda[i];
    }
    printf("Result: %f\n", compare);
}

// CPU computation
void saxpy(float* py, float* px, float alpha, int n_size) {
    for (int i = 0; i < n_size; i++) {
        py[i] = alpha * px[i] + py[i];
    }
}

// CUDA Kernel function
__global__ void d_saxpy(float* d_y, float* d_x, float alpha, int n_size) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    d_y[idx] = alpha * d_x[idx] + d_y[idx];
}

int main() {
    float *px, *py, *py_cuda;
    int n_size = 65536;

    px = get_vector(n_size, 0.01);
    py = get_vector(n_size, 0.05);
    py_cuda = get_vector(n_size);

    // Step 1. Create GPU memory
    float *d_x, *d_y;
    cudaMalloc((void**)&d_x, n_size * sizeof(float));
    cudaMalloc((void**)&d_y, n_size * sizeof(float));

    // Step 2. Copy to GPU memory
    cudaMemcpy(d_x, px, n_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, py, n_size * sizeof(float), cudaMemcpyHostToDevice);

    // Step 3. Kernel Call
    saxpy(py, px, 2.0, n_size);

    dim3 blockDim(16);
    dim3 gridDim((n_size + blockDim.x - 1) / blockDim.x);
    d_saxpy<<< gridDim, blockDim >>>(d_y, d_x, 2.0, n_size);

    // Step 4. Copy from GPU
    cudaMemcpy(py_cuda, d_y, n_size * sizeof(float), cudaMemcpyDeviceToHost);

    // Step 5. Check Result
    check_result(py, py_cuda, n_size);

    // Step 6. Finalize GPU memory
    cudaFree(d_x);
    cudaFree(d_y);

    free(px);
    free(py);
    free(py_cuda);

    return 0;
}
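The launch above relies on n_size (65536) being an exact multiple of blockDim.x (16), so the rounded-up grid spawns no excess threads. A hedged variant, not part of the original file, showing the bounds guard and error check that an arbitrary n_size would need (d_saxpy_guarded is an illustrative name):

// Guarded kernel: threads past the end of the vectors do nothing.
__global__ void d_saxpy_guarded(float* d_y, float* d_x, float alpha, int n_size) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n_size) {
        d_y[idx] = alpha * d_x[idx] + d_y[idx];
    }
}

// After the launch, cudaGetLastError() reports launch failures, e.g.:
//     cudaError_t err = cudaGetLastError();
//     if (err != cudaSuccess) fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));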
193e40e2dcba40ec8c87f054a9a006cd3e023799.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/amp/check_finite_and_unscale_op.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T> __global__ void InverseAndMemset(const T* s, T* o, bool* found_inf) { *o = Inverse<T>(*s); *found_inf = false; } template <typename T, typename MT> __global__ void CheckFiniteAndUnscale(const T** xs, const MT* scale, int64_t size, int64_t* starts, bool* found_inf, T** outs) { const int64_t tid = threadIdx.x + blockIdx.x * blockDim.x; // copy starts array from global memory to shared memory extern __shared__ int64_t s_starts[]; for (int i = threadIdx.x; i <= size; i += blockDim.x) { s_starts[i] = starts[i]; } __syncthreads(); const int64_t num = s_starts[size]; int xs_index = 0; bool local_found_inf = false; const MT local_scale = *scale; for (int64_t idx = tid; idx < num; idx += gridDim.x * blockDim.x) { // get the "out" index of "id" // For example: // idx = 15, starts = [0, 10, 10, 20, 30] // because 10 <= idx < 20 ==> // the idx element locate in the 3rd tensor (notice the 2nd tensor size is // 0) int next_xs_index = xs_index; while (idx >= s_starts[next_xs_index]) next_xs_index++; xs_index = next_xs_index - 1; // get in data and out data const T* in = xs[xs_index]; T* out = outs[xs_index]; int64_t in_idx = idx - s_starts[xs_index]; // Unscale MT val = static_cast<MT>(in[in_idx]) * local_scale; T narrow_val = static_cast<T>(val); out[in_idx] = narrow_val; // CheckFinite if (!isfinite(narrow_val)) { local_found_inf = true; } } if (local_found_inf) { *found_inf = true; } } template <typename T> class CheckFiniteAndUnscaleGpuKernel : public framework::OpKernel<T> { using MPDType = typename details::MPTypeTrait<T>::Type; public: void Compute(const framework::ExecutionContext& ctx) const { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); const auto xs = ctx.MultiInput<framework::Tensor>("X"); const auto* scale = ctx.Input<framework::Tensor>("Scale"); auto outs = ctx.MultiOutput<framework::Tensor>("Out"); auto* found_inf = ctx.Output<framework::Tensor>("FoundInfinite"); const MPDType* scale_data = scale->data<MPDType>(); bool* found_inf_data = found_inf->mutable_data<bool>(dev_ctx.GetPlace()); framework::Tensor inverse_scale = ctx.AllocateTmpTensor<MPDType, platform::CUDADeviceContext>({1}, dev_ctx); MPDType* inverse_scale_v = inverse_scale.template data<MPDType>(); hipLaunchKernelGGL(( InverseAndMemset<MPDType>), dim3(1), dim3(1), 0, dev_ctx.stream(), scale_data, inverse_scale_v, found_inf_data); size_t xs_size = xs.size(); if (xs_size == 0) return; const auto& cpu_place = platform::CPUPlace(); // calculate each tensor's start index and copy to device auto h_starts_tensor = memory::Alloc(cpu_place, (xs_size + 1) * sizeof(int64_t)); int64_t* h_starts = 
reinterpret_cast<int64_t*>(h_starts_tensor->ptr()); auto d_starts_tensor = memory::Alloc(dev_ctx, (xs_size + 1) * sizeof(int64_t)); int64_t* d_starts = reinterpret_cast<int64_t*>(d_starts_tensor->ptr()); // the start index value of each tensor is // the sum of previous tensor's size. For example: // xs = [10, 0, 10, 10] ==> starts = [0, 10, 10, 20, 30] h_starts[0] = 0; for (int i = 1; i <= xs_size; i++) { h_starts[i] = h_starts[i - 1] + xs[i - 1]->numel(); } int64_t total_num = h_starts[xs_size]; memory::Copy(dev_ctx.GetPlace(), d_starts, cpu_place, h_starts, (xs_size + 1) * sizeof(int64_t), dev_ctx.stream()); // copy each tensor's data address to device auto h_mem = memory::Alloc(cpu_place, 2 * xs_size * sizeof(T*)); const T** h_xs = reinterpret_cast<const T**>(h_mem->ptr()); T** h_outs = reinterpret_cast<T**>(h_mem->ptr()) + xs_size; auto d_mem = memory::Alloc(dev_ctx, 2 * xs_size * sizeof(T*)); const T** d_xs = reinterpret_cast<const T**>(d_mem->ptr()); T** d_outs = reinterpret_cast<T**>(d_mem->ptr()) + xs_size; for (size_t i = 0; i < xs_size; ++i) { h_xs[i] = xs[i]->data<T>(); h_outs[i] = outs[i]->mutable_data<T>(dev_ctx.GetPlace()); } memory::Copy(dev_ctx.GetPlace(), d_xs, cpu_place, h_xs, 2 * xs_size * sizeof(T*), dev_ctx.stream()); // Launch Kernel int threads_per_block = ::min(static_cast<int64_t>(1024), total_num); int elements_per_block = threads_per_block * 20; // each thread deal with 20 number int blocks_per_grid = (total_num + elements_per_block - 1) / elements_per_block; VLOG(3) << "launch kernel"; hipLaunchKernelGGL(( CheckFiniteAndUnscale< T, MPDType>), dim3(blocks_per_grid), dim3(threads_per_block), (xs_size + 1) * sizeof(int64_t), dev_ctx.stream(), d_xs, inverse_scale_v, xs_size, d_starts, found_inf_data, d_outs); VLOG(3) << "finish kernel"; } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(check_finite_and_unscale, ops::CheckFiniteAndUnscaleGpuKernel<float>, ops::CheckFiniteAndUnscaleGpuKernel<double>, ops::CheckFiniteAndUnscaleGpuKernel<plat::float16>);
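The launch sizing near the end of the kernel class ("each thread deal with 20 number") makes every thread walk the grid-stride loop roughly 20 times. A small host-side sketch of that arithmetic, not part of the original file and using a made-up element count:

#include <algorithm>
#include <cstdio>

int main() {
    long long total_num = 123456;                        // illustrative element count
    int threads_per_block = (int)std::min(1024LL, total_num);
    int elements_per_block = threads_per_block * 20;     // each thread handles ~20 elements
    int blocks_per_grid = (int)((total_num + elements_per_block - 1) / elements_per_block);
    printf("blocks=%d threads=%d covered=%lld\n", blocks_per_grid, threads_per_block,
           (long long)blocks_per_grid * threads_per_block * 20);
    return 0;
}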
193e40e2dcba40ec8c87f054a9a006cd3e023799.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/amp/check_finite_and_unscale_op.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T> __global__ void InverseAndMemset(const T* s, T* o, bool* found_inf) { *o = Inverse<T>(*s); *found_inf = false; } template <typename T, typename MT> __global__ void CheckFiniteAndUnscale(const T** xs, const MT* scale, int64_t size, int64_t* starts, bool* found_inf, T** outs) { const int64_t tid = threadIdx.x + blockIdx.x * blockDim.x; // copy starts array from global memory to shared memory extern __shared__ int64_t s_starts[]; for (int i = threadIdx.x; i <= size; i += blockDim.x) { s_starts[i] = starts[i]; } __syncthreads(); const int64_t num = s_starts[size]; int xs_index = 0; bool local_found_inf = false; const MT local_scale = *scale; for (int64_t idx = tid; idx < num; idx += gridDim.x * blockDim.x) { // get the "out" index of "id" // For example: // idx = 15, starts = [0, 10, 10, 20, 30] // because 10 <= idx < 20 ==> // the idx element locate in the 3rd tensor (notice the 2nd tensor size is // 0) int next_xs_index = xs_index; while (idx >= s_starts[next_xs_index]) next_xs_index++; xs_index = next_xs_index - 1; // get in data and out data const T* in = xs[xs_index]; T* out = outs[xs_index]; int64_t in_idx = idx - s_starts[xs_index]; // Unscale MT val = static_cast<MT>(in[in_idx]) * local_scale; T narrow_val = static_cast<T>(val); out[in_idx] = narrow_val; // CheckFinite if (!isfinite(narrow_val)) { local_found_inf = true; } } if (local_found_inf) { *found_inf = true; } } template <typename T> class CheckFiniteAndUnscaleGpuKernel : public framework::OpKernel<T> { using MPDType = typename details::MPTypeTrait<T>::Type; public: void Compute(const framework::ExecutionContext& ctx) const { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); const auto xs = ctx.MultiInput<framework::Tensor>("X"); const auto* scale = ctx.Input<framework::Tensor>("Scale"); auto outs = ctx.MultiOutput<framework::Tensor>("Out"); auto* found_inf = ctx.Output<framework::Tensor>("FoundInfinite"); const MPDType* scale_data = scale->data<MPDType>(); bool* found_inf_data = found_inf->mutable_data<bool>(dev_ctx.GetPlace()); framework::Tensor inverse_scale = ctx.AllocateTmpTensor<MPDType, platform::CUDADeviceContext>({1}, dev_ctx); MPDType* inverse_scale_v = inverse_scale.template data<MPDType>(); InverseAndMemset<MPDType><<<1, 1, 0, dev_ctx.stream()>>>( scale_data, inverse_scale_v, found_inf_data); size_t xs_size = xs.size(); if (xs_size == 0) return; const auto& cpu_place = platform::CPUPlace(); // calculate each tensor's start index and copy to device auto h_starts_tensor = memory::Alloc(cpu_place, (xs_size + 1) * sizeof(int64_t)); int64_t* h_starts = reinterpret_cast<int64_t*>(h_starts_tensor->ptr()); auto d_starts_tensor = memory::Alloc(dev_ctx, (xs_size + 1) * sizeof(int64_t)); int64_t* 
d_starts = reinterpret_cast<int64_t*>(d_starts_tensor->ptr()); // the start index value of each tensor is // the sum of previous tensor's size. For example: // xs = [10, 0, 10, 10] ==> starts = [0, 10, 10, 20, 30] h_starts[0] = 0; for (int i = 1; i <= xs_size; i++) { h_starts[i] = h_starts[i - 1] + xs[i - 1]->numel(); } int64_t total_num = h_starts[xs_size]; memory::Copy(dev_ctx.GetPlace(), d_starts, cpu_place, h_starts, (xs_size + 1) * sizeof(int64_t), dev_ctx.stream()); // copy each tensor's data address to device auto h_mem = memory::Alloc(cpu_place, 2 * xs_size * sizeof(T*)); const T** h_xs = reinterpret_cast<const T**>(h_mem->ptr()); T** h_outs = reinterpret_cast<T**>(h_mem->ptr()) + xs_size; auto d_mem = memory::Alloc(dev_ctx, 2 * xs_size * sizeof(T*)); const T** d_xs = reinterpret_cast<const T**>(d_mem->ptr()); T** d_outs = reinterpret_cast<T**>(d_mem->ptr()) + xs_size; for (size_t i = 0; i < xs_size; ++i) { h_xs[i] = xs[i]->data<T>(); h_outs[i] = outs[i]->mutable_data<T>(dev_ctx.GetPlace()); } memory::Copy(dev_ctx.GetPlace(), d_xs, cpu_place, h_xs, 2 * xs_size * sizeof(T*), dev_ctx.stream()); // Launch Kernel int threads_per_block = std::min(static_cast<int64_t>(1024), total_num); int elements_per_block = threads_per_block * 20; // each thread deal with 20 number int blocks_per_grid = (total_num + elements_per_block - 1) / elements_per_block; VLOG(3) << "launch kernel"; CheckFiniteAndUnscale< T, MPDType><<<blocks_per_grid, threads_per_block, (xs_size + 1) * sizeof(int64_t), dev_ctx.stream()>>>( d_xs, inverse_scale_v, xs_size, d_starts, found_inf_data, d_outs); VLOG(3) << "finish kernel"; } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(check_finite_and_unscale, ops::CheckFiniteAndUnscaleGpuKernel<float>, ops::CheckFiniteAndUnscaleGpuKernel<double>, ops::CheckFiniteAndUnscaleGpuKernel<plat::float16>);
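The shared starts[] array gives each thread a cheap lookup from a flattened element index to (tensor, local offset): starts[i] is the running element count before tensor i, so the owning tensor is the last one whose start is <= idx, and empty tensors are skipped naturally. A minimal host-side sketch of that lookup, not part of the original file, reusing the example from the kernel comment:

#include <cstdio>

int main() {
    const long long starts[] = {0, 10, 10, 20, 30};   // tensor sizes 10, 0, 10, 10
    const int num_tensors = 4;
    long long idx = 15;
    int t = 0;
    while (t + 1 <= num_tensors && idx >= starts[t + 1]) ++t;   // advance past every start <= idx
    printf("idx %lld -> tensor %d, local index %lld\n", idx, t, idx - starts[t]);
    // prints: idx 15 -> tensor 2, local index 5 (the third tensor; the empty second one is skipped)
    return 0;
}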
2ae4d549e1628c97080f8fa0d853d9156e142b21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha, Arman Pazouki, Wei Hu // ============================================================================= // // Class for performing time integration in fluid system.// // ============================================================================= #include "chrono_fsi/physics/ChFluidDynamics.cuh" namespace chrono { namespace fsi { // ----------------------------------------------------------------------------- /// Device function to calculate the share of density influence on a given /// marker from all other markers in a given cell __device__ void collideCellDensityReInit(Real& numerator, Real& denominator, int3 gridPos, uint index, Real3 posRadA, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd) { //?c2 printf("grid pos %d %d %d \n", gridPos.x, gridPos.y, gridPos.z); uint gridHash = calcGridHash(gridPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real4 rhoPreMuB = sortedRhoPreMu[j]; Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML) continue; numerator += paramsD.markerMass * W3h(d, sortedPosRad[j].w); denominator += paramsD.markerMass / rhoPreMuB.x * W3h(d, sortedPosRad[j].w); } } } // ----------------------------------------------------------------------------- /// Kernel to apply periodic BC along x __global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.x > paramsD.cMax.x) { posRad.x -= (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMuD[index].y += paramsD.deltaPress.x; } return; } if (posRad.x < paramsD.cMin.x) { posRad.x += (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMuD[index].y -= paramsD.deltaPress.x; } return; } } // ----------------------------------------------------------------------------- /// Kernel to apply inlet/outlet BC along x __global__ void ApplyInletBoundaryXKernel(Real4* posRadD, Real3* VelMassD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (rhoPresMu.w > 0.0) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.x > paramsD.cMax.x) { posRad.x -= (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = 
mR4(posRad, h); if (rhoPresMu.w <= 0.0) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.x; rhoPresMuD[index] = rhoPresMu; } } if (posRad.x < paramsD.cMin.x) { posRad.x += (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); VelMassD[index] = mR3(paramsD.V_in.x, 0, 0); if (rhoPresMu.w <= -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.x; rhoPresMuD[index] = rhoPresMu; } } if (posRad.x > -paramsD.x_in) rhoPresMuD[index].y = 0; if (posRad.x < paramsD.x_in) { // Real vel = paramsD.V_in * 4 * (posRadD[index].z) * (0.41 - posRadD[index].z) / (0.41 * 0.41); VelMassD[index] = mR3(paramsD.V_in.x, 0, 0); } } // ----------------------------------------------------------------------------- /// Kernel to apply periodic BC along y __global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.y > paramsD.cMax.y) { posRad.y -= (paramsD.cMax.y - paramsD.cMin.y); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.y; rhoPresMuD[index] = rhoPresMu; } return; } if (posRad.y < paramsD.cMin.y) { posRad.y += (paramsD.cMax.y - paramsD.cMin.y); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.y; rhoPresMuD[index] = rhoPresMu; } return; } } // ----------------------------------------------------------------------------- /// Kernel to apply periodic BC along z __global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.z > paramsD.cMax.z) { posRad.z -= (paramsD.cMax.z - paramsD.cMin.z); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.z; rhoPresMuD[index] = rhoPresMu; } return; } if (posRad.z < paramsD.cMin.z) { posRad.z += (paramsD.cMax.z - paramsD.cMin.z); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.z; rhoPresMuD[index] = rhoPresMu; } return; } } // ----------------------------------------------------------------------------- /// Kernel to keep particle inside the simulation domain __global__ void ApplyOutOfBoundaryKernel(Real4* posRadD, Real4* rhoPresMuD, Real3* velMasD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real3 vel = mR3(velMasD[index]); Real h = posRadD[index].w; if (posRad.x > 0.5 * paramsD.boxDimX) { posRad.x = 0.5 * paramsD.boxDimX; // vel.x = 0.0; } if (posRad.x < -0.5 * paramsD.boxDimX) { posRad.x = -0.5 * paramsD.boxDimX; // vel.x = 0.0; } if (posRad.y > 0.5 * paramsD.boxDimY) { posRad.y = 0.5 * paramsD.boxDimY; // vel.y = 0.0; } if (posRad.y < -0.5 * paramsD.boxDimY) { posRad.y = -0.5 * paramsD.boxDimY; // vel.y = 0.0; } if (posRad.z > 1.0 * paramsD.boxDimZ) { posRad.z = 1.0 * 
paramsD.boxDimZ; // vel.z = 0.0; } if (posRad.z < -0.0 * paramsD.boxDimZ) { posRad.z = -0.0 * paramsD.boxDimZ; // vel.z = 0.0; } posRadD[index] = mR4(posRad, h); velMasD[index] = mR3(vel); return; } // ----------------------------------------------------------------------------- /// Kernel to update the fluid properities. /// It updates the density, velocity and position relying on explicit Euler /// scheme. Pressure is obtained from the density and an Equation of State. __global__ void UpdateFluidD(Real4* posRadD, Real3* velMasD, Real3* vel_XSPH_D, Real4* rhoPresMuD, Real4* derivVelRhoD, Real3* tauXxYyZzD, Real3* tauXyXzYzD, Real3* derivTauXxYyZzD, Real3* derivTauXyXzYzD, Real4* sr_tau_I_mu_iD, int2 updatePortion, Real dT, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; index += updatePortion.x; // updatePortion = [start, end] index of the update portion if (index >= updatePortion.y) { return; } Real4 derivVelRho = derivVelRhoD[index]; Real4 rhoPresMu = rhoPresMuD[index]; Real h = posRadD[index].w; Real p_tr, p_n; if (rhoPresMu.w < 0) { if (paramsD.elastic_SPH) { // This is only implemented for granular material //-------------------------------- // ** shear stress tau //-------------------------------- Real3 tauXxYyZz = tauXxYyZzD[index]; Real3 tauXyXzYz = tauXyXzYzD[index]; Real3 derivTauXxYyZz = derivTauXxYyZzD[index]; Real3 derivTauXyXzYz = derivTauXyXzYzD[index]; Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT; Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT; // check if there is a plastic flow p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z); // rhoPresMu.y; tauXxYyZz.x += p_n; tauXxYyZz.y += p_n; tauXxYyZz.z += p_n; p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z); // rhoPresMu.y; updatedTauXxYyZz.x += p_tr; updatedTauXxYyZz.y += p_tr; updatedTauXxYyZz.z += p_tr; Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) + square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) + 2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z); Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) + 2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z); tau_tr = sqrt(0.5 * tau_tr); tau_n = sqrt(0.5 * tau_n); Real Chi = abs(tau_tr - tau_n) / dT / paramsD.G_shear; // should use the positive magnitude according to "A // constitutive law for dense granular flows" Nature 2006 if (p_tr > 0.0e0) { Real mu_s = paramsD.mu_fric_s; Real mu_2 = paramsD.mu_fric_2; // Real s_0 = mu_s * p_tr; // Real s_2 = mu_2 * p_tr; // Real xi = 1.1; Real dia = paramsD.ave_diam; Real I0 = paramsD.mu_I0; // xi*dia*sqrt(rhoPresMu.x);// Real I = Chi * dia * sqrt(paramsD.rho0 / p_tr); Real mu = mu_s + (mu_2 - mu_s) / (I0 / (I + 1.0E-9) + 1.0); // Real G0 = paramsD.G_shear; // Real alpha = xi*G0*I0*(paramsD.dT)*sqrt(p_tr); // Real B0 = s_2 + tau_tr + alpha; // Real H0 = s_2*tau_tr + s_0*alpha; // Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9); // if(tau_tr>s_0){ // Real coeff = tau_n1/(tau_tr+1e-9); // updatedTauXxYyZz = updatedTauXxYyZz*coeff; // updatedTauXyXzYz = updatedTauXyXzYz*coeff; // } Real tau_max = p_tr * mu; // p_tr*paramsD.Q_FA; // if (tau_tr > tau_max) { // should use tau_max instead of s_0 according to "A constitutive law for dense // granular flows" Nature 2006 Real coeff = tau_max / (tau_tr + 1e-9); updatedTauXxYyZz = updatedTauXxYyZz * coeff; updatedTauXyXzYz = updatedTauXyXzYz * coeff; } } if (p_tr < 0.0e0) { 
updatedTauXxYyZz = mR3(0.0); updatedTauXyXzYz = mR3(0.0); p_tr = 0.0; } if (derivVelRho.w < 0.0e0) { updatedTauXxYyZz = mR3(0.0); updatedTauXyXzYz = mR3(0.0); p_tr = 0.0; } tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) + square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) + 2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z); tau_tr = sqrt(0.5 * tau_tr); sr_tau_I_mu_iD[index].y = tau_tr; tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr); tauXyXzYzD[index] = updatedTauXyXzYz; } //------------- // ** position //------------- Real3 vel_XSPH = velMasD[index] + paramsD.EPS_XSPH * vel_XSPH_D[index]; Real3 posRad = mR3(posRadD[index]); Real3 updatedPositon = posRad + vel_XSPH * dT; if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) { printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n"); *isErrorD = true; return; } posRadD[index] = mR4(updatedPositon, h); //------------- // ** velocity //------------- // Note that the velocity update should not use the XSPH contribution // It adds dissipation to the solution, and provides numerical damping Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index]; // paramsD.EPS_XSPH * vel_XSPH_D[index] Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT; velMasD[index] = updatedVelocity; //------------- // ** density //------------- if (paramsD.elastic_SPH) { // This is only implemented for granular material rhoPresMu.y = p_tr; rhoPresMu.x = paramsD.rho0; } else { Real rho2 = rhoPresMu.x + derivVelRho.w * dT; rhoPresMu.y = Eos(rho2, rhoPresMu.w); rhoPresMu.x = rho2; } if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) { printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n"); *isErrorD = true; return; } rhoPresMuD[index] = rhoPresMu; // rhoPresMuD updated } /// Important note: the derivVelRhoD that is calculated by the ChForceExplicitSPH is the negative of actual time /// derivative. That is important to keep the derivVelRhoD to be the force/mass for fsi forces. // calculate the force that is f=m dv/dt derivVelRhoD[index] *= paramsD.markerMass; } //------------------------------------------------------------------------------ __global__ void Update_Fluid_State(Real3* new_vel, // input: sorted velocities, Real3* vis_vel, // input: sorted velocities, Real4* posRad, // input: sorted positions Real3* velMas, Real4* rhoPreMu, int4 updatePortion, const size_t numAllMarkers, double dT, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= updatePortion.y) return; velMas[i_idx] = velMas[i_idx] + paramsD.EPS_XSPH * new_vel[i_idx]; // printf("%f=", new_vel[i_idx].z); Real3 newpos = mR3(posRad[i_idx]) + dT * velMas[i_idx]; Real h = posRad[i_idx].w; posRad[i_idx] = mR4(newpos, h); if (!(isfinite(posRad[i_idx].x) && isfinite(posRad[i_idx].y) && isfinite(posRad[i_idx].z))) { printf("Error! particle %d position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel %f,%f,%f,%f\n", i_idx, posRad[i_idx].x, posRad[i_idx].y, posRad[i_idx].z, posRad[i_idx].w); } if (!(isfinite(rhoPreMu[i_idx].x) && isfinite(rhoPreMu[i_idx].y) && isfinite(rhoPreMu[i_idx].z))) { printf("Error! particle %d rhoPreMu is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel ! 
%f,%f,%f,%f\n", i_idx, rhoPreMu[i_idx].x, rhoPreMu[i_idx].y, rhoPreMu[i_idx].z, rhoPreMu[i_idx].w); } if (!(isfinite(velMas[i_idx].x) && isfinite(velMas[i_idx].y) && isfinite(velMas[i_idx].z))) { printf("Error! particle %d velocity is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !%f,%f,%f\n", i_idx, velMas[i_idx].x, velMas[i_idx].y, velMas[i_idx].z); } } // ----------------------------------------------------------------------------- /// Kernel for updating the density. /// It calculates the density of the markers. It does include the normalization /// close to the boundaries and free surface. __global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd, size_t numAllMarkers) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numAllMarkers) return; // read particle data from sorted arrays Real3 posRadA = mR3(sortedPosRad[index]); Real4 rhoPreMuA = sortedRhoPreMu[index]; /// If density initialization should only be applied to fluid markers // if (rhoPreMuA.w > -.1) // return; // get address in grid int3 gridPos = calcGridPos(posRadA); Real numerator = 0.0; Real denominator = 0.0; // examine neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); collideCellDensityReInit(numerator, denominator, neighbourPos, index, posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd); } } } rhoPreMuA.x = numerator; /// denominator; // rhoPreMuA.y = Eos(rhoPreMuA.x, rhoPreMuA.w); dummySortedRhoPreMu[index] = rhoPreMuA; } // ----------------------------------------------------------------------------- // CLASS FOR FLUID DYNAMICS SYSTEM // ----------------------------------------------------------------------------- ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<ChFsiDataManager> otherFsiData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<NumberOfObjects> otherNumObjects, ChFluidDynamics::Integrator type) : fsiData(otherFsiData), paramsH(otherParamsH), numObjectsH(otherNumObjects) { myIntegrator = type; switch (myIntegrator) { case ChFluidDynamics::Integrator::I2SPH: forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>(otherBceWorker, fsiData->sortedSphMarkersD, fsiData->markersProximityD, fsiData->fsiGeneralData, paramsH, numObjectsH); printf("Created an I2SPH frame work.\n"); break; case ChFluidDynamics::Integrator::IISPH: forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(otherBceWorker, fsiData->sortedSphMarkersD, fsiData->markersProximityD, fsiData->fsiGeneralData, paramsH, numObjectsH); printf("Created an IISPH framework.\n"); break; case ChFluidDynamics::Integrator::ExplicitSPH: forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>( otherBceWorker, fsiData->sortedSphMarkersD, fsiData->markersProximityD, fsiData->fsiGeneralData, paramsH, numObjectsH); printf("Created an ExplicitSPHframe work.\n"); break; /// Extend this function with your own linear solvers default: forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(otherBceWorker, fsiData->sortedSphMarkersD, fsiData->markersProximityD, fsiData->fsiGeneralData, paramsH, numObjectsH); std::cout << "The ChFsiForce you chose has not been implemented, reverting back to " "ChFsiForceIISPH\n"; } } // ----------------------------------------------------------------------------- void ChFluidDynamics::Finalize() { 
printf("ChFluidDynamics::Finalize()\n"); forceSystem->Finalize(); hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects)); hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams)); } // ----------------------------------------------------------------------------- ChFluidDynamics::~ChFluidDynamics() {} // ----------------------------------------------------------------------------- void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2, std::shared_ptr<SphMarkerDataD> sphMarkersD1, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD, Real dT) { if (GetIntegratorType() == ChFluidDynamics::Integrator::ExplicitSPH) forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD); else forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD); if (myIntegrator == ChFluidDynamics::Integrator::IISPH) this->UpdateFluid_Implicit(sphMarkersD2); else if (GetIntegratorType() == ChFluidDynamics::Integrator::ExplicitSPH) this->UpdateFluid(sphMarkersD1, dT); this->ApplyBoundarySPH_Markers(sphMarkersD2); } // ----------------------------------------------------------------------------- void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) { int2 updatePortion = mI2(0, fsiData->fsiGeneralData->referenceArray[fsiData->fsiGeneralData->referenceArray.size() - 1].y); bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); //------------------------ uint nBlock_UpdateFluid, nThreads; computeGridSize(updatePortion.y - updatePortion.x, 256, nBlock_UpdateFluid, nThreads); hipLaunchKernelGGL(( UpdateFluidD), dim3(nBlock_UpdateFluid), dim3(nThreads), 0, 0, mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR3CAST(fsiData->fsiGeneralData->vel_XSPH_D), mR4CAST(sphMarkersD->rhoPresMuD), mR4CAST(fsiData->fsiGeneralData->derivVelRhoD_old), mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD), mR3CAST(fsiData->fsiGeneralData->derivTauXxYyZzD), mR3CAST(fsiData->fsiGeneralData->derivTauXyXzYzD), mR4CAST(fsiData->fsiGeneralData->sr_tau_I_mu_i), updatePortion, dT, isErrorD); hipDeviceSynchronize(); cudaCheckError(); //------------------------ hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed in UpdateFluidD!\n"); } hipFree(isErrorD); free(isErrorH); } void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint numThreads, numBlocks; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0; int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 
1 : 0; int4 updatePortion = mI4(fsiData->fsiGeneralData->referenceArray[haveHelper].x, fsiData->fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0); // std::cout << "Skipping the markers greater than " // << fsiData->fsiGeneralData->referenceArray[haveHelper + haveGhost].y << " in position update\n"; std::cout << "time step in UpdateFluid_Implicit " << paramsH->dT << std::endl; bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Update_Fluid_State), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(fsiData->fsiGeneralData->vel_XSPH_D), mR3CAST(fsiData->fsiGeneralData->vis_vel_SPH_D), mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD), updatePortion, numObjectsH->numAllMarkers, paramsH->dT, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed in Update_Fluid_State!\n"); } //------------------------------------------------------------------------ hipFree(isErrorD); free(isErrorH); } // ----------------------------------------------------------------------------- /** * @brief ApplyBoundarySPH_Markers * @details * applies periodic boundary conditions in x,y, and z directions */ void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint nBlock_NumSpheres, nThreads_SphMarkers; computeGridSize((int)numObjectsH->numAllMarkers, 256, nBlock_NumSpheres, nThreads_SphMarkers); hipLaunchKernelGGL(( ApplyPeriodicBoundaryXKernel), dim3(nBlock_NumSpheres), dim3(nThreads_SphMarkers), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); hipDeviceSynchronize(); cudaCheckError(); hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(nBlock_NumSpheres), dim3(nThreads_SphMarkers), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); hipDeviceSynchronize(); cudaCheckError(); hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(nBlock_NumSpheres), dim3(nThreads_SphMarkers), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); hipDeviceSynchronize(); cudaCheckError(); // ApplyOutOfBoundaryKernel<<<nBlock_NumSpheres, nThreads_SphMarkers>>>(mR4CAST(sphMarkersD->posRadD), // mR4CAST(sphMarkersD->rhoPresMuD), // mR3CAST(sphMarkersD->velMasD)); // hipDeviceSynchronize(); // cudaCheckError(); } /** * @brief ApplyBoundarySPH_Markers * @details * applies periodic boundary conditions in y, and z. The inlet/outlet BC is applied in the x direction. * This functions needs to be tested. 
*/ void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint nBlock_NumSpheres, nThreads_SphMarkers; computeGridSize((int)numObjectsH->numAllMarkers, 256, nBlock_NumSpheres, nThreads_SphMarkers); hipLaunchKernelGGL(( ApplyInletBoundaryXKernel), dim3(nBlock_NumSpheres), dim3(nThreads_SphMarkers), 0, 0, mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD)); hipDeviceSynchronize(); cudaCheckError(); // these are useful anyway for out of bound particles hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(nBlock_NumSpheres), dim3(nThreads_SphMarkers), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); hipDeviceSynchronize(); cudaCheckError(); hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(nBlock_NumSpheres), dim3(nThreads_SphMarkers), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); hipDeviceSynchronize(); cudaCheckError(); } // ----------------------------------------------------------------------------- void ChFluidDynamics::DensityReinitialization() { uint nBlock_NumSpheres, nThreads_SphMarkers; computeGridSize((int)numObjectsH->numAllMarkers, 256, nBlock_NumSpheres, nThreads_SphMarkers); thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers); thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0)); hipLaunchKernelGGL(( ReCalcDensityD_F1), dim3(nBlock_NumSpheres), dim3(nThreads_SphMarkers), 0, 0, mR4CAST(dummySortedRhoPreMu), mR4CAST(fsiData->sortedSphMarkersD->posRadD), mR3CAST(fsiData->sortedSphMarkersD->velMasD), mR4CAST(fsiData->sortedSphMarkersD->rhoPresMuD), U1CAST(fsiData->markersProximityD->gridMarkerIndexD), U1CAST(fsiData->markersProximityD->cellStartD), U1CAST(fsiData->markersProximityD->cellEndD), numObjectsH->numAllMarkers); hipDeviceSynchronize(); cudaCheckError(); ChFsiForce::CopySortedToOriginal_NonInvasive_R4(fsiData->sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu, fsiData->markersProximityD->gridMarkerIndexD); ChFsiForce::CopySortedToOriginal_NonInvasive_R4(fsiData->sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu, fsiData->markersProximityD->gridMarkerIndexD); dummySortedRhoPreMu.clear(); } } // namespace fsi } // end namespace chrono
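The three ApplyPeriodicBoundary*Kernel functions follow the same per-marker pattern: a coordinate that leaves [cMin, cMax) is shifted back by the domain length, and the stored pressure of fluid markers is offset by deltaPress so the driving pressure gradient stays consistent across the periodic face. A minimal host-side sketch of that pattern for one axis, not part of the original file (Domain, wrap_x and the numeric values are illustrative only):

#include <cstdio>

struct Domain { double cMin, cMax, deltaPress; };

// Wrap one coordinate and offset the pressure of fluid markers (boundary markers keep theirs).
void wrap_x(double& x, double& pressure, bool is_fluid, const Domain& d) {
    const double L = d.cMax - d.cMin;
    if (x > d.cMax)      { x -= L; if (is_fluid) pressure += d.deltaPress; }
    else if (x < d.cMin) { x += L; if (is_fluid) pressure -= d.deltaPress; }
}

int main() {
    Domain d{-1.0, 1.0, 50.0};
    double x = 1.2, p = 100.0;
    wrap_x(x, p, true, d);
    printf("x = %f, p = %f\n", x, p);   // x wrapped to -0.8, pressure offset by +deltaPress
    return 0;
}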
2ae4d549e1628c97080f8fa0d853d9156e142b21.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha, Arman Pazouki, Wei Hu // ============================================================================= // // Class for performing time integration in fluid system.// // ============================================================================= #include "chrono_fsi/physics/ChFluidDynamics.cuh" namespace chrono { namespace fsi { // ----------------------------------------------------------------------------- /// Device function to calculate the share of density influence on a given /// marker from all other markers in a given cell __device__ void collideCellDensityReInit(Real& numerator, Real& denominator, int3 gridPos, uint index, Real3 posRadA, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd) { //?c2 printf("grid pos %d %d %d \n", gridPos.x, gridPos.y, gridPos.z); uint gridHash = calcGridHash(gridPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real4 rhoPreMuB = sortedRhoPreMu[j]; Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML) continue; numerator += paramsD.markerMass * W3h(d, sortedPosRad[j].w); denominator += paramsD.markerMass / rhoPreMuB.x * W3h(d, sortedPosRad[j].w); } } } // ----------------------------------------------------------------------------- /// Kernel to apply periodic BC along x __global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.x > paramsD.cMax.x) { posRad.x -= (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMuD[index].y += paramsD.deltaPress.x; } return; } if (posRad.x < paramsD.cMin.x) { posRad.x += (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMuD[index].y -= paramsD.deltaPress.x; } return; } } // ----------------------------------------------------------------------------- /// Kernel to apply inlet/outlet BC along x __global__ void ApplyInletBoundaryXKernel(Real4* posRadD, Real3* VelMassD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (rhoPresMu.w > 0.0) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.x > paramsD.cMax.x) { posRad.x -= (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w <= 0.0) { rhoPresMu.y = rhoPresMu.y + 
paramsD.deltaPress.x; rhoPresMuD[index] = rhoPresMu; } } if (posRad.x < paramsD.cMin.x) { posRad.x += (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); VelMassD[index] = mR3(paramsD.V_in.x, 0, 0); if (rhoPresMu.w <= -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.x; rhoPresMuD[index] = rhoPresMu; } } if (posRad.x > -paramsD.x_in) rhoPresMuD[index].y = 0; if (posRad.x < paramsD.x_in) { // Real vel = paramsD.V_in * 4 * (posRadD[index].z) * (0.41 - posRadD[index].z) / (0.41 * 0.41); VelMassD[index] = mR3(paramsD.V_in.x, 0, 0); } } // ----------------------------------------------------------------------------- /// Kernel to apply periodic BC along y __global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.y > paramsD.cMax.y) { posRad.y -= (paramsD.cMax.y - paramsD.cMin.y); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.y; rhoPresMuD[index] = rhoPresMu; } return; } if (posRad.y < paramsD.cMin.y) { posRad.y += (paramsD.cMax.y - paramsD.cMin.y); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.y; rhoPresMuD[index] = rhoPresMu; } return; } } // ----------------------------------------------------------------------------- /// Kernel to apply periodic BC along z __global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.z > paramsD.cMax.z) { posRad.z -= (paramsD.cMax.z - paramsD.cMin.z); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.z; rhoPresMuD[index] = rhoPresMu; } return; } if (posRad.z < paramsD.cMin.z) { posRad.z += (paramsD.cMax.z - paramsD.cMin.z); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.z; rhoPresMuD[index] = rhoPresMu; } return; } } // ----------------------------------------------------------------------------- /// Kernel to keep particle inside the simulation domain __global__ void ApplyOutOfBoundaryKernel(Real4* posRadD, Real4* rhoPresMuD, Real3* velMasD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) { return; } Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) { return; } // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real3 vel = mR3(velMasD[index]); Real h = posRadD[index].w; if (posRad.x > 0.5 * paramsD.boxDimX) { posRad.x = 0.5 * paramsD.boxDimX; // vel.x = 0.0; } if (posRad.x < -0.5 * paramsD.boxDimX) { posRad.x = -0.5 * paramsD.boxDimX; // vel.x = 0.0; } if (posRad.y > 0.5 * paramsD.boxDimY) { posRad.y = 0.5 * paramsD.boxDimY; // vel.y = 0.0; } if (posRad.y < -0.5 * paramsD.boxDimY) { posRad.y = -0.5 * paramsD.boxDimY; // vel.y = 0.0; } if (posRad.z > 1.0 * paramsD.boxDimZ) { posRad.z = 1.0 * paramsD.boxDimZ; // vel.z = 0.0; } if (posRad.z < -0.0 * 
paramsD.boxDimZ) { posRad.z = -0.0 * paramsD.boxDimZ; // vel.z = 0.0; } posRadD[index] = mR4(posRad, h); velMasD[index] = mR3(vel); return; } // ----------------------------------------------------------------------------- /// Kernel to update the fluid properities. /// It updates the density, velocity and position relying on explicit Euler /// scheme. Pressure is obtained from the density and an Equation of State. __global__ void UpdateFluidD(Real4* posRadD, Real3* velMasD, Real3* vel_XSPH_D, Real4* rhoPresMuD, Real4* derivVelRhoD, Real3* tauXxYyZzD, Real3* tauXyXzYzD, Real3* derivTauXxYyZzD, Real3* derivTauXyXzYzD, Real4* sr_tau_I_mu_iD, int2 updatePortion, Real dT, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; index += updatePortion.x; // updatePortion = [start, end] index of the update portion if (index >= updatePortion.y) { return; } Real4 derivVelRho = derivVelRhoD[index]; Real4 rhoPresMu = rhoPresMuD[index]; Real h = posRadD[index].w; Real p_tr, p_n; if (rhoPresMu.w < 0) { if (paramsD.elastic_SPH) { // This is only implemented for granular material //-------------------------------- // ** shear stress tau //-------------------------------- Real3 tauXxYyZz = tauXxYyZzD[index]; Real3 tauXyXzYz = tauXyXzYzD[index]; Real3 derivTauXxYyZz = derivTauXxYyZzD[index]; Real3 derivTauXyXzYz = derivTauXyXzYzD[index]; Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT; Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT; // check if there is a plastic flow p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z); // rhoPresMu.y; tauXxYyZz.x += p_n; tauXxYyZz.y += p_n; tauXxYyZz.z += p_n; p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z); // rhoPresMu.y; updatedTauXxYyZz.x += p_tr; updatedTauXxYyZz.y += p_tr; updatedTauXxYyZz.z += p_tr; Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) + square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) + 2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z); Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) + 2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z); tau_tr = sqrt(0.5 * tau_tr); tau_n = sqrt(0.5 * tau_n); Real Chi = abs(tau_tr - tau_n) / dT / paramsD.G_shear; // should use the positive magnitude according to "A // constitutive law for dense granular flows" Nature 2006 if (p_tr > 0.0e0) { Real mu_s = paramsD.mu_fric_s; Real mu_2 = paramsD.mu_fric_2; // Real s_0 = mu_s * p_tr; // Real s_2 = mu_2 * p_tr; // Real xi = 1.1; Real dia = paramsD.ave_diam; Real I0 = paramsD.mu_I0; // xi*dia*sqrt(rhoPresMu.x);// Real I = Chi * dia * sqrt(paramsD.rho0 / p_tr); Real mu = mu_s + (mu_2 - mu_s) / (I0 / (I + 1.0E-9) + 1.0); // Real G0 = paramsD.G_shear; // Real alpha = xi*G0*I0*(paramsD.dT)*sqrt(p_tr); // Real B0 = s_2 + tau_tr + alpha; // Real H0 = s_2*tau_tr + s_0*alpha; // Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9); // if(tau_tr>s_0){ // Real coeff = tau_n1/(tau_tr+1e-9); // updatedTauXxYyZz = updatedTauXxYyZz*coeff; // updatedTauXyXzYz = updatedTauXyXzYz*coeff; // } Real tau_max = p_tr * mu; // p_tr*paramsD.Q_FA; // if (tau_tr > tau_max) { // should use tau_max instead of s_0 according to "A constitutive law for dense // granular flows" Nature 2006 Real coeff = tau_max / (tau_tr + 1e-9); updatedTauXxYyZz = updatedTauXxYyZz * coeff; updatedTauXyXzYz = updatedTauXyXzYz * coeff; } } if (p_tr < 0.0e0) { updatedTauXxYyZz = mR3(0.0); updatedTauXyXzYz = mR3(0.0); p_tr = 0.0; 
} if (derivVelRho.w < 0.0e0) { updatedTauXxYyZz = mR3(0.0); updatedTauXyXzYz = mR3(0.0); p_tr = 0.0; } tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) + square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) + 2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z); tau_tr = sqrt(0.5 * tau_tr); sr_tau_I_mu_iD[index].y = tau_tr; tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr); tauXyXzYzD[index] = updatedTauXyXzYz; } //------------- // ** position //------------- Real3 vel_XSPH = velMasD[index] + paramsD.EPS_XSPH * vel_XSPH_D[index]; Real3 posRad = mR3(posRadD[index]); Real3 updatedPositon = posRad + vel_XSPH * dT; if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) { printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n"); *isErrorD = true; return; } posRadD[index] = mR4(updatedPositon, h); //------------- // ** velocity //------------- // Note that the velocity update should not use the XSPH contribution // It adds dissipation to the solution, and provides numerical damping Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index]; // paramsD.EPS_XSPH * vel_XSPH_D[index] Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT; velMasD[index] = updatedVelocity; //------------- // ** density //------------- if (paramsD.elastic_SPH) { // This is only implemented for granular material rhoPresMu.y = p_tr; rhoPresMu.x = paramsD.rho0; } else { Real rho2 = rhoPresMu.x + derivVelRho.w * dT; rhoPresMu.y = Eos(rho2, rhoPresMu.w); rhoPresMu.x = rho2; } if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) { printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n"); *isErrorD = true; return; } rhoPresMuD[index] = rhoPresMu; // rhoPresMuD updated } /// Important note: the derivVelRhoD that is calculated by the ChForceExplicitSPH is the negative of actual time /// derivative. That is important to keep the derivVelRhoD to be the force/mass for fsi forces. // calculate the force that is f=m dv/dt derivVelRhoD[index] *= paramsD.markerMass; } //------------------------------------------------------------------------------ __global__ void Update_Fluid_State(Real3* new_vel, // input: sorted velocities, Real3* vis_vel, // input: sorted velocities, Real4* posRad, // input: sorted positions Real3* velMas, Real4* rhoPreMu, int4 updatePortion, const size_t numAllMarkers, double dT, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= updatePortion.y) return; velMas[i_idx] = velMas[i_idx] + paramsD.EPS_XSPH * new_vel[i_idx]; // printf("%f=", new_vel[i_idx].z); Real3 newpos = mR3(posRad[i_idx]) + dT * velMas[i_idx]; Real h = posRad[i_idx].w; posRad[i_idx] = mR4(newpos, h); if (!(isfinite(posRad[i_idx].x) && isfinite(posRad[i_idx].y) && isfinite(posRad[i_idx].z))) { printf("Error! particle %d position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel %f,%f,%f,%f\n", i_idx, posRad[i_idx].x, posRad[i_idx].y, posRad[i_idx].z, posRad[i_idx].w); } if (!(isfinite(rhoPreMu[i_idx].x) && isfinite(rhoPreMu[i_idx].y) && isfinite(rhoPreMu[i_idx].z))) { printf("Error! particle %d rhoPreMu is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel ! %f,%f,%f,%f\n", i_idx, rhoPreMu[i_idx].x, rhoPreMu[i_idx].y, rhoPreMu[i_idx].z, rhoPreMu[i_idx].w); } if (!(isfinite(velMas[i_idx].x) && isfinite(velMas[i_idx].y) && isfinite(velMas[i_idx].z))) { printf("Error! 
particle %d velocity is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !%f,%f,%f\n", i_idx, velMas[i_idx].x, velMas[i_idx].y, velMas[i_idx].z); } } // ----------------------------------------------------------------------------- /// Kernel for updating the density. /// It calculates the density of the markers. It does include the normalization /// close to the boundaries and free surface. __global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd, size_t numAllMarkers) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numAllMarkers) return; // read particle data from sorted arrays Real3 posRadA = mR3(sortedPosRad[index]); Real4 rhoPreMuA = sortedRhoPreMu[index]; /// If density initialization should only be applied to fluid markers // if (rhoPreMuA.w > -.1) // return; // get address in grid int3 gridPos = calcGridPos(posRadA); Real numerator = 0.0; Real denominator = 0.0; // examine neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); collideCellDensityReInit(numerator, denominator, neighbourPos, index, posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd); } } } rhoPreMuA.x = numerator; /// denominator; // rhoPreMuA.y = Eos(rhoPreMuA.x, rhoPreMuA.w); dummySortedRhoPreMu[index] = rhoPreMuA; } // ----------------------------------------------------------------------------- // CLASS FOR FLUID DYNAMICS SYSTEM // ----------------------------------------------------------------------------- ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<ChFsiDataManager> otherFsiData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<NumberOfObjects> otherNumObjects, ChFluidDynamics::Integrator type) : fsiData(otherFsiData), paramsH(otherParamsH), numObjectsH(otherNumObjects) { myIntegrator = type; switch (myIntegrator) { case ChFluidDynamics::Integrator::I2SPH: forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>(otherBceWorker, fsiData->sortedSphMarkersD, fsiData->markersProximityD, fsiData->fsiGeneralData, paramsH, numObjectsH); printf("Created an I2SPH frame work.\n"); break; case ChFluidDynamics::Integrator::IISPH: forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(otherBceWorker, fsiData->sortedSphMarkersD, fsiData->markersProximityD, fsiData->fsiGeneralData, paramsH, numObjectsH); printf("Created an IISPH framework.\n"); break; case ChFluidDynamics::Integrator::ExplicitSPH: forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>( otherBceWorker, fsiData->sortedSphMarkersD, fsiData->markersProximityD, fsiData->fsiGeneralData, paramsH, numObjectsH); printf("Created an ExplicitSPHframe work.\n"); break; /// Extend this function with your own linear solvers default: forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(otherBceWorker, fsiData->sortedSphMarkersD, fsiData->markersProximityD, fsiData->fsiGeneralData, paramsH, numObjectsH); std::cout << "The ChFsiForce you chose has not been implemented, reverting back to " "ChFsiForceIISPH\n"; } } // ----------------------------------------------------------------------------- void ChFluidDynamics::Finalize() { printf("ChFluidDynamics::Finalize()\n"); forceSystem->Finalize(); cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), 
sizeof(NumberOfObjects)); cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams)); } // ----------------------------------------------------------------------------- ChFluidDynamics::~ChFluidDynamics() {} // ----------------------------------------------------------------------------- void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2, std::shared_ptr<SphMarkerDataD> sphMarkersD1, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD, Real dT) { if (GetIntegratorType() == ChFluidDynamics::Integrator::ExplicitSPH) forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD); else forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD); if (myIntegrator == ChFluidDynamics::Integrator::IISPH) this->UpdateFluid_Implicit(sphMarkersD2); else if (GetIntegratorType() == ChFluidDynamics::Integrator::ExplicitSPH) this->UpdateFluid(sphMarkersD1, dT); this->ApplyBoundarySPH_Markers(sphMarkersD2); } // ----------------------------------------------------------------------------- void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) { int2 updatePortion = mI2(0, fsiData->fsiGeneralData->referenceArray[fsiData->fsiGeneralData->referenceArray.size() - 1].y); bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); //------------------------ uint nBlock_UpdateFluid, nThreads; computeGridSize(updatePortion.y - updatePortion.x, 256, nBlock_UpdateFluid, nThreads); UpdateFluidD<<<nBlock_UpdateFluid, nThreads>>>( mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR3CAST(fsiData->fsiGeneralData->vel_XSPH_D), mR4CAST(sphMarkersD->rhoPresMuD), mR4CAST(fsiData->fsiGeneralData->derivVelRhoD_old), mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD), mR3CAST(fsiData->fsiGeneralData->derivTauXxYyZzD), mR3CAST(fsiData->fsiGeneralData->derivTauXyXzYzD), mR4CAST(fsiData->fsiGeneralData->sr_tau_I_mu_i), updatePortion, dT, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); //------------------------ cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed in UpdateFluidD!\n"); } cudaFree(isErrorD); free(isErrorH); } void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint numThreads, numBlocks; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0; int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 
1 : 0; int4 updatePortion = mI4(fsiData->fsiGeneralData->referenceArray[haveHelper].x, fsiData->fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0); // std::cout << "Skipping the markers greater than " // << fsiData->fsiGeneralData->referenceArray[haveHelper + haveGhost].y << " in position update\n"; std::cout << "time step in UpdateFluid_Implicit " << paramsH->dT << std::endl; bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); Update_Fluid_State<<<numBlocks, numThreads>>>( mR3CAST(fsiData->fsiGeneralData->vel_XSPH_D), mR3CAST(fsiData->fsiGeneralData->vis_vel_SPH_D), mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD), updatePortion, numObjectsH->numAllMarkers, paramsH->dT, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! program crashed in Update_Fluid_State!\n"); } //------------------------------------------------------------------------ cudaFree(isErrorD); free(isErrorH); } // ----------------------------------------------------------------------------- /** * @brief ApplyBoundarySPH_Markers * @details * applies periodic boundary conditions in x,y, and z directions */ void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint nBlock_NumSpheres, nThreads_SphMarkers; computeGridSize((int)numObjectsH->numAllMarkers, 256, nBlock_NumSpheres, nThreads_SphMarkers); ApplyPeriodicBoundaryXKernel<<<nBlock_NumSpheres, nThreads_SphMarkers>>>(mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); cudaDeviceSynchronize(); cudaCheckError(); ApplyPeriodicBoundaryYKernel<<<nBlock_NumSpheres, nThreads_SphMarkers>>>(mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); cudaDeviceSynchronize(); cudaCheckError(); ApplyPeriodicBoundaryZKernel<<<nBlock_NumSpheres, nThreads_SphMarkers>>>(mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); cudaDeviceSynchronize(); cudaCheckError(); // ApplyOutOfBoundaryKernel<<<nBlock_NumSpheres, nThreads_SphMarkers>>>(mR4CAST(sphMarkersD->posRadD), // mR4CAST(sphMarkersD->rhoPresMuD), // mR3CAST(sphMarkersD->velMasD)); // cudaDeviceSynchronize(); // cudaCheckError(); } /** * @brief ApplyBoundarySPH_Markers * @details * applies periodic boundary conditions in y, and z. The inlet/outlet BC is applied in the x direction. * This functions needs to be tested. 
*/ void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint nBlock_NumSpheres, nThreads_SphMarkers; computeGridSize((int)numObjectsH->numAllMarkers, 256, nBlock_NumSpheres, nThreads_SphMarkers); ApplyInletBoundaryXKernel<<<nBlock_NumSpheres, nThreads_SphMarkers>>>( mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD)); cudaDeviceSynchronize(); cudaCheckError(); // these are useful anyway for out of bound particles ApplyPeriodicBoundaryYKernel<<<nBlock_NumSpheres, nThreads_SphMarkers>>>(mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); cudaDeviceSynchronize(); cudaCheckError(); ApplyPeriodicBoundaryZKernel<<<nBlock_NumSpheres, nThreads_SphMarkers>>>(mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD)); cudaDeviceSynchronize(); cudaCheckError(); } // ----------------------------------------------------------------------------- void ChFluidDynamics::DensityReinitialization() { uint nBlock_NumSpheres, nThreads_SphMarkers; computeGridSize((int)numObjectsH->numAllMarkers, 256, nBlock_NumSpheres, nThreads_SphMarkers); thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers); thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0)); ReCalcDensityD_F1<<<nBlock_NumSpheres, nThreads_SphMarkers>>>( mR4CAST(dummySortedRhoPreMu), mR4CAST(fsiData->sortedSphMarkersD->posRadD), mR3CAST(fsiData->sortedSphMarkersD->velMasD), mR4CAST(fsiData->sortedSphMarkersD->rhoPresMuD), U1CAST(fsiData->markersProximityD->gridMarkerIndexD), U1CAST(fsiData->markersProximityD->cellStartD), U1CAST(fsiData->markersProximityD->cellEndD), numObjectsH->numAllMarkers); cudaDeviceSynchronize(); cudaCheckError(); ChFsiForce::CopySortedToOriginal_NonInvasive_R4(fsiData->sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu, fsiData->markersProximityD->gridMarkerIndexD); ChFsiForce::CopySortedToOriginal_NonInvasive_R4(fsiData->sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu, fsiData->markersProximityD->gridMarkerIndexD); dummySortedRhoPreMu.clear(); } } // namespace fsi } // end namespace chrono
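// ---------------------------------------------------------------------------
// Annotation (not part of the original ChFluidDynamics sources above): the
// UpdateFluidD / Update_Fluid_State kernels advance each SPH marker by
// (i) moving the position with the XSPH-smoothed velocity, (ii) integrating
// the velocity with the stored acceleration, and (iii) updating the density
// and recovering the pressure from an equation of state. The self-contained
// sketch below only illustrates that explicit update pattern under simplified
// assumptions: plain float3 arrays, a Tait/Cole-type EOS with placeholder
// constants, no NaN checks and no elastic/granular branch. All names here
// (Params, explicitUpdateSketch, taitPressure) are illustrative; this is not
// the Chrono::FSI implementation.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

struct Params { float dt, eps_xsph, rho0, c0, gamma; };

// y + a*x, componentwise, for float3
__device__ inline float3 axpy(float a, float3 x, float3 y) {
    return make_float3(a * x.x + y.x, a * x.y + y.y, a * x.z + y.z);
}

// Weakly compressible EOS: P = B * ((rho/rho0)^gamma - 1), with B = rho0*c0^2/gamma
__device__ inline float taitPressure(float rho, const Params& p) {
    float B = p.rho0 * p.c0 * p.c0 / p.gamma;
    return B * (powf(rho / p.rho0, p.gamma) - 1.0f);
}

__global__ void explicitUpdateSketch(float3* pos, float3* vel, float* rho, float* pres,
                                     const float3* velXSPH, const float3* acc,
                                     const float* drhodt, int n, Params p) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;                                   // thread guard
    float3 vAdv = axpy(p.eps_xsph, velXSPH[i], vel[i]);   // XSPH-smoothed advection velocity
    pos[i] = axpy(p.dt, vAdv, pos[i]);                    // position uses the XSPH velocity
    vel[i] = axpy(p.dt, acc[i], vel[i]);                  // velocity uses the raw acceleration
    rho[i] += drhodt[i] * p.dt;                           // continuity equation
    pres[i] = taitPressure(rho[i], p);                    // EOS closes the system
}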
ffdfb8b6b98c3cfe190f7ee5f1bc8a38d1d78046.hip
// !!! This is a file automatically generated by hipify!!! // CUDA and CUBLAS functions // CUDA runtime #include <hip/hip_runtime.h> // Utilities and system includes #include <assert.h> #include <helper_functions.h> #include <helper_cuda.h> #include <vector> #include <string> #include <iostream> #include <limits> // std::numeric_limits #include <cmath> /* ceil */ #include <math_functions.h> #include "preferences.hh" #include "GPU/gpu_dpop_util_phase.hh" #include "GPU/gpu_globals.hh" #include "GPU/gpu_data_allocator.hh" #include "GPU/cuda_utils.hh" #include "GPU/cuda_dpop_state.hh" #include "Kernel/types.hh" using namespace CUDA; __global__ void compute_util_table_ver_0(int* utilTable, unsigned int block_shift, unsigned int nb_util_table_rows, // after projection int aid, int d_size, int nb_var_sep, int nb_binary); __global__ void compute_util_table_ver_1(int* utilTable, int** childUtilTable, unsigned int block_shift, unsigned int nb_util_table_rows, // after projection //int nbGroups, // each group computes a row (nb_worlds) int aid, int d_size, int nb_var_sep, int nb_binary, int children_info_size); // bool is_root); __global__ void compute_util_table_ver_1Root(int* utilTable, int** childUtilTable, unsigned int block_shift, unsigned int nb_util_table_rows, // after projection //int nbGroups, // each group computes a row (nb_worlds) int aid, int d_size, int nb_var_sep, int nb_binary, int children_info_size); // bool is_root); __global__ void compute_util_table_ver_2(int* dev_table, unsigned int blockShift, unsigned int utilTableSize, // size after projection int aid, int domSize, int nbVarSep, int nbBinary); __global__ void printArray(int** array, int id, unsigned int nRows, int nCols) { printf("Table nRwos=%d, nCols=%d\n", nRows, nCols); int limit = nRows > 50 ? 10 : nRows; for (int r = 0; r < limit; r++) { for (int c = 0; c < nCols; c++) { printf("%d ", array[id][r * nCols + c]); } printf("\n"); } } __global__ void associateVector(int** array2d, int id, int* array) { array2d[id] = array; } GPU_DPOPutilPhase::GPU_DPOPutilPhase() { cudaCheck(hipEventCreate(&startEventCpy)); cudaCheck(hipEventCreate(&stopEventCpy)); cudaCheck(hipEventCreate(&startEventCmp)); cudaCheck(hipEventCreate(&stopEventCmp)); computeStreams = nullptr; n_computeStreams = 0; tot_time_ms = 0; compute_time_ms = 0; copy_time_ms = 0; } GPU_DPOPutilPhase::~GPU_DPOPutilPhase() { // ------------------------------------------------ // CUDA cleanup // ------------------------------------------------ cudaCheck(hipEventDestroy(startEventCmp)); cudaCheck(hipEventDestroy(stopEventCmp)); cudaCheck(hipEventDestroy(startEventCpy)); cudaCheck(hipEventDestroy(stopEventCpy)); if (preferences::usePinnedMemory) { for (int i = 0; i < n_computeStreams; ++i) cudaCheck(hipStreamDestroy(computeStreams[i])); delete[] computeStreams; } } // _version_ = 0 (tree-leaves) // uses directly projected table, and optimizes values for the // domain of the current variable on GPU // // _version_ = 1 (if it can copy all children table in global memory) // uses directly projected table, and optimizes values for the // domain of the current variable on GPU // - A thread operates in a single world (and all domain elements of this agent) // - A group operates on a row of the UTIL table // - n groups (based on 256 threads) // // _version_ = 2 (when children table cannot fit= in global memory) // Computes only binary constraints on GPU and operates onto unprojected table. 
// - A thread operates all worlds, on a given combinaion of values of the separator set // (excluding thus current variable). // - Groups = Threads (256) // - Children are added on CPU // - Projection and Optimization is made on CPU // // _version_ = 3 (when children table cannot fit in global memory) // Computes only binary constraints on GPU and operates onto unprojected table. // For a given combinaion of values of the separator set (excluding thus // current variable), a thread manages all worlds and all values of the domain // of the current variable. // Children are added on CPU // Projection and Optimization is made on CPU // void GPU_DPOPutilPhase::compute_util_table(DPOPstate& dpop_state, int _version_) { float ms; // NOTE: Based on the version let the DPOP_state create a table of appropriate size. int nAgents = dpop_state.get_nb_dcop_agents(); host_nTableRowsNoProj = dpop_state.getUtilTableRows(); // before projection host_nTableRowsAfterProj = dpop_state.getUtilTableRowsAfterProj(); // ----------------------------------------------------------------------- // // GPU Device Setup // ----------------------------------------------------------------------- // setup_kernel(_version_, dpop_state); // ----------------------------------------------------------------------- // // Global memory // ----------------------------------------------------------------------- // host_table = dpop_state.getUtilTablePtr(); set_device_table_size(_version_, dpop_state); // TODO: Make this as a preprocessing step to speed it up. checkCudaErrors(hipMalloc(&dev_table, dev_nBytes)); //----------------------------------------------- // Copy Children Tables into Global Mem //----------------------------------------------- if (_version_ == 1) { CUDAutils::startTimer(startEventCpy); memcpyHtoD_children_tables(dpop_state); copy_time_ms += CUDAutils::stopTimer(startEventCpy, stopEventCpy); } //----------------------------------------------- // Initialize Streams and Events //----------------------------------------------- size_t max_nTableRowsPerStream = (preferences::streamSizeMB * 1e+6 / sizeof(int)); int n_computeStreams = (dev_nTableRows % max_nTableRowsPerStream == 0) ? (dev_nTableRows / max_nTableRowsPerStream) : (dev_nTableRows / max_nTableRowsPerStream) + 1; if (preferences::usePinnedMemory) { computeStreams = new hipStream_t[n_computeStreams]; for (int i = 0; i < n_computeStreams; ++i) cudaCheck(hipStreamCreate(&computeStreams[i])); } //----------------------------------------------- // Process UTIL Table //----------------------------------------------- CUDAutils::startTimer(startEventCmp); size_t cudaTableRowsLeft = 0; size_t nTableRowsToCompute = _version_ <= 1 ? host_nTableRowsAfterProj : host_nTableRowsNoProj; size_t nTableRowsComputed = 0; do { // Change Names and make both versions uniform cudaTableRowsLeft = (nTableRowsToCompute - nTableRowsComputed); if (dev_nTableRows > cudaTableRowsLeft) dev_nTableRows = cudaTableRowsLeft; size_t nbBlocksCompleted = 0; // ----------------------------------------------------------------------- // // PINNED (Asynch transfers Device->Host) // ----------------------------------------------------------------------- // if (preferences::usePinnedMemory) { for (int i = 0; i < n_computeStreams; i++) { int stream_nTableRows = i < (n_computeStreams - 1) ? max_nTableRowsPerStream : dev_nTableRows % max_nTableRowsPerStream; // Update nBlocks with the current dev_nTableRows info: nbBlocks = (stream_nTableRows % nbThreads == 0) ? 
(stream_nTableRows / nbThreads) : (stream_nTableRows / nbThreads) + 1; dev_nBytes = stream_nTableRows * sizeof(int); if (preferences::verbose) { printf("[GPU] Device Util Table size: [%zu] (MB=%zu)\n", stream_nTableRows, dev_nBytes / 1e+6); printf( "[GPU][%d] Kernel: nbBlocks=%zu nbStreams=%zu nbThreads=%zu\n", i, nbBlocks, n_computeStreams, nbThreads); } size_t runningNbBlocks = nbBlocks; size_t nbBlocksShift = nTableRowsComputed; size_t rowsToCompute = stream_nTableRows; execute_kernel(_version_, dpop_state, nbBlocksShift, rowsToCompute, runningNbBlocks, nbThreads, sharedMem, computeStreams[i]); nbBlocksCompleted += runningNbBlocks; // Copy Memory Back: Device --> Host cudaCheck( hipMemcpyAsync(&host_table[nTableRowsComputed], dev_table, dev_nBytes, hipMemcpyDeviceToHost, computeStreams[i])); nTableRowsComputed += stream_nTableRows; } // Streams cudaCheck(hipDeviceSynchronize()); // ----------------------------------------------------------------------- // } else { // ----------------------------------------------------------------------- // // PAGED // ----------------------------------------------------------------------- // // Update nBlocks with the current dev_nTableRows info: nbBlocks = (dev_nTableRows % nbThreads == 0) ? (dev_nTableRows / nbThreads) : (dev_nTableRows / nbThreads) + 1; dev_nBytes = dev_nTableRows * sizeof(int); nbBlocksCompleted = 0; while (nbBlocksCompleted < nbBlocks) { // EXECUTE KERNEL size_t runningNbBlocks = nbBlocks > CUDA::info::max_dim_grid ? CUDA::info::max_dim_grid : nbBlocks; size_t nbBlocksShift = nTableRowsComputed + nbBlocksCompleted; size_t rowsToCompute = dev_nTableRows; execute_kernel(_version_, dpop_state, nbBlocksShift, rowsToCompute, runningNbBlocks, nbThreads, sharedMem, (hipStream_t) 0); cudaCheck(hipDeviceSynchronize()); nbBlocksCompleted += runningNbBlocks; } // Copy Memory Back: Device --> Host CUDAutils::startTimer(startEventCpy); cudaCheck(hipMemcpy(&host_table[nTableRowsComputed], dev_table, dev_nBytes, hipMemcpyDeviceToHost)); copy_time_ms += CUDAutils::stopTimer(startEventCpy, stopEventCpy); nTableRowsComputed += dev_nTableRows; // ----------------------------------------------------------------------- // } } while (nTableRowsComputed < nTableRowsToCompute); // If _version_ 2 or 3 we still need to integrate the children table (on Host) compute_time_ms = CUDAutils::stopTimer(startEventCmp, stopEventCmp); // ------------------------------------------------ // cleanup // ------------------------------------------------ if (_version_ == 1) { for (int i = 0; i < dpop_state.getChildrenId().size(); i++) { cudaCheck(hipFree(host_chTablesMirror[i])); } cudaCheck(hipFree(dev_chTables)); delete[] host_chTablesMirror; } CUDAutils::startTimer(startEventCpy); // Note: This should not be done in Bucket Elimination (COP) // No transfer needed (Device -> Host -> Device) cudaCheck(hipFree(dev_table)); copy_time_ms += CUDAutils::stopTimer(startEventCpy, stopEventCpy); if (!preferences::silent) { printf("[GPU time] UTIL_%d Kernel compute: %.4f ms data transfer: %.4f ms\n", dpop_state.get_agent_id(), compute_time_ms, copy_time_ms); } } void GPU_DPOPutilPhase::setup_kernel(int _version_, DPOPstate& dpop_state) { nbThreads = 128; // Number of Parallel Threads per SM devPitch = 512; if (_version_ == 0 || _version_ == 1) { nbBlocks = (host_nTableRowsAfterProj % nbThreads == 0) ? 
(host_nTableRowsAfterProj / nbThreads) : (host_nTableRowsAfterProj / nbThreads) + 1; } else if (_version_ == 2 || _version_ == 3) { nbBlocks = (host_nTableRowsAfterProj % nbThreads == 0) ? (host_nTableRowsAfterProj / nbThreads) : (host_nTableRowsAfterProj / nbThreads) + 1; } // ----------------------------------------------------------------------- // // Shared memory // ----------------------------------------------------------------------- // sharedMem = 0; // rm binary constraints // if( has_unary ) // shared_mem += nb_worlds * d_size * sizeof(util_t); if (_version_ == 0) { sharedMem += nbThreads * dpop_state.get_dom_size() * sizeof(int); // __util_vector sharedMem += dpop_state.get_separator().size() * sizeof(int); } else if (_version_ == 1) { sharedMem += nbThreads * dpop_state.get_dom_size() * sizeof(int); // __util_vector sharedMem += dpop_state.get_separator().size() * sizeof(int); } else if (_version_ == 2) { sharedMem += dpop_state.get_separator().size() * sizeof(int); } assert(sharedMem <= CUDA::info::shared_memory); if (preferences::verbose) { printf("[GPU] Agent %d Shared Memory required %zu bytes \n", dpop_state.get_agent_id(), sharedMem); } } void GPU_DPOPutilPhase::set_device_table_size(int _version_, DPOPstate& dpop_state) { size_t cudaFreeMem = CUDAutils::get_nb_bytes_free_global_memory(); host_nBytes = 0; dev_nBytes = 0; dev_nTableRows = 0; if (_version_ == 0) { host_nBytes = host_nTableRowsAfterProj * sizeof(int); dev_nTableRows = host_nTableRowsAfterProj; } else if (_version_ == 1) { host_nBytes = host_nTableRowsAfterProj * sizeof(int); dev_nTableRows = host_nTableRowsAfterProj; // Add up the aggregated children Table memory and Remove it from CudaFreeMem size_t childrenMem = 0; std::vector<int> childrenId = dpop_state.getChildrenId(); for (int i = 0; i < childrenId.size(); i++) { childrenMem += dpop_state.getChildTableRows(childrenId[i]) * sizeof(int); if (preferences::verbose) { std::cout << "[GPU] Chid " << childrenId[i] << " required Mem: " << (childrenMem / 1e+6) << " MB\n"; } } cudaFreeMem -= childrenMem; } else if (_version_ == 2 || _version_ == 3) { host_nBytes = host_nTableRowsNoProj * sizeof(int); dev_nTableRows = host_nTableRowsNoProj; } dev_nBytes = host_nBytes; // We fit on Device whatever we can if (dev_nBytes >= cudaFreeMem) { dev_nTableRows = cudaFreeMem / sizeof(int); // ensure it's a multiple of d: int rem = dev_nTableRows % dpop_state.get_dom_size(); if (rem != 0) dev_nTableRows -= rem; dev_nBytes = dev_nTableRows * sizeof(int); } if (preferences::verbose) { if (_version_ == 0 || _version_ == 1) printf( "[GPU] Agent %d Util Table Memory Needed: %zu MB, [%d] free memory %zu MB \n", dpop_state.get_agent_id(), host_nBytes / 1e+6, host_nTableRowsAfterProj, cudaFreeMem / 1e+6); else if (_version_ == 2 || _version_ == 3) printf( "[GPU] Agent %d Util Table Memory Needed: %zu MB, [%d] free memory %zu MB \n", dpop_state.get_agent_id(), host_nBytes / 1e+6, host_nTableRowsNoProj, cudaFreeMem / 1e+6); } } void GPU_DPOPutilPhase::memcpyHtoD_children_tables(DPOPstate& dpop_state) { std::vector<int> childrenId = dpop_state.getChildrenId(); int nAgents = dpop_state.get_nb_dcop_agents(); checkCudaErrors(hipMalloc(&dev_chTables, nAgents * sizeof(int*))); host_chTablesMirror = new int*[childrenId.size()]; for (int i = 0; i < childrenId.size(); i++) { int chId = childrenId[i]; size_t ch_table_bytes = dpop_state.getChildTableRows(chId) * sizeof(int); int* dev_tmp; cudaCheck(hipMalloc(&dev_tmp, ch_table_bytes)); if (preferences::usePinnedMemory) { cudaCheck( 
hipMemcpyAsync(dev_tmp, dpop_state.getChildTablePtr(chId), ch_table_bytes, hipMemcpyHostToDevice, agtStream[chId])); hipLaunchKernelGGL(( associateVector), dim3(1),dim3(1),0,agtStream[chId], dev_chTables, chId, dev_tmp); } else { cudaCheck(hipMemcpy(dev_tmp, dpop_state.getChildTablePtr(chId), ch_table_bytes, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( associateVector), dim3(1),dim3(1), 0, 0, dev_chTables, chId, dev_tmp); cudaCheck(hipDeviceSynchronize()); } host_chTablesMirror[i] = dev_tmp; } cudaCheck(hipDeviceSynchronize()); } void GPU_DPOPutilPhase::execute_kernel(int _version_, DPOPstate& dpop_state, size_t nbBlocksShift, size_t rowsToCompute, size_t runningNbBlocks, int nbThreads, size_t sharedMem, hipStream_t streamID) { if (_version_ == 0) { // std::cout << "Running version 0\n"; hipLaunchKernelGGL(( compute_util_table_ver_0), dim3(runningNbBlocks), dim3(nbThreads), sharedMem/*, streamID*/, 0, this->dev_table, nbBlocksShift, //nTableRowsComputed + nbBlocksCompleted, rowsToCompute,// dev_nTableRows, // [in pinned is: stream_nTableRows] dpop_state.get_agent_id(), dpop_state.get_dom_size(), dpop_state.get_separator().size(), dpop_state.get_nb_binary_constraints()); } else if (_version_ == 1) { if(dpop_state.is_root()) { hipLaunchKernelGGL(( compute_util_table_ver_1Root), dim3(runningNbBlocks), dim3(nbThreads), sharedMem, streamID, this->dev_table, this->dev_chTables, nbBlocksShift,// [paged: nTableRowsComputed + nbBlocksCompleted], // [pinned: nTableRowsComputed[, rowsToCompute,// pinnde: stream_nTableRows, // paged: dev_nTableRows dpop_state.get_agent_id(), dpop_state.get_dom_size(), dpop_state.get_separator().size(), dpop_state.get_nb_binary_constraints(), dpop_state.get_children_info_size()); } else { hipLaunchKernelGGL(( compute_util_table_ver_1), dim3(runningNbBlocks), dim3(nbThreads), sharedMem, streamID, this->dev_table, this->dev_chTables, nbBlocksShift,// [paged: nTableRowsComputed + nbBlocksCompleted], // [pinned: nTableRowsComputed[, rowsToCompute,// pinned: stream_nTableRows, // paged: this->dev_nTableRows dpop_state.get_agent_id(), dpop_state.get_dom_size(), dpop_state.get_separator().size(), dpop_state.get_nb_binary_constraints(), dpop_state.get_children_info_size()); } } else if (_version_ == 2) { hipLaunchKernelGGL(( compute_util_table_ver_2), dim3(runningNbBlocks), dim3(nbThreads), sharedMem, streamID , this->dev_table, nbBlocksShift,// [paged: nTableRowsComputed + nbBlocksCompleted], // [pinned: nTableRowsComputed[, rowsToCompute,// stream_nTableRows, // paged: this->dev_nTableRows dpop_state.get_agent_id(), dpop_state.get_dom_size(), dpop_state.get_separator().size() + 1, dpop_state.get_nb_binary_constraints()); } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// __device__ __forceinline__ unsigned int lcuda_encode(int* t, int t_size, int d) { int _d = d; unsigned int ofs = t[--t_size]; #pragma unroll while (t_size > 0) { ofs += t[--t_size] * _d; _d *= d; } return ofs; } __device__ __forceinline__ unsigned int lcuda_fencode_next(int code, int t_size, int pos, int d) { return code + pow(d, t_size - pos - 1); } __device__ __forceinline__ void lcuda_decode(unsigned int code, int* t, int t_size, int d) { #pragma unroll for (int i = t_size - 1; i >= 0; i--) { t[i] = code % d; code /= d; } } __device__ __forceinline__ int lcuda_decode(const unsigned int& code, const int& pos, const int* dPow, const int& d) { return (code / dPow[pos]) % d; } __device__ __forceinline__ void lcuda_get_dPow(int* dPow, int 
dPow_size, int d) { if (dPow_size == 0) return; dPow[dPow_size - 1] = 1; #pragma unroll for (int i = dPow_size - 2; i >= 0; i--) { dPow[i] = dPow[i + 1] * d; } } //////////////////////////////////////////////////////////////////////////////// // NOTE: // Thread's private array definitely is stored at local memory space, in the DRAM off-the-chip, // and maybe cached in memory hierarchy. Generally, non-array variable are considered as virtual // registers in PTX and the number of registers in PTX are unlimited. However, obviously all these // virtual registers are not mapped to physical registers. A PTX postprocessor spills some registers // to local space according to the micro-architecture flags specified for NVCC, and optimizes the register usage. //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// // C U D A K E R N E L S ( UTIL Table computation ) //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// // V E R S I O N 0 (Leaves) //////////////////////////////////////////////////////////////////////////////////////// __global__ void compute_util_table_ver_0(int* utilTable, unsigned int block_shift, unsigned int dev_nTableRows, // after projection int aid, int d_size, int nb_var_sep, int nb_binary) { // ---------------------------------------------------------------------- // // Registers // ---------------------------------------------------------------------- // unsigned int utilTableRow = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int utilTableCode = block_shift + utilTableRow; if (utilTableRow >= dev_nTableRows) // Thread Guard return; int _i = 0, _di = 0, _id = 0, _x1 = 0, _x2 = 0; int _scope_x1, _scope_x2; // ---------------------------------------------------------------------- // // Shared Memory Allocation // ---------------------------------------------------------------------- // extern __shared__ int __smem[]; _i = 0; int* __util_vector = &__smem[_i + (threadIdx.x * d_size)]; _i += blockDim.x * d_size; int* __dPow = &__smem[_i]; lcuda_get_dPow(__dPow, nb_var_sep, d_size); __syncthreads(); // ---------------------------------------------------------------------- // // Compute Util Table Entry Value // ---------------------------------------------------------------------- // for (_i = 0; _i < d_size; _i++) __util_vector[_i] = 0; // ---------------------------------------------------------------------- // // Global Memory Links // ---------------------------------------------------------------------- // int* g_Container; // used for [g_constraint, g_children_info) int* g_UtilTable; // used for [g_con_utils, g_child_utils] g_Container = gdev_DPOP_Agents[aid].binary_con; //--------------------------------------------------------------------- // // Binary constraints //--------------------------------------------------------------------- // for (_i = 0; _i < nb_binary; _i++) { _id = g_Container[3 * _i]; // O(1) _x1 = g_Container[3 * _i + 1]; // . _x2 = g_Container[3 * _i + 2]; // . g_UtilTable = gdev_Constraints[_id].utils; // O(T) #pragma unroll for (_di = 0; _di < d_size; _di++) { _scope_x1 = _x1 == -1 ? 
_di : ((utilTableCode / __dPow[_x1]) % d_size); // ~ O(1) _scope_x2 = _x2 == -1 ? _di : ((utilTableCode / __dPow[_x2]) % d_size); // ~ O(1) // Two solutions to speed up this step: // 1. Align: if x1==-1 -> need transpose, so that threads can copy more data. // 2. Use d_size threads (BEST solution) Each thread read one cell and adds __util_vector[_di] += g_UtilTable[_scope_x1 * d_size + _scope_x2]; // O(T) (global mem) } } // get best util: _x2/*UTIL*/= UNSAT; #pragma unroll for (_di = 0; _di < d_size; _di++) _x2/*UTIL*/= fmax((double) _x2/*UTIL*/, (double) __util_vector[_di]); utilTable[utilTableRow] = _x2/*UTIL*/; // (global mem) } //////////////////////////////////////////////////////////////////////////////////////// // V E R S I O N 1 (Small (<2G) Util Tables (Children Table can be copied to Global Mem) //////////////////////////////////////////////////////////////////////////////////////// __global__ void compute_util_table_ver_1(int* utilTable, int** childIdToUtilTablePtr, // childUtilTableRows ? unsigned int block_shift, unsigned int dev_nTableRows, //int nbThreads, // each group computes a row (nb_worlds) int aid, int d_size, int nb_var_sep, //int max_ch_sep_size, int nb_binary, int children_info_size) { // bool is_root) { // ---------------------------------------------------------------------- // // Registers // ---------------------------------------------------------------------- // unsigned int utilTableRow = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int utilTableCode = block_shift + utilTableRow; if (utilTableRow >= dev_nTableRows) // Thread Guard return; int _i = 0, _j = 0, _k = 0, _di = 0, _id = 0, _x1 = 0, _x2 = 0; int _scope_x1, _scope_x2; int _ch_sep_size = 0; unsigned int _d_power = 1; // ---------------------------------------------------------------------- // // Shared Memory Allocation // ---------------------------------------------------------------------- // extern __shared__ int __smem[]; _j = 0; int* __util_vector = &__smem[_j + (threadIdx.x * d_size)]; _j += blockDim.x * d_size; int* __dPow = &__smem[_j]; lcuda_get_dPow(__dPow, nb_var_sep, d_size); __syncthreads(); // ---------------------------------------------------------------------- // // Compute Util Table Entry Value // ---------------------------------------------------------------------- // for (_i = 0; _i < d_size; _i++) __util_vector[_i] = 0; // ---------------------------------------------------------------------- // // Global Memory Links // ---------------------------------------------------------------------- // //int* g_constraint = gdev_DPOP_Agents[aid].binary_con; //int* g_children_info = gdev_DPOP_Agents[aid].children_info; int* g_Container; // used for [g_constraint, g_children_info) int* g_UtilTable; // used for [g_con_utils, g_child_utils] g_Container = gdev_DPOP_Agents[aid].binary_con; //--------------------------------------------------------------------- // // Binary constraints //--------------------------------------------------------------------- // for (_i = 0; _i < nb_binary; _i++) { _id = g_Container[3 * _i]; // O(1) _x1 = g_Container[3 * _i + 1]; // . _x2 = g_Container[3 * _i + 2]; // . //int* g_con_utils = gdev_Constraints[_id].utils; // O(T) g_UtilTable = gdev_Constraints[_id].utils; // O(T) #pragma unroll for (_di = 0; _di < d_size; _di++) { _scope_x1 = _x1 == -1 ? _di : ((utilTableCode / __dPow[_x1]) % d_size); // ~ O(1) _scope_x2 = _x2 == -1 ? _di : ((utilTableCode / __dPow[_x2]) % d_size); // ~ O(1) // Two solutions to speed up this step: // 1. 
Align: if x1==-1 -> need transpose, so that threads can copy more data. // 2. Use d_size threads (BEST solution) Each thread read one cell and adds __util_vector[_di] += g_UtilTable[_scope_x1 * d_size + _scope_x2]; // O(T) (global mem) } } g_Container = gdev_DPOP_Agents[aid].children_info; //--------------------------------------------------------------------- // // Messages from Children //--------------------------------------------------------------------- // _i = 0; //#pragma unroll while (_i < children_info_size) { _id = g_Container[_i++]; // O(1) _ch_sep_size = g_Container[_i++]; // O(1); if (_ch_sep_size <= 0) continue; _d_power = 1; _k = -1; //int* g_child_utils = childIdToUtilTablePtr[_id]; // O(1) g_UtilTable = childIdToUtilTablePtr[_id]; // O(1) // Do it in decreasing order to better compute the domain power _i = _i + _ch_sep_size - 1; _x2 = 0; // MAYBE CAN REM. J #pragma unroll for (_j = _ch_sep_size - 1; _j >= 0; _j--) { _x1 = g_Container[_i--]; // O(1) _k = (_x1 == -1) ? _j : _k; // index of sep_values (-1 if current agent) // Two solutions to speed it up: // * 1. Save it once in shared - reuse it for each _di // 2. Use d_size threads. _x2 += (_x1 == -1) ? 0 : ((utilTableCode / __dPow[_x1]) % d_size) * _d_power; // ~ O(1) _d_power *= d_size; } _i += _ch_sep_size + 1; _d_power = 1; #pragma unroll for (_j = 0; _j < _ch_sep_size - _k - 1; _j++) _d_power *= d_size; #pragma unroll for (_di = 0; _di < d_size; _di++) { __util_vector[_di] += g_UtilTable[_x2]; // O(T) _x2 += _d_power; } } // get best util: _x2/*UTIL*/= UNSAT; #pragma unroll for (_di = 0; _di < d_size; _di++) _x2/*UTIL*/= fmax((double) _x2/*UTIL*/, (double) __util_vector[_di]); utilTable[utilTableRow] = _x2/*UTIL*/; // (global mem) } //////////////////////////////////////////////////////////////////////////////////////// // V E R S I O N 1 (Small (<2G) Util Tables (Children Table can be copied to Global Mem) //////////////////////////////////////////////////////////////////////////////////////// __global__ void compute_util_table_ver_1Root(int* utilTable, int** childIdToUtilTablePtr, // childUtilTableRows ? 
unsigned int block_shift, unsigned int dev_nTableRows, //int nbThreads, // each group computes a row (nb_worlds) int aid, int d_size, int nb_var_sep, //int max_ch_sep_size, int nb_binary, int children_info_size) { // bool is_root) { // ---------------------------------------------------------------------- // // Registers // ---------------------------------------------------------------------- // unsigned int utilTableRow = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int utilTableCode = block_shift + utilTableRow; if (utilTableRow >= dev_nTableRows) return; int _i = 0, _j = 0, _k = 0, _di = 0, _id = 0, _x1 = 0, _x2 = 0; int _scope_x1, _scope_x2; int _ch_sep_size = 0; unsigned int _d_power = 1; // ---------------------------------------------------------------------- // // Shared Memory Allocation // ---------------------------------------------------------------------- // extern __shared__ int __smem[]; _j = 0; int* __util_vector = &__smem[_j + (threadIdx.x * d_size)]; _j += blockDim.x * d_size; int* __dPow = &__smem[_j]; lcuda_get_dPow(__dPow, nb_var_sep, d_size); __syncthreads(); // ---------------------------------------------------------------------- // // Compute Util Table Entry Value // ---------------------------------------------------------------------- // for (_i = 0; _i < d_size; _i++) __util_vector[_i] = 0; // ---------------------------------------------------------------------- // // Global Memory Links // ---------------------------------------------------------------------- // //int* g_constraint = gdev_DPOP_Agents[aid].binary_con; //int* g_children_info = gdev_DPOP_Agents[aid].children_info; int* g_Container; // used for [g_constraint, g_children_info) int* g_UtilTable; // used for [g_con_utils, g_child_utils] g_Container = gdev_DPOP_Agents[aid].binary_con; //--------------------------------------------------------------------- // // Binary constraints //--------------------------------------------------------------------- // for (_i = 0; _i < nb_binary; _i++) { _id = g_Container[3 * _i]; // O(1) _x1 = g_Container[3 * _i + 1]; // . _x2 = g_Container[3 * _i + 2]; // . //int* g_con_utils = gdev_Constraints[_id].utils; // O(T) g_UtilTable = gdev_Constraints[_id].utils; // O(T) #pragma unroll for (_di = 0; _di < d_size; _di++) { _scope_x1 = _x1 == -1 ? _di : ((utilTableCode / __dPow[_x1]) % d_size); // ~ O(1) _scope_x2 = _x2 == -1 ? _di : ((utilTableCode / __dPow[_x2]) % d_size); // ~ O(1) // Two solutions to speed up this step: // 1. Align: if x1==-1 -> need transpose, so that threads can copy more data. // 2. Use d_size threads (BEST solution) Each thread read one cell and adds __util_vector[_di] += g_UtilTable[_scope_x1 * d_size + _scope_x2]; // O(T) (global mem) } } g_Container = gdev_DPOP_Agents[aid].children_info; //--------------------------------------------------------------------- // // Messages from Children //--------------------------------------------------------------------- // _i = 0; //#pragma unroll while (_i < children_info_size) { _id = g_Container[_i++]; // O(1) _ch_sep_size = g_Container[_i++]; // O(1); if (_ch_sep_size <= 0) continue; _d_power = 1; _k = -1; //int* g_child_utils = childIdToUtilTablePtr[_id]; // O(1) g_UtilTable = childIdToUtilTablePtr[_id]; // O(1) // Do it in decreasing order to better compute the domain power _i = _i + _ch_sep_size - 1; _x2 = 0; #pragma unroll for (_j = _ch_sep_size - 1; _j >= 0; _j--) { _x1 = g_Container[_i--]; // O(1) _k = (_x1 == -1) ? 
_j : _k; // index of sep_values (-1 if current agent) // Two solutions to speed it up: // * 1. Save it once in shared - reuse it for each _di // 2. Use d_size threads. _x2 += (_x1 == -1) ? 0 : ((utilTableCode / __dPow[_x1]) % d_size) * _d_power; // ~ O(1) _d_power *= d_size; } _i += _ch_sep_size + 1; _d_power = 1; #pragma unroll for (_j = 0; _j < _ch_sep_size - _k - 1; _j++) _d_power *= d_size; #pragma unroll for (_di = 0; _di < d_size; _di++) { __util_vector[_di] += g_UtilTable[_x2]; // O(T) _x2 += _d_power; } } // get best util: _x2/*UTIL*/= UNSAT; #pragma unroll for (_di = 0; _di < d_size; _di++) _x2/*UTIL*/= fmax((double) _x2/*UTIL*/, (double) __util_vector[_di]); utilTable[utilTableRow] = _x2/*UTIL*/; // (global mem) for (_di = 0; _di < d_size; _di++) _x1/*best_di*/= _x2/*UTIL*/== __util_vector[_di] ? _di : _x1/*best_di*/; gdev_DPOP_Agents[aid].best_value = _x1/*best_di*/; gdev_DPOP_Agents[aid].best_util = _x2/*UTIL*/; } //////////////////////////////////////////////////////////////////////////////////////// // V E R S I O N 2 (Very Large (>2GB) Util Tables) //////////////////////////////////////////////////////////////////////////////////////// // Deals with a non-projected UTIL table. Each tread computes one row of the table // (all worlds associated to it). // One group computes 256 rows. __global__ void compute_util_table_ver_2(int* utilTable, unsigned int block_shift, unsigned int dev_nTableRows, // size after projection int aid, int d_size, int nb_var_sep, int nb_binary) { // ---------------------------------------------------------------------- // // Registers // ---------------------------------------------------------------------- // unsigned int utilTableRow = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int utilTableCode = block_shift + utilTableRow; if (utilTableRow >= dev_nTableRows) // Thread Guard return; int _i = 0, _id = 0, _x1 = 0, _x2 = 0, _util = 0; int _scope_x1, _scope_x2; // ---------------------------------------------------------------------- // // Shared Memory Allocation // ---------------------------------------------------------------------- // extern __shared__ int __dPow[]; lcuda_get_dPow(__dPow, nb_var_sep, d_size); __syncthreads(); // ---------------------------------------------------------------------- // // Global Memory Links // ---------------------------------------------------------------------- // int* g_Container; // used for [g_constraint, g_children_info) int* g_UtilTable; // used for [g_con_utils, g_child_utils] g_Container = gdev_DPOP_Agents[aid].binary_con; //--------------------------------------------------------------------- // // Binary constraints //--------------------------------------------------------------------- // #pragma unroll for (_i = 0; _i < nb_binary; _i++) { _id = g_Container[3 * _i]; // O(1) _x1 = g_Container[3 * _i + 1]; // . _x2 = g_Container[3 * _i + 2]; // . g_UtilTable = gdev_Constraints[_id].utils; // O(T) // Update _x1, _x2 (This variable domain are stored in the last // position of the UtilTable before projection) _x1 = _x1 == -1 ? (nb_var_sep - 1) : _x1; _x2 = _x2 == -1 ? (nb_var_sep - 1) : _x2; _scope_x1 = ((utilTableCode / __dPow[_x1]) % d_size); // ~ O(1) _scope_x2 = ((utilTableCode / __dPow[_x2]) % d_size); // ~ O(1) /*UTIL tmp*/ _x1 = g_UtilTable[_scope_x1 * d_size + _scope_x2]; // O(T) _util = (_x1 == UNSAT || _util == UNSAT) ? 
UNSAT : _util + /*UTIL tmp*/_x1; } utilTable[utilTableRow] = _util; // (global mem) } // @Deprecated __global__ void project_util_table(int *dev_table, unsigned int host_nTableRowsNoProj, int domSize) { // Thread 0 -> take first (domSize rows) and all worlds unsigned int _start_table_row = threadIdx.x * domSize + blockIdx.x * blockDim.x * domSize; unsigned int _end_table_row = _start_table_row + domSize - 1; if (_end_table_row > host_nTableRowsNoProj) return; // This should never happen // unsigned int _table_row_after_proj = threadIdx.x + blockIdx.x * blockDim.x; int _util_di, _best_util, _w, _d; _best_util = UNSAT; _util_di = UNSAT; #pragma unroll for (_d = 0; _d < domSize; _d++) { _util_di = dev_table[_start_table_row + _d]; if (_util_di != UNSAT && _util_di > _best_util) _best_util = _util_di; } dev_table[_start_table_row] = _best_util; }
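// ---------------------------------------------------------------------------
// Annotation (not part of the original gpu_dpop_util_phase sources above): the
// UTIL-table kernels index each table row with a mixed-radix code over the
// separator variables: dPow[i] = d^(nVars-1-i), value(i) = (code / dPow[i]) % d,
// and the inverse encode is code = sum_i value(i) * dPow[i] (see lcuda_get_dPow,
// lcuda_decode and lcuda_encode above). The host-side sketch below round-trips
// that mapping for a small example so the row <-> assignment correspondence is
// easy to verify; the names d, nVars and the sample assignment are illustrative
// and do not come from the original sources.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

// dPow[i] = d^(nVars-1-i), same layout as lcuda_get_dPow
static void buildPow(std::vector<unsigned>& dPow, int nVars, int d) {
    dPow.assign(nVars, 1u);
    for (int i = nVars - 2; i >= 0; --i) dPow[i] = dPow[i + 1] * d;
}

// code = sum_i vals[i] * d^(nVars-1-i), same arithmetic as lcuda_encode
static unsigned encodeRow(const std::vector<int>& vals, const std::vector<unsigned>& dPow) {
    unsigned code = 0;
    for (size_t i = 0; i < vals.size(); ++i) code += vals[i] * dPow[i];
    return code;
}

// value of the variable at position pos, same arithmetic as lcuda_decode
static int decodeVar(unsigned code, int pos, const std::vector<unsigned>& dPow, int d) {
    return (code / dPow[pos]) % d;
}

int main() {
    const int d = 3, nVars = 4;                 // domain size and number of separator variables
    std::vector<unsigned> dPow;
    buildPow(dPow, nVars, d);
    std::vector<int> assignment = {2, 0, 1, 2}; // one value per separator variable
    unsigned code = encodeRow(assignment, dPow);
    printf("row code = %u\n", code);            // 59 for this example
    for (int pos = 0; pos < nVars; ++pos)
        printf("var %d -> %d\n", pos, decodeVar(code, pos, dPow, d));
    return 0;
}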
ffdfb8b6b98c3cfe190f7ee5f1bc8a38d1d78046.cu
// CUDA and CUBLAS functions // CUDA runtime #include <cuda_runtime.h> // Utilities and system includes #include <assert.h> #include <helper_functions.h> #include <helper_cuda.h> #include <vector> #include <string> #include <iostream> #include <limits> // std::numeric_limits #include <cmath> /* ceil */ #include <math_functions.h> #include "preferences.hh" #include "GPU/gpu_dpop_util_phase.hh" #include "GPU/gpu_globals.hh" #include "GPU/gpu_data_allocator.hh" #include "GPU/cuda_utils.hh" #include "GPU/cuda_dpop_state.hh" #include "Kernel/types.hh" using namespace CUDA; __global__ void compute_util_table_ver_0(int* utilTable, unsigned int block_shift, unsigned int nb_util_table_rows, // after projection int aid, int d_size, int nb_var_sep, int nb_binary); __global__ void compute_util_table_ver_1(int* utilTable, int** childUtilTable, unsigned int block_shift, unsigned int nb_util_table_rows, // after projection //int nbGroups, // each group computes a row (nb_worlds) int aid, int d_size, int nb_var_sep, int nb_binary, int children_info_size); // bool is_root); __global__ void compute_util_table_ver_1Root(int* utilTable, int** childUtilTable, unsigned int block_shift, unsigned int nb_util_table_rows, // after projection //int nbGroups, // each group computes a row (nb_worlds) int aid, int d_size, int nb_var_sep, int nb_binary, int children_info_size); // bool is_root); __global__ void compute_util_table_ver_2(int* dev_table, unsigned int blockShift, unsigned int utilTableSize, // size after projection int aid, int domSize, int nbVarSep, int nbBinary); __global__ void printArray(int** array, int id, unsigned int nRows, int nCols) { printf("Table nRwos=%d, nCols=%d\n", nRows, nCols); int limit = nRows > 50 ? 10 : nRows; for (int r = 0; r < limit; r++) { for (int c = 0; c < nCols; c++) { printf("%d ", array[id][r * nCols + c]); } printf("\n"); } } __global__ void associateVector(int** array2d, int id, int* array) { array2d[id] = array; } GPU_DPOPutilPhase::GPU_DPOPutilPhase() { cudaCheck(cudaEventCreate(&startEventCpy)); cudaCheck(cudaEventCreate(&stopEventCpy)); cudaCheck(cudaEventCreate(&startEventCmp)); cudaCheck(cudaEventCreate(&stopEventCmp)); computeStreams = nullptr; n_computeStreams = 0; tot_time_ms = 0; compute_time_ms = 0; copy_time_ms = 0; } GPU_DPOPutilPhase::~GPU_DPOPutilPhase() { // ------------------------------------------------ // CUDA cleanup // ------------------------------------------------ cudaCheck(cudaEventDestroy(startEventCmp)); cudaCheck(cudaEventDestroy(stopEventCmp)); cudaCheck(cudaEventDestroy(startEventCpy)); cudaCheck(cudaEventDestroy(stopEventCpy)); if (preferences::usePinnedMemory) { for (int i = 0; i < n_computeStreams; ++i) cudaCheck(cudaStreamDestroy(computeStreams[i])); delete[] computeStreams; } } // _version_ = 0 (tree-leaves) // uses directly projected table, and optimizes values for the // domain of the current variable on GPU // // _version_ = 1 (if it can copy all children table in global memory) // uses directly projected table, and optimizes values for the // domain of the current variable on GPU // - A thread operates in a single world (and all domain elements of this agent) // - A group operates on a row of the UTIL table // - n groups (based on 256 threads) // // _version_ = 2 (when children table cannot fit= in global memory) // Computes only binary constraints on GPU and operates onto unprojected table. // - A thread operates all worlds, on a given combinaion of values of the separator set // (excluding thus current variable). 
// - Groups = Threads (256) // - Children are added on CPU // - Projection and Optimization is made on CPU // // _version_ = 3 (when children table cannot fit in global memory) // Computes only binary constraints on GPU and operates onto unprojected table. // For a given combinaion of values of the separator set (excluding thus // current variable), a thread manages all worlds and all values of the domain // of the current variable. // Children are added on CPU // Projection and Optimization is made on CPU // void GPU_DPOPutilPhase::compute_util_table(DPOPstate& dpop_state, int _version_) { float ms; // NOTE: Based on the version let the DPOP_state create a table of appropriate size. int nAgents = dpop_state.get_nb_dcop_agents(); host_nTableRowsNoProj = dpop_state.getUtilTableRows(); // before projection host_nTableRowsAfterProj = dpop_state.getUtilTableRowsAfterProj(); // ----------------------------------------------------------------------- // // GPU Device Setup // ----------------------------------------------------------------------- // setup_kernel(_version_, dpop_state); // ----------------------------------------------------------------------- // // Global memory // ----------------------------------------------------------------------- // host_table = dpop_state.getUtilTablePtr(); set_device_table_size(_version_, dpop_state); // TODO: Make this as a preprocessing step to speed it up. checkCudaErrors(cudaMalloc(&dev_table, dev_nBytes)); //----------------------------------------------- // Copy Children Tables into Global Mem //----------------------------------------------- if (_version_ == 1) { CUDAutils::startTimer(startEventCpy); memcpyHtoD_children_tables(dpop_state); copy_time_ms += CUDAutils::stopTimer(startEventCpy, stopEventCpy); } //----------------------------------------------- // Initialize Streams and Events //----------------------------------------------- size_t max_nTableRowsPerStream = (preferences::streamSizeMB * 1e+6 / sizeof(int)); int n_computeStreams = (dev_nTableRows % max_nTableRowsPerStream == 0) ? (dev_nTableRows / max_nTableRowsPerStream) : (dev_nTableRows / max_nTableRowsPerStream) + 1; if (preferences::usePinnedMemory) { computeStreams = new cudaStream_t[n_computeStreams]; for (int i = 0; i < n_computeStreams; ++i) cudaCheck(cudaStreamCreate(&computeStreams[i])); } //----------------------------------------------- // Process UTIL Table //----------------------------------------------- CUDAutils::startTimer(startEventCmp); size_t cudaTableRowsLeft = 0; size_t nTableRowsToCompute = _version_ <= 1 ? host_nTableRowsAfterProj : host_nTableRowsNoProj; size_t nTableRowsComputed = 0; do { // Change Names and make both versions uniform cudaTableRowsLeft = (nTableRowsToCompute - nTableRowsComputed); if (dev_nTableRows > cudaTableRowsLeft) dev_nTableRows = cudaTableRowsLeft; size_t nbBlocksCompleted = 0; // ----------------------------------------------------------------------- // // PINNED (Asynch transfers Device->Host) // ----------------------------------------------------------------------- // if (preferences::usePinnedMemory) { for (int i = 0; i < n_computeStreams; i++) { int stream_nTableRows = i < (n_computeStreams - 1) ? max_nTableRowsPerStream : dev_nTableRows % max_nTableRowsPerStream; // Update nBlocks with the current dev_nTableRows info: nbBlocks = (stream_nTableRows % nbThreads == 0) ? 
(stream_nTableRows / nbThreads) : (stream_nTableRows / nbThreads) + 1; dev_nBytes = stream_nTableRows * sizeof(int); if (preferences::verbose) { printf("[GPU] Device Util Table size: [%zu] (MB=%zu)\n", stream_nTableRows, dev_nBytes / 1e+6); printf( "[GPU][%d] Kernel: nbBlocks=%zu nbStreams=%zu nbThreads=%zu\n", i, nbBlocks, n_computeStreams, nbThreads); } size_t runningNbBlocks = nbBlocks; size_t nbBlocksShift = nTableRowsComputed; size_t rowsToCompute = stream_nTableRows; execute_kernel(_version_, dpop_state, nbBlocksShift, rowsToCompute, runningNbBlocks, nbThreads, sharedMem, computeStreams[i]); nbBlocksCompleted += runningNbBlocks; // Copy Memory Back: Device --> Host cudaCheck( cudaMemcpyAsync(&host_table[nTableRowsComputed], dev_table, dev_nBytes, cudaMemcpyDeviceToHost, computeStreams[i])); nTableRowsComputed += stream_nTableRows; } // Streams cudaCheck(cudaDeviceSynchronize()); // ----------------------------------------------------------------------- // } else { // ----------------------------------------------------------------------- // // PAGED // ----------------------------------------------------------------------- // // Update nBlocks with the current dev_nTableRows info: nbBlocks = (dev_nTableRows % nbThreads == 0) ? (dev_nTableRows / nbThreads) : (dev_nTableRows / nbThreads) + 1; dev_nBytes = dev_nTableRows * sizeof(int); nbBlocksCompleted = 0; while (nbBlocksCompleted < nbBlocks) { // EXECUTE KERNEL size_t runningNbBlocks = nbBlocks > CUDA::info::max_dim_grid ? CUDA::info::max_dim_grid : nbBlocks; size_t nbBlocksShift = nTableRowsComputed + nbBlocksCompleted; size_t rowsToCompute = dev_nTableRows; execute_kernel(_version_, dpop_state, nbBlocksShift, rowsToCompute, runningNbBlocks, nbThreads, sharedMem, (cudaStream_t) 0); cudaCheck(cudaDeviceSynchronize()); nbBlocksCompleted += runningNbBlocks; } // Copy Memory Back: Device --> Host CUDAutils::startTimer(startEventCpy); cudaCheck(cudaMemcpy(&host_table[nTableRowsComputed], dev_table, dev_nBytes, cudaMemcpyDeviceToHost)); copy_time_ms += CUDAutils::stopTimer(startEventCpy, stopEventCpy); nTableRowsComputed += dev_nTableRows; // ----------------------------------------------------------------------- // } } while (nTableRowsComputed < nTableRowsToCompute); // If _version_ 2 or 3 we still need to integrate the children table (on Host) compute_time_ms = CUDAutils::stopTimer(startEventCmp, stopEventCmp); // ------------------------------------------------ // cleanup // ------------------------------------------------ if (_version_ == 1) { for (int i = 0; i < dpop_state.getChildrenId().size(); i++) { cudaCheck(cudaFree(host_chTablesMirror[i])); } cudaCheck(cudaFree(dev_chTables)); delete[] host_chTablesMirror; } CUDAutils::startTimer(startEventCpy); // Note: This should not be done in Bucket Elimination (COP) // No transfer needed (Device -> Host -> Device) cudaCheck(cudaFree(dev_table)); copy_time_ms += CUDAutils::stopTimer(startEventCpy, stopEventCpy); if (!preferences::silent) { printf("[GPU time] UTIL_%d Kernel compute: %.4f ms data transfer: %.4f ms\n", dpop_state.get_agent_id(), compute_time_ms, copy_time_ms); } } void GPU_DPOPutilPhase::setup_kernel(int _version_, DPOPstate& dpop_state) { nbThreads = 128; // Number of Parallel Threads per SM devPitch = 512; if (_version_ == 0 || _version_ == 1) { nbBlocks = (host_nTableRowsAfterProj % nbThreads == 0) ? 
(host_nTableRowsAfterProj / nbThreads) : (host_nTableRowsAfterProj / nbThreads) + 1; } else if (_version_ == 2 || _version_ == 3) { nbBlocks = (host_nTableRowsAfterProj % nbThreads == 0) ? (host_nTableRowsAfterProj / nbThreads) : (host_nTableRowsAfterProj / nbThreads) + 1; } // ----------------------------------------------------------------------- // // Shared memory // ----------------------------------------------------------------------- // sharedMem = 0; // rm binary constraints // if( has_unary ) // shared_mem += nb_worlds * d_size * sizeof(util_t); if (_version_ == 0) { sharedMem += nbThreads * dpop_state.get_dom_size() * sizeof(int); // __util_vector sharedMem += dpop_state.get_separator().size() * sizeof(int); } else if (_version_ == 1) { sharedMem += nbThreads * dpop_state.get_dom_size() * sizeof(int); // __util_vector sharedMem += dpop_state.get_separator().size() * sizeof(int); } else if (_version_ == 2) { sharedMem += dpop_state.get_separator().size() * sizeof(int); } assert(sharedMem <= CUDA::info::shared_memory); if (preferences::verbose) { printf("[GPU] Agent %d Shared Memory required %zu bytes \n", dpop_state.get_agent_id(), sharedMem); } } void GPU_DPOPutilPhase::set_device_table_size(int _version_, DPOPstate& dpop_state) { size_t cudaFreeMem = CUDAutils::get_nb_bytes_free_global_memory(); host_nBytes = 0; dev_nBytes = 0; dev_nTableRows = 0; if (_version_ == 0) { host_nBytes = host_nTableRowsAfterProj * sizeof(int); dev_nTableRows = host_nTableRowsAfterProj; } else if (_version_ == 1) { host_nBytes = host_nTableRowsAfterProj * sizeof(int); dev_nTableRows = host_nTableRowsAfterProj; // Add up the aggregated children Table memory and Remove it from CudaFreeMem size_t childrenMem = 0; std::vector<int> childrenId = dpop_state.getChildrenId(); for (int i = 0; i < childrenId.size(); i++) { childrenMem += dpop_state.getChildTableRows(childrenId[i]) * sizeof(int); if (preferences::verbose) { std::cout << "[GPU] Chid " << childrenId[i] << " required Mem: " << (childrenMem / 1e+6) << " MB\n"; } } cudaFreeMem -= childrenMem; } else if (_version_ == 2 || _version_ == 3) { host_nBytes = host_nTableRowsNoProj * sizeof(int); dev_nTableRows = host_nTableRowsNoProj; } dev_nBytes = host_nBytes; // We fit on Device whatever we can if (dev_nBytes >= cudaFreeMem) { dev_nTableRows = cudaFreeMem / sizeof(int); // ensure it's a multiple of d: int rem = dev_nTableRows % dpop_state.get_dom_size(); if (rem != 0) dev_nTableRows -= rem; dev_nBytes = dev_nTableRows * sizeof(int); } if (preferences::verbose) { if (_version_ == 0 || _version_ == 1) printf( "[GPU] Agent %d Util Table Memory Needed: %zu MB, [%d] free memory %zu MB \n", dpop_state.get_agent_id(), host_nBytes / 1e+6, host_nTableRowsAfterProj, cudaFreeMem / 1e+6); else if (_version_ == 2 || _version_ == 3) printf( "[GPU] Agent %d Util Table Memory Needed: %zu MB, [%d] free memory %zu MB \n", dpop_state.get_agent_id(), host_nBytes / 1e+6, host_nTableRowsNoProj, cudaFreeMem / 1e+6); } } void GPU_DPOPutilPhase::memcpyHtoD_children_tables(DPOPstate& dpop_state) { std::vector<int> childrenId = dpop_state.getChildrenId(); int nAgents = dpop_state.get_nb_dcop_agents(); checkCudaErrors(cudaMalloc(&dev_chTables, nAgents * sizeof(int*))); host_chTablesMirror = new int*[childrenId.size()]; for (int i = 0; i < childrenId.size(); i++) { int chId = childrenId[i]; size_t ch_table_bytes = dpop_state.getChildTableRows(chId) * sizeof(int); int* dev_tmp; cudaCheck(cudaMalloc(&dev_tmp, ch_table_bytes)); if (preferences::usePinnedMemory) { cudaCheck( 
cudaMemcpyAsync(dev_tmp, dpop_state.getChildTablePtr(chId), ch_table_bytes, cudaMemcpyHostToDevice, agtStream[chId])); associateVector<<<1,1,0,agtStream[chId]>>> (dev_chTables, chId, dev_tmp); } else { cudaCheck(cudaMemcpy(dev_tmp, dpop_state.getChildTablePtr(chId), ch_table_bytes, cudaMemcpyHostToDevice)); associateVector<<<1,1>>> (dev_chTables, chId, dev_tmp); cudaCheck(cudaDeviceSynchronize()); } host_chTablesMirror[i] = dev_tmp; } cudaCheck(cudaDeviceSynchronize()); } void GPU_DPOPutilPhase::execute_kernel(int _version_, DPOPstate& dpop_state, size_t nbBlocksShift, size_t rowsToCompute, size_t runningNbBlocks, int nbThreads, size_t sharedMem, cudaStream_t streamID) { if (_version_ == 0) { // std::cout << "Running version 0\n"; compute_util_table_ver_0<<< runningNbBlocks, nbThreads, sharedMem/*, streamID*/>>> (this->dev_table, nbBlocksShift, //nTableRowsComputed + nbBlocksCompleted, rowsToCompute,// dev_nTableRows, // [in pinned is: stream_nTableRows] dpop_state.get_agent_id(), dpop_state.get_dom_size(), dpop_state.get_separator().size(), dpop_state.get_nb_binary_constraints()); } else if (_version_ == 1) { if(dpop_state.is_root()) { compute_util_table_ver_1Root<<< runningNbBlocks, nbThreads, sharedMem, streamID>>> (this->dev_table, this->dev_chTables, nbBlocksShift,// [paged: nTableRowsComputed + nbBlocksCompleted], // [pinned: nTableRowsComputed[, rowsToCompute,// pinnde: stream_nTableRows, // paged: dev_nTableRows dpop_state.get_agent_id(), dpop_state.get_dom_size(), dpop_state.get_separator().size(), dpop_state.get_nb_binary_constraints(), dpop_state.get_children_info_size()); } else { compute_util_table_ver_1<<< runningNbBlocks, nbThreads, sharedMem, streamID>>> (this->dev_table, this->dev_chTables, nbBlocksShift,// [paged: nTableRowsComputed + nbBlocksCompleted], // [pinned: nTableRowsComputed[, rowsToCompute,// pinned: stream_nTableRows, // paged: this->dev_nTableRows dpop_state.get_agent_id(), dpop_state.get_dom_size(), dpop_state.get_separator().size(), dpop_state.get_nb_binary_constraints(), dpop_state.get_children_info_size()); } } else if (_version_ == 2) { compute_util_table_ver_2<<< runningNbBlocks, nbThreads, sharedMem, streamID >>> (this->dev_table, nbBlocksShift,// [paged: nTableRowsComputed + nbBlocksCompleted], // [pinned: nTableRowsComputed[, rowsToCompute,// stream_nTableRows, // paged: this->dev_nTableRows dpop_state.get_agent_id(), dpop_state.get_dom_size(), dpop_state.get_separator().size() + 1, dpop_state.get_nb_binary_constraints()); } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// __device__ __forceinline__ unsigned int lcuda_encode(int* t, int t_size, int d) { int _d = d; unsigned int ofs = t[--t_size]; #pragma unroll while (t_size > 0) { ofs += t[--t_size] * _d; _d *= d; } return ofs; } __device__ __forceinline__ unsigned int lcuda_fencode_next(int code, int t_size, int pos, int d) { return code + pow(d, t_size - pos - 1); } __device__ __forceinline__ void lcuda_decode(unsigned int code, int* t, int t_size, int d) { #pragma unroll for (int i = t_size - 1; i >= 0; i--) { t[i] = code % d; code /= d; } } __device__ __forceinline__ int lcuda_decode(const unsigned int& code, const int& pos, const int* dPow, const int& d) { return (code / dPow[pos]) % d; } __device__ __forceinline__ void lcuda_get_dPow(int* dPow, int dPow_size, int d) { if (dPow_size == 0) return; dPow[dPow_size - 1] = 1; #pragma unroll for (int i = dPow_size - 2; i >= 0; i--) { dPow[i] = dPow[i + 1] * d; } } 
//////////////////////////////////////////////////////////////////////////////// // NOTE: // Thread's private array definitely is stored at local memory space, in the DRAM off-the-chip, // and maybe cached in memory hierarchy. Generally, non-array variable are considered as virtual // registers in PTX and the number of registers in PTX are unlimited. However, obviously all these // virtual registers are not mapped to physical registers. A PTX postprocessor spills some registers // to local space according to the micro-architecture flags specified for NVCC, and optimizes the register usage. //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// // C U D A K E R N E L S ( UTIL Table computation ) //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// // V E R S I O N 0 (Leaves) //////////////////////////////////////////////////////////////////////////////////////// __global__ void compute_util_table_ver_0(int* utilTable, unsigned int block_shift, unsigned int dev_nTableRows, // after projection int aid, int d_size, int nb_var_sep, int nb_binary) { // ---------------------------------------------------------------------- // // Registers // ---------------------------------------------------------------------- // unsigned int utilTableRow = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int utilTableCode = block_shift + utilTableRow; if (utilTableRow >= dev_nTableRows) // Thread Guard return; int _i = 0, _di = 0, _id = 0, _x1 = 0, _x2 = 0; int _scope_x1, _scope_x2; // ---------------------------------------------------------------------- // // Shared Memory Allocation // ---------------------------------------------------------------------- // extern __shared__ int __smem[]; _i = 0; int* __util_vector = &__smem[_i + (threadIdx.x * d_size)]; _i += blockDim.x * d_size; int* __dPow = &__smem[_i]; lcuda_get_dPow(__dPow, nb_var_sep, d_size); __syncthreads(); // ---------------------------------------------------------------------- // // Compute Util Table Entry Value // ---------------------------------------------------------------------- // for (_i = 0; _i < d_size; _i++) __util_vector[_i] = 0; // ---------------------------------------------------------------------- // // Global Memory Links // ---------------------------------------------------------------------- // int* g_Container; // used for [g_constraint, g_children_info) int* g_UtilTable; // used for [g_con_utils, g_child_utils] g_Container = gdev_DPOP_Agents[aid].binary_con; //--------------------------------------------------------------------- // // Binary constraints //--------------------------------------------------------------------- // for (_i = 0; _i < nb_binary; _i++) { _id = g_Container[3 * _i]; // O(1) _x1 = g_Container[3 * _i + 1]; // . _x2 = g_Container[3 * _i + 2]; // . g_UtilTable = gdev_Constraints[_id].utils; // O(T) #pragma unroll for (_di = 0; _di < d_size; _di++) { _scope_x1 = _x1 == -1 ? _di : ((utilTableCode / __dPow[_x1]) % d_size); // ~ O(1) _scope_x2 = _x2 == -1 ? _di : ((utilTableCode / __dPow[_x2]) % d_size); // ~ O(1) // Two solutions to speed up this step: // 1. 
Align: if x1==-1 -> need transpose, so that threads can copy more data. // 2. Use d_size threads (BEST solution) Each thread read one cell and adds __util_vector[_di] += g_UtilTable[_scope_x1 * d_size + _scope_x2]; // O(T) (global mem) } } // get best util: _x2/*UTIL*/= UNSAT; #pragma unroll for (_di = 0; _di < d_size; _di++) _x2/*UTIL*/= fmax((double) _x2/*UTIL*/, (double) __util_vector[_di]); utilTable[utilTableRow] = _x2/*UTIL*/; // (global mem) } //////////////////////////////////////////////////////////////////////////////////////// // V E R S I O N 1 (Small (<2G) Util Tables (Children Table can be copied to Global Mem) //////////////////////////////////////////////////////////////////////////////////////// __global__ void compute_util_table_ver_1(int* utilTable, int** childIdToUtilTablePtr, // childUtilTableRows ? unsigned int block_shift, unsigned int dev_nTableRows, //int nbThreads, // each group computes a row (nb_worlds) int aid, int d_size, int nb_var_sep, //int max_ch_sep_size, int nb_binary, int children_info_size) { // bool is_root) { // ---------------------------------------------------------------------- // // Registers // ---------------------------------------------------------------------- // unsigned int utilTableRow = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int utilTableCode = block_shift + utilTableRow; if (utilTableRow >= dev_nTableRows) // Thread Guard return; int _i = 0, _j = 0, _k = 0, _di = 0, _id = 0, _x1 = 0, _x2 = 0; int _scope_x1, _scope_x2; int _ch_sep_size = 0; unsigned int _d_power = 1; // ---------------------------------------------------------------------- // // Shared Memory Allocation // ---------------------------------------------------------------------- // extern __shared__ int __smem[]; _j = 0; int* __util_vector = &__smem[_j + (threadIdx.x * d_size)]; _j += blockDim.x * d_size; int* __dPow = &__smem[_j]; lcuda_get_dPow(__dPow, nb_var_sep, d_size); __syncthreads(); // ---------------------------------------------------------------------- // // Compute Util Table Entry Value // ---------------------------------------------------------------------- // for (_i = 0; _i < d_size; _i++) __util_vector[_i] = 0; // ---------------------------------------------------------------------- // // Global Memory Links // ---------------------------------------------------------------------- // //int* g_constraint = gdev_DPOP_Agents[aid].binary_con; //int* g_children_info = gdev_DPOP_Agents[aid].children_info; int* g_Container; // used for [g_constraint, g_children_info) int* g_UtilTable; // used for [g_con_utils, g_child_utils] g_Container = gdev_DPOP_Agents[aid].binary_con; //--------------------------------------------------------------------- // // Binary constraints //--------------------------------------------------------------------- // for (_i = 0; _i < nb_binary; _i++) { _id = g_Container[3 * _i]; // O(1) _x1 = g_Container[3 * _i + 1]; // . _x2 = g_Container[3 * _i + 2]; // . //int* g_con_utils = gdev_Constraints[_id].utils; // O(T) g_UtilTable = gdev_Constraints[_id].utils; // O(T) #pragma unroll for (_di = 0; _di < d_size; _di++) { _scope_x1 = _x1 == -1 ? _di : ((utilTableCode / __dPow[_x1]) % d_size); // ~ O(1) _scope_x2 = _x2 == -1 ? _di : ((utilTableCode / __dPow[_x2]) % d_size); // ~ O(1) // Two solutions to speed up this step: // 1. Align: if x1==-1 -> need transpose, so that threads can copy more data. // 2. 
Use d_size threads (BEST solution) Each thread read one cell and adds __util_vector[_di] += g_UtilTable[_scope_x1 * d_size + _scope_x2]; // O(T) (global mem) } } g_Container = gdev_DPOP_Agents[aid].children_info; //--------------------------------------------------------------------- // // Messages from Children //--------------------------------------------------------------------- // _i = 0; //#pragma unroll while (_i < children_info_size) { _id = g_Container[_i++]; // O(1) _ch_sep_size = g_Container[_i++]; // O(1); if (_ch_sep_size <= 0) continue; _d_power = 1; _k = -1; //int* g_child_utils = childIdToUtilTablePtr[_id]; // O(1) g_UtilTable = childIdToUtilTablePtr[_id]; // O(1) // Do it in decreasing order to better compute the domain power _i = _i + _ch_sep_size - 1; _x2 = 0; // MAYBE CAN REM. J #pragma unroll for (_j = _ch_sep_size - 1; _j >= 0; _j--) { _x1 = g_Container[_i--]; // O(1) _k = (_x1 == -1) ? _j : _k; // index of sep_values (-1 if current agent) // Two solutions to speed it up: // * 1. Save it once in shared - reuse it for each _di // 2. Use d_size threads. _x2 += (_x1 == -1) ? 0 : ((utilTableCode / __dPow[_x1]) % d_size) * _d_power; // ~ O(1) _d_power *= d_size; } _i += _ch_sep_size + 1; _d_power = 1; #pragma unroll for (_j = 0; _j < _ch_sep_size - _k - 1; _j++) _d_power *= d_size; #pragma unroll for (_di = 0; _di < d_size; _di++) { __util_vector[_di] += g_UtilTable[_x2]; // O(T) _x2 += _d_power; } } // get best util: _x2/*UTIL*/= UNSAT; #pragma unroll for (_di = 0; _di < d_size; _di++) _x2/*UTIL*/= fmax((double) _x2/*UTIL*/, (double) __util_vector[_di]); utilTable[utilTableRow] = _x2/*UTIL*/; // (global mem) } //////////////////////////////////////////////////////////////////////////////////////// // V E R S I O N 1 (Small (<2G) Util Tables (Children Table can be copied to Global Mem) //////////////////////////////////////////////////////////////////////////////////////// __global__ void compute_util_table_ver_1Root(int* utilTable, int** childIdToUtilTablePtr, // childUtilTableRows ? 
unsigned int block_shift, unsigned int dev_nTableRows, //int nbThreads, // each group computes a row (nb_worlds) int aid, int d_size, int nb_var_sep, //int max_ch_sep_size, int nb_binary, int children_info_size) { // bool is_root) { // ---------------------------------------------------------------------- // // Registers // ---------------------------------------------------------------------- // unsigned int utilTableRow = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int utilTableCode = block_shift + utilTableRow; if (utilTableRow >= dev_nTableRows) return; int _i = 0, _j = 0, _k = 0, _di = 0, _id = 0, _x1 = 0, _x2 = 0; int _scope_x1, _scope_x2; int _ch_sep_size = 0; unsigned int _d_power = 1; // ---------------------------------------------------------------------- // // Shared Memory Allocation // ---------------------------------------------------------------------- // extern __shared__ int __smem[]; _j = 0; int* __util_vector = &__smem[_j + (threadIdx.x * d_size)]; _j += blockDim.x * d_size; int* __dPow = &__smem[_j]; lcuda_get_dPow(__dPow, nb_var_sep, d_size); __syncthreads(); // ---------------------------------------------------------------------- // // Compute Util Table Entry Value // ---------------------------------------------------------------------- // for (_i = 0; _i < d_size; _i++) __util_vector[_i] = 0; // ---------------------------------------------------------------------- // // Global Memory Links // ---------------------------------------------------------------------- // //int* g_constraint = gdev_DPOP_Agents[aid].binary_con; //int* g_children_info = gdev_DPOP_Agents[aid].children_info; int* g_Container; // used for [g_constraint, g_children_info) int* g_UtilTable; // used for [g_con_utils, g_child_utils] g_Container = gdev_DPOP_Agents[aid].binary_con; //--------------------------------------------------------------------- // // Binary constraints //--------------------------------------------------------------------- // for (_i = 0; _i < nb_binary; _i++) { _id = g_Container[3 * _i]; // O(1) _x1 = g_Container[3 * _i + 1]; // . _x2 = g_Container[3 * _i + 2]; // . //int* g_con_utils = gdev_Constraints[_id].utils; // O(T) g_UtilTable = gdev_Constraints[_id].utils; // O(T) #pragma unroll for (_di = 0; _di < d_size; _di++) { _scope_x1 = _x1 == -1 ? _di : ((utilTableCode / __dPow[_x1]) % d_size); // ~ O(1) _scope_x2 = _x2 == -1 ? _di : ((utilTableCode / __dPow[_x2]) % d_size); // ~ O(1) // Two solutions to speed up this step: // 1. Align: if x1==-1 -> need transpose, so that threads can copy more data. // 2. Use d_size threads (BEST solution) Each thread read one cell and adds __util_vector[_di] += g_UtilTable[_scope_x1 * d_size + _scope_x2]; // O(T) (global mem) } } g_Container = gdev_DPOP_Agents[aid].children_info; //--------------------------------------------------------------------- // // Messages from Children //--------------------------------------------------------------------- // _i = 0; //#pragma unroll while (_i < children_info_size) { _id = g_Container[_i++]; // O(1) _ch_sep_size = g_Container[_i++]; // O(1); if (_ch_sep_size <= 0) continue; _d_power = 1; _k = -1; //int* g_child_utils = childIdToUtilTablePtr[_id]; // O(1) g_UtilTable = childIdToUtilTablePtr[_id]; // O(1) // Do it in decreasing order to better compute the domain power _i = _i + _ch_sep_size - 1; _x2 = 0; #pragma unroll for (_j = _ch_sep_size - 1; _j >= 0; _j--) { _x1 = g_Container[_i--]; // O(1) _k = (_x1 == -1) ? 
_j : _k; // index of sep_values (-1 if current agent) // Two solutions to speed it up: // * 1. Save it once in shared - reuse it for each _di // 2. Use d_size threads. _x2 += (_x1 == -1) ? 0 : ((utilTableCode / __dPow[_x1]) % d_size) * _d_power; // ~ O(1) _d_power *= d_size; } _i += _ch_sep_size + 1; _d_power = 1; #pragma unroll for (_j = 0; _j < _ch_sep_size - _k - 1; _j++) _d_power *= d_size; #pragma unroll for (_di = 0; _di < d_size; _di++) { __util_vector[_di] += g_UtilTable[_x2]; // O(T) _x2 += _d_power; } } // get best util: _x2/*UTIL*/= UNSAT; #pragma unroll for (_di = 0; _di < d_size; _di++) _x2/*UTIL*/= fmax((double) _x2/*UTIL*/, (double) __util_vector[_di]); utilTable[utilTableRow] = _x2/*UTIL*/; // (global mem) for (_di = 0; _di < d_size; _di++) _x1/*best_di*/= _x2/*UTIL*/== __util_vector[_di] ? _di : _x1/*best_di*/; gdev_DPOP_Agents[aid].best_value = _x1/*best_di*/; gdev_DPOP_Agents[aid].best_util = _x2/*UTIL*/; } //////////////////////////////////////////////////////////////////////////////////////// // V E R S I O N 2 (Very Large (>2GB) Util Tables) //////////////////////////////////////////////////////////////////////////////////////// // Deals with a non-projected UTIL table. Each tread computes one row of the table // (all worlds associated to it). // One group computes 256 rows. __global__ void compute_util_table_ver_2(int* utilTable, unsigned int block_shift, unsigned int dev_nTableRows, // size after projection int aid, int d_size, int nb_var_sep, int nb_binary) { // ---------------------------------------------------------------------- // // Registers // ---------------------------------------------------------------------- // unsigned int utilTableRow = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int utilTableCode = block_shift + utilTableRow; if (utilTableRow >= dev_nTableRows) // Thread Guard return; int _i = 0, _id = 0, _x1 = 0, _x2 = 0, _util = 0; int _scope_x1, _scope_x2; // ---------------------------------------------------------------------- // // Shared Memory Allocation // ---------------------------------------------------------------------- // extern __shared__ int __dPow[]; lcuda_get_dPow(__dPow, nb_var_sep, d_size); __syncthreads(); // ---------------------------------------------------------------------- // // Global Memory Links // ---------------------------------------------------------------------- // int* g_Container; // used for [g_constraint, g_children_info) int* g_UtilTable; // used for [g_con_utils, g_child_utils] g_Container = gdev_DPOP_Agents[aid].binary_con; //--------------------------------------------------------------------- // // Binary constraints //--------------------------------------------------------------------- // #pragma unroll for (_i = 0; _i < nb_binary; _i++) { _id = g_Container[3 * _i]; // O(1) _x1 = g_Container[3 * _i + 1]; // . _x2 = g_Container[3 * _i + 2]; // . g_UtilTable = gdev_Constraints[_id].utils; // O(T) // Update _x1, _x2 (This variable domain are stored in the last // position of the UtilTable before projection) _x1 = _x1 == -1 ? (nb_var_sep - 1) : _x1; _x2 = _x2 == -1 ? (nb_var_sep - 1) : _x2; _scope_x1 = ((utilTableCode / __dPow[_x1]) % d_size); // ~ O(1) _scope_x2 = ((utilTableCode / __dPow[_x2]) % d_size); // ~ O(1) /*UTIL tmp*/ _x1 = g_UtilTable[_scope_x1 * d_size + _scope_x2]; // O(T) _util = (_x1 == UNSAT || _util == UNSAT) ? 
UNSAT : _util + /*UTIL tmp*/_x1; } utilTable[utilTableRow] = _util; // (global mem) } // @Deprecated __global__ void project_util_table(int *dev_table, unsigned int host_nTableRowsNoProj, int domSize) { // Thread 0 -> take first (domSize rows) and all worlds unsigned int _start_table_row = threadIdx.x * domSize + blockIdx.x * blockDim.x * domSize; unsigned int _end_table_row = _start_table_row + domSize - 1; if (_end_table_row > host_nTableRowsNoProj) return; // This should never happen // unsigned int _table_row_after_proj = threadIdx.x + blockIdx.x * blockDim.x; int _util_di, _best_util, _w, _d; _best_util = UNSAT; _util_di = UNSAT; #pragma unroll for (_d = 0; _d < domSize; _d++) { _util_di = dev_table[_start_table_row + _d]; if (_util_di != UNSAT && _util_di > _best_util) _best_util = _util_di; } dev_table[_start_table_row] = _best_util; }
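A quick note on the row indexing the UTIL-table kernels above rely on: an assignment to the separator variables is packed into a single table row in base d (most significant variable first), which is exactly what lcuda_encode, lcuda_decode and lcuda_get_dPow do on the device. The host-side sketch below mirrors that arithmetic so it can be sanity-checked without a GPU; the encode/decode names and the d = 3, n = 4 figures are illustrative only and not part of the file.

// Host mirror of the mixed-radix encoding used for UTIL-table rows.
#include <cstdio>
#include <cassert>

static unsigned int encode(const int* t, int n, int d) {
    unsigned int code = 0;
    for (int i = 0; i < n; ++i) code = code * d + t[i];   // base-d positional code
    return code;
}

static void decode(unsigned int code, int* t, int n, int d) {
    for (int i = n - 1; i >= 0; --i) { t[i] = code % d; code /= d; }
}

int main() {
    const int d = 3, n = 4;                  // domain size and separator length (made up)
    int tuple[n] = {2, 0, 1, 2};             // one assignment of the separator variables
    unsigned int row = encode(tuple, n, d);  // 2*27 + 0*9 + 1*3 + 2 = 59
    int back[n];
    decode(row, back, n, d);
    for (int i = 0; i < n; ++i) assert(back[i] == tuple[i]);
    printf("row %u decodes back to the same tuple\n", row);
    return 0;
}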
7c4e300f152c8348c4e0bc00fb103e652f29dfdf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, long *target, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - TH_INDEX_BASE; assert(t >= 0 && t < n_classes); cur_weight = weights ? weights[t] : 1.0f; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, long *target, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - TH_INDEX_BASE; assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm; } } void THNN_CudaSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaLongTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (weights && THCudaTensor_nElement(state, weights) != THCudaTensor_size(state, input, 1)) { THError("weight tensor should be defined either for all or no classes"); } if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? 
THCudaTensor_data(state, weights) : NULL; long *target_data = THCudaLongTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaLongTensor_size(state, target, 0); long map_nelem = THCudaLongTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateOutput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(hipGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaLongTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights && THCudaTensor_nElement(state, weights) != THCudaTensor_size(state, input, 1)) { THError("weight tensor should be defined either for all or no classes"); } if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); long *target_data = THCudaLongTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaLongTensor_size(state, target, 0); long map_nelem = THCudaLongTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateGradInput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(hipGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); }
7c4e300f152c8348c4e0bc00fb103e652f29dfdf.cu
#include "THCUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, long *target, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - TH_INDEX_BASE; assert(t >= 0 && t < n_classes); cur_weight = weights ? weights[t] : 1.0f; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, long *target, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - TH_INDEX_BASE; assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm; } } void THNN_CudaSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaLongTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (weights && THCudaTensor_nElement(state, weights) != THCudaTensor_size(state, input, 1)) { THError("weight tensor should be defined either for all or no classes"); } if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? 
THCudaTensor_data(state, weights) : NULL; long *target_data = THCudaLongTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaLongTensor_size(state, target, 0); long map_nelem = THCudaLongTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); cunn_SpatialClassNLLCriterion_updateOutput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(cudaGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaLongTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights && THCudaTensor_nElement(state, weights) != THCudaTensor_size(state, input, 1)) { THError("weight tensor should be defined either for all or no classes"); } if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); long *target_data = THCudaLongTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaLongTensor_size(state, target, 0); long map_nelem = THCudaLongTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; cunn_SpatialClassNLLCriterion_updateGradInput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(cudaGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); }
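The way both kernels carve up the grid is worth spelling out: every sample in the batch owns blocks_per_sample consecutive blocks, and each thread then strides through that sample's H*W map with a step of blockDim.x * blocks_per_sample. The host-side sketch below reproduces that arithmetic for one (block, thread) pair; the thread count of 1024 stands in for CUDA_NUM_THREADS and the sizes are made up, so treat the numbers as illustrative.

// Reproduces the sample/offset arithmetic used by the kernels above.
#include <cstdio>

int main() {
    const int threads = 1024;                 // stand-in for CUDA_NUM_THREADS (assumption)
    const int map_nelem = 512 * 512;          // H*W of one target map (made up)
    const int batch_size = 4;

    int blocks_per_sample = ((map_nelem + threads - 1) / threads) / 128;  // mirrors GET_BLOCKS(...)/128
    if (blocks_per_sample == 0) blocks_per_sample = 1;
    int total_blocks = blocks_per_sample * batch_size;

    // Which elements does thread 3 of block 5 visit?
    int blockIdx_x = 5, threadIdx_x = 3;
    int sample  = blockIdx_x / blocks_per_sample;                       // batch element this block serves
    int first_i = (blockIdx_x % blocks_per_sample) * threads + threadIdx_x;
    int step    = threads * blocks_per_sample;                          // stride within the sample's map
    printf("total_blocks=%d sample=%d first=%d step=%d\n", total_blocks, sample, first_i, step);
    return 0;
}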
c55259e86178dedd517a3e0c394f71c1d84c30ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "CpuGpuMat.h" #include "KernelBatchNormalization.cuh" #include <math.h> __global__ void gpuBatchNorm(float* gpuResult, float* gpuBeta, float* gpuGamma, float* gpuMovingMean, float* gpuMovingVar, float epsilon, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { gpuResult[id] = (gpuResult[id] - gpuMovingMean[id]) / sqrt(gpuMovingVar[id] + epsilon) * gpuGamma[id] + gpuBeta[id]; } } void gpuBatchNormalization(CpuGpuMat* result, CpuGpuMat* beta, CpuGpuMat* gamma, CpuGpuMat* movingMean, CpuGpuMat* movingVariance, float epsilon) { int threadsPerBlock = 32; int blocksPerGrid = ceil(double(beta->Size) / double(threadsPerBlock)); gpuBatchNorm << < blocksPerGrid, threadsPerBlock >> > ((float*)result->GpuP, (float*)beta->GpuP, (float*)gamma->GpuP, (float*)movingMean->GpuP, (float*)movingVariance->GpuP, epsilon, beta->Size); }
c55259e86178dedd517a3e0c394f71c1d84c30ba.cu
#include "device_launch_parameters.h" #include "CpuGpuMat.h" #include "KernelBatchNormalization.cuh" #include <math.h> __global__ void gpuBatchNorm(float* gpuResult, float* gpuBeta, float* gpuGamma, float* gpuMovingMean, float* gpuMovingVar, float epsilon, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { gpuResult[id] = (gpuResult[id] - gpuMovingMean[id]) / sqrt(gpuMovingVar[id] + epsilon) * gpuGamma[id] + gpuBeta[id]; } } void gpuBatchNormalization(CpuGpuMat* result, CpuGpuMat* beta, CpuGpuMat* gamma, CpuGpuMat* movingMean, CpuGpuMat* movingVariance, float epsilon) { int threadsPerBlock = 32; int blocksPerGrid = ceil(double(beta->Size) / double(threadsPerBlock)); gpuBatchNorm << < blocksPerGrid, threadsPerBlock >> > ((float*)result->GpuP, (float*)beta->GpuP, (float*)gamma->GpuP, (float*)movingMean->GpuP, (float*)movingVariance->GpuP, epsilon, beta->Size); }
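For reference, gpuBatchNorm applies the standard inference-mode batch-norm transform element by element, with beta, gamma, moving mean and moving variance indexed by the same id as the output. A scalar CPU version of the same formula, handy for spot-checking a few outputs, might look like the sketch below; the function name and the sample numbers are mine.

// CPU reference for the expression computed per element by gpuBatchNorm.
#include <cmath>
#include <cstdio>

static float batchNormRef(float x, float mean, float var, float gamma, float beta, float eps) {
    return (x - mean) / std::sqrt(var + eps) * gamma + beta;   // same expression as the kernel
}

int main() {
    float x = 0.8f, mean = 0.5f, var = 0.04f, gamma = 2.0f, beta = 0.1f, eps = 1e-5f;
    printf("expected output: %f\n", batchNormRef(x, mean, var, gamma, beta, eps));
    // (0.8 - 0.5) / sqrt(0.04 + 1e-5) * 2 + 0.1 is roughly 3.10
    return 0;
}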
b0d022a6f2875072e0fa6ac8428363d43f0dbd43.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Device.h" unsigned int blockSize = 16; unsigned int blockSizeSqrt = 4; __global__ void Device::Workers::multiplyWorker(float* source0, float* source1, float* destination, int rows0, int cols0, int cols1){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.; if (col < cols1 && row < rows0) { for (int i = 0; i < cols0; i++) { sum += source0[row * cols0 + i] * source1[i * cols1 + col]; } destination[row * cols1 + col] = sum; } } __global__ void Device::Workers::multiplyWorkerScalar(float* source0, float scalar, float* destination, int rows, int cols) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < cols && row < rows) { destination[row * cols + col] = source0[row * cols + col] * scalar; } } __global__ void Device::Workers::transposeWorker(float* source0, float* destination, int rows, int cols) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < cols && row < rows) { destination[col * rows + row] = source0[row * cols + col]; } } __global__ void Device::Workers::addWorker(float* source0, float* source1, float* destination, int rows, int cols) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < cols && row < rows) { destination[row * cols + col] = source0[row * cols + col] + source1[row * cols + col]; } } __global__ void Device::Workers::substractWorker(float* source0, float* source1, float* destination, int rows, int cols) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < cols && row < rows) { destination[row * cols + col] = source0[row * cols + col] - source1[row * cols + col]; } } clock_t Device::multiply(float* source0, float* source1, float* destination, unsigned int rows0, unsigned int cols0, unsigned int cols1){ clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceSource1, * deviceDestination; hipMalloc((void**)& deviceSource0, sizeof(float) * rows0 * cols0); hipMalloc((void**)& deviceSource1, sizeof(float) * cols0 * cols1); hipMalloc((void**)& deviceDestination, sizeof(float) * rows0 * cols1); //Copy matrix hipMemcpy(deviceSource0, source0, sizeof(float) * rows0 * cols0, hipMemcpyHostToDevice); hipMemcpy(deviceSource1, source1, sizeof(float) * cols0 * cols1, hipMemcpyHostToDevice); unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols; if (rows0 / gridCols > cols1 / gridCols) { grideviceDestinationols = rows0 / gridCols; if (rows0 % gridCols != 0) { grideviceDestinationols++; } } else { grideviceDestinationols = cols1 / gridCols; if (cols1 % gridCols != 0) { grideviceDestinationols++; } } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols,1); Device::Workers::multiplyWorker << <dimGrid, dimBlock >> > (deviceSource0, deviceSource1, deviceDestination, rows0, cols0, cols1); // Transfer results hipMemcpy(destination, deviceDestination, sizeof(float) * rows0 * cols1, hipMemcpyDeviceToHost); destination = deviceDestination; hipDeviceSynchronize(); return clock() - start; } clock_t Device::multiply(float* source0, float scalar, float* destination, int rows, int cols) { clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceDestination; hipMalloc((void**)& deviceSource0, 
sizeof(float) * rows * cols); hipMalloc((void**)& deviceDestination, sizeof(float) * rows * cols); //Copy matrix hipMemcpy(deviceSource0, source0, sizeof(float) * rows * cols, hipMemcpyHostToDevice); unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols = rows / gridCols; if (rows % gridCols != 0) { grideviceDestinationols++; } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols, 1); Device::Workers::multiplyWorkerScalar << <dimGrid, dimBlock >> > (deviceSource0, scalar, deviceDestination, rows, cols); // Transfer results hipMemcpy(destination, deviceDestination, sizeof(float) * rows * cols, hipMemcpyDeviceToHost); destination = deviceDestination; hipDeviceSynchronize(); return clock() - start; } clock_t Device::transpose(float* source0, float* destination, unsigned int rows, unsigned int cols) { clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceDestination; hipMalloc((void**)& deviceSource0, sizeof(float) * rows * cols); hipMalloc((void**)& deviceDestination, sizeof(float) * rows * cols); //Copy matrix hipMemcpy(deviceSource0, source0, sizeof(float) * rows * cols, hipMemcpyHostToDevice); unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols = rows / gridCols; if (rows % gridCols != 0) { grideviceDestinationols++; } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols, 1); Device::Workers::transposeWorker<< <dimGrid, dimBlock >> > (deviceSource0, deviceDestination, rows, cols); // Transfer results hipMemcpy(destination, deviceDestination, sizeof(float) * rows * cols, hipMemcpyDeviceToHost); destination = deviceDestination; hipDeviceSynchronize(); return clock() - start; } clock_t Device::add(float* source0, float* source1, float* destination, unsigned int rows, unsigned int cols) { clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceSource1, * deviceDestination; hipMalloc((void**)& deviceSource0, sizeof(float) * rows * cols); hipMalloc((void**)& deviceSource1, sizeof(float) * rows * rows); hipMalloc((void**)& deviceDestination, sizeof(float) * rows * cols); //Copy matrix hipMemcpy(deviceSource0, source0, sizeof(float) * rows * cols, hipMemcpyHostToDevice); hipMemcpy(deviceSource1, source1, sizeof(float) * rows * cols, hipMemcpyHostToDevice); unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols = rows / gridCols; if (rows % gridCols != 0) { grideviceDestinationols++; } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols, 1); Device::Workers::addWorker << <dimGrid, dimBlock >> > (deviceSource0, deviceSource1, deviceDestination, rows, cols); // Transfer results hipMemcpy(destination, deviceDestination, sizeof(float) * rows * cols, hipMemcpyDeviceToHost); destination = deviceDestination; hipDeviceSynchronize(); return clock() - start; } clock_t Device::substract(float* source0, float* source1, float* destination, unsigned int rows, unsigned int cols) { clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceSource1, * deviceDestination; hipMalloc((void**)& deviceSource0, sizeof(float) * rows * cols); hipMalloc((void**)& deviceSource1, sizeof(float) * rows * rows); hipMalloc((void**)& deviceDestination, sizeof(float) * rows * cols); //Copy matrix hipMemcpy(deviceSource0, source0, sizeof(float) * rows * cols, hipMemcpyHostToDevice); hipMemcpy(deviceSource1, source1, sizeof(float) * 
rows * cols, hipMemcpyHostToDevice); unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols = rows / gridCols; if (rows % gridCols != 0) { grideviceDestinationols++; } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols, 1); Device::Workers::substractWorker << <dimGrid, dimBlock >> > (deviceSource0, deviceSource1, deviceDestination, rows, cols); // Transfer results hipMemcpy(destination, deviceDestination, sizeof(float) * rows * cols, hipMemcpyDeviceToHost); destination = deviceDestination; hipDeviceSynchronize(); return clock() - start; }
b0d022a6f2875072e0fa6ac8428363d43f0dbd43.cu
#include "Device.h" unsigned int blockSize = 16; unsigned int blockSizeSqrt = 4; __global__ void Device::Workers::multiplyWorker(float* source0, float* source1, float* destination, int rows0, int cols0, int cols1){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.; if (col < cols1 && row < rows0) { for (int i = 0; i < cols0; i++) { sum += source0[row * cols0 + i] * source1[i * cols1 + col]; } destination[row * cols1 + col] = sum; } } __global__ void Device::Workers::multiplyWorkerScalar(float* source0, float scalar, float* destination, int rows, int cols) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < cols && row < rows) { destination[row * cols + col] = source0[row * cols + col] * scalar; } } __global__ void Device::Workers::transposeWorker(float* source0, float* destination, int rows, int cols) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < cols && row < rows) { destination[col * rows + row] = source0[row * cols + col]; } } __global__ void Device::Workers::addWorker(float* source0, float* source1, float* destination, int rows, int cols) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < cols && row < rows) { destination[row * cols + col] = source0[row * cols + col] + source1[row * cols + col]; } } __global__ void Device::Workers::substractWorker(float* source0, float* source1, float* destination, int rows, int cols) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < cols && row < rows) { destination[row * cols + col] = source0[row * cols + col] - source1[row * cols + col]; } } clock_t Device::multiply(float* source0, float* source1, float* destination, unsigned int rows0, unsigned int cols0, unsigned int cols1){ clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceSource1, * deviceDestination; cudaMalloc((void**)& deviceSource0, sizeof(float) * rows0 * cols0); cudaMalloc((void**)& deviceSource1, sizeof(float) * cols0 * cols1); cudaMalloc((void**)& deviceDestination, sizeof(float) * rows0 * cols1); //Copy matrix cudaMemcpy(deviceSource0, source0, sizeof(float) * rows0 * cols0, cudaMemcpyHostToDevice); cudaMemcpy(deviceSource1, source1, sizeof(float) * cols0 * cols1, cudaMemcpyHostToDevice); unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols; if (rows0 / gridCols > cols1 / gridCols) { grideviceDestinationols = rows0 / gridCols; if (rows0 % gridCols != 0) { grideviceDestinationols++; } } else { grideviceDestinationols = cols1 / gridCols; if (cols1 % gridCols != 0) { grideviceDestinationols++; } } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols,1); Device::Workers::multiplyWorker << <dimGrid, dimBlock >> > (deviceSource0, deviceSource1, deviceDestination, rows0, cols0, cols1); // Transfer results cudaMemcpy(destination, deviceDestination, sizeof(float) * rows0 * cols1, cudaMemcpyDeviceToHost); destination = deviceDestination; cudaDeviceSynchronize(); return clock() - start; } clock_t Device::multiply(float* source0, float scalar, float* destination, int rows, int cols) { clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceDestination; cudaMalloc((void**)& deviceSource0, sizeof(float) * rows * cols); cudaMalloc((void**)& deviceDestination, 
sizeof(float) * rows * cols); //Copy matrix cudaMemcpy(deviceSource0, source0, sizeof(float) * rows * cols, cudaMemcpyHostToDevice); unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols = rows / gridCols; if (rows % gridCols != 0) { grideviceDestinationols++; } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols, 1); Device::Workers::multiplyWorkerScalar << <dimGrid, dimBlock >> > (deviceSource0, scalar, deviceDestination, rows, cols); // Transfer results cudaMemcpy(destination, deviceDestination, sizeof(float) * rows * cols, cudaMemcpyDeviceToHost); destination = deviceDestination; cudaDeviceSynchronize(); return clock() - start; } clock_t Device::transpose(float* source0, float* destination, unsigned int rows, unsigned int cols) { clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceDestination; cudaMalloc((void**)& deviceSource0, sizeof(float) * rows * cols); cudaMalloc((void**)& deviceDestination, sizeof(float) * rows * cols); //Copy matrix cudaMemcpy(deviceSource0, source0, sizeof(float) * rows * cols, cudaMemcpyHostToDevice); unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols = rows / gridCols; if (rows % gridCols != 0) { grideviceDestinationols++; } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols, 1); Device::Workers::transposeWorker<< <dimGrid, dimBlock >> > (deviceSource0, deviceDestination, rows, cols); // Transfer results cudaMemcpy(destination, deviceDestination, sizeof(float) * rows * cols, cudaMemcpyDeviceToHost); destination = deviceDestination; cudaDeviceSynchronize(); return clock() - start; } clock_t Device::add(float* source0, float* source1, float* destination, unsigned int rows, unsigned int cols) { clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceSource1, * deviceDestination; cudaMalloc((void**)& deviceSource0, sizeof(float) * rows * cols); cudaMalloc((void**)& deviceSource1, sizeof(float) * rows * rows); cudaMalloc((void**)& deviceDestination, sizeof(float) * rows * cols); //Copy matrix cudaMemcpy(deviceSource0, source0, sizeof(float) * rows * cols, cudaMemcpyHostToDevice); cudaMemcpy(deviceSource1, source1, sizeof(float) * rows * cols, cudaMemcpyHostToDevice); unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols = rows / gridCols; if (rows % gridCols != 0) { grideviceDestinationols++; } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols, 1); Device::Workers::addWorker << <dimGrid, dimBlock >> > (deviceSource0, deviceSource1, deviceDestination, rows, cols); // Transfer results cudaMemcpy(destination, deviceDestination, sizeof(float) * rows * cols, cudaMemcpyDeviceToHost); destination = deviceDestination; cudaDeviceSynchronize(); return clock() - start; } clock_t Device::substract(float* source0, float* source1, float* destination, unsigned int rows, unsigned int cols) { clock_t start = clock(); //Memory allocation on the deive float* deviceSource0, * deviceSource1, * deviceDestination; cudaMalloc((void**)& deviceSource0, sizeof(float) * rows * cols); cudaMalloc((void**)& deviceSource1, sizeof(float) * rows * rows); cudaMalloc((void**)& deviceDestination, sizeof(float) * rows * cols); //Copy matrix cudaMemcpy(deviceSource0, source0, sizeof(float) * rows * cols, cudaMemcpyHostToDevice); cudaMemcpy(deviceSource1, source1, sizeof(float) * rows * cols, cudaMemcpyHostToDevice); 
unsigned int gridCols = blockSizeSqrt; unsigned int grideviceDestinationols = rows / gridCols; if (rows % gridCols != 0) { grideviceDestinationols++; } dim3 dimGrid(grideviceDestinationols, grideviceDestinationols, 1); dim3 dimBlock(gridCols, gridCols, 1); Device::Workers::substractWorker << <dimGrid, dimBlock >> > (deviceSource0, deviceSource1, deviceDestination, rows, cols); // Transfer results cudaMemcpy(destination, deviceDestination, sizeof(float) * rows * cols, cudaMemcpyDeviceToHost); destination = deviceDestination; cudaDeviceSynchronize(); return clock() - start; }
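Two things stand out in the file above: none of the host wrappers ever frees its device buffers, and the closing destination = deviceDestination; only overwrites the local copy of the pointer, so the caller never sees it. The standalone sketch below shows the usual allocate / copy / launch / copy-back / free life cycle for an element-wise kernel; the kernel and sizes are illustrative and do not come from Device.h.

// Minimal allocate / copy / launch / copy-back / free round trip.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void addKernel(const float* a, const float* b, float* c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main() {
    const int n = 1 << 10;
    const size_t bytes = n * sizeof(float);
    float *hA = new float[n], *hB = new float[n], *hC = new float[n];
    for (int i = 0; i < n; ++i) { hA[i] = 1.0f; hB[i] = 2.0f; }

    float *dA, *dB, *dC;
    cudaMalloc(&dA, bytes); cudaMalloc(&dB, bytes); cudaMalloc(&dC, bytes);
    cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, bytes, cudaMemcpyHostToDevice);

    addKernel<<<(n + 255) / 256, 256>>>(dA, dB, dC, n);
    cudaMemcpy(hC, dC, bytes, cudaMemcpyDeviceToHost);   // copy-back also synchronizes

    printf("hC[0] = %f (expected 3.0)\n", hC[0]);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);             // release device buffers
    delete[] hA; delete[] hB; delete[] hC;
    return 0;
}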
30533b238080465d3b1c46dbd9402e3e4a0ec074.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "TemporalConvolutionTBC_bp_bias.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *matrix = NULL; hipMalloc(&matrix, XSIZE*YSIZE); float *target = NULL; hipMalloc(&target, XSIZE*YSIZE); int rows = XSIZE; int stride = 2; float scale = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( TemporalConvolutionTBC_bp_bias), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,target,rows,stride,scale); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( TemporalConvolutionTBC_bp_bias), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,target,rows,stride,scale); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( TemporalConvolutionTBC_bp_bias), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,target,rows,stride,scale); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
30533b238080465d3b1c46dbd9402e3e4a0ec074.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "TemporalConvolutionTBC_bp_bias.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *matrix = NULL; cudaMalloc(&matrix, XSIZE*YSIZE); float *target = NULL; cudaMalloc(&target, XSIZE*YSIZE); int rows = XSIZE; int stride = 2; float scale = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); TemporalConvolutionTBC_bp_bias<<<gridBlock,threadBlock>>>(matrix,target,rows,stride,scale); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { TemporalConvolutionTBC_bp_bias<<<gridBlock,threadBlock>>>(matrix,target,rows,stride,scale); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { TemporalConvolutionTBC_bp_bias<<<gridBlock,threadBlock>>>(matrix,target,rows,stride,scale); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
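One caveat about the timing loop above: the end timestamp is taken right after the launches are queued, with no synchronization, so it mostly measures launch overhead rather than kernel execution. Where execution time is the quantity of interest, event-based timing is the usual alternative; a sketch with a placeholder kernel is below.

// Event-based timing of a batch of kernel launches.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void emptyKernel() {}   // placeholder for the kernel being benchmarked

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    emptyKernel<<<1, 32>>>();            // warm-up launch
    cudaDeviceSynchronize();

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) emptyKernel<<<1, 32>>>();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // wait for the timed work to finish

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("1000 launches took %.3f ms on the GPU timeline\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}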
57497edb4e96a82d7f175581b7f66e52eb5bb64d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <time.h> #include <sys/time.h> #include <hiprand/hiprand_kernel.h> #define D 5 #define TRIALS_PER_THREAD 2048 #define BLOCKS 256 #define THREADS 256 __global__ void mc_int(double *res, hiprandState_t *states) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; double integral = 0.0; double X[D]; hiprand_init(tid, 0, 0, &states[tid]); for (int i = 0; i < TRIALS_PER_THREAD; i++) { for (int j = 0; j < D; j++) { X[j] = hiprand_uniform(&states[tid]); } double t = 0.0; for (int j = 0; j < D; j++) { t -= X[j] * X[j]; } integral += exp(t) / TRIALS_PER_THREAD; } res[tid] = integral; } int main(int argc, char **argv) { double host[BLOCKS * THREADS]; double *dev; hiprandState_t *states; double integral = 0.0; double vol = 1.0; clock_t ts = clock(); struct timeval start, end; gettimeofday(&start, NULL); hipMalloc((void**) &dev, BLOCKS * THREADS * sizeof(double)); hipMalloc((void**)&states, BLOCKS * THREADS * sizeof(hiprandState_t)); hipLaunchKernelGGL(( mc_int), dim3(BLOCKS), dim3(THREADS), 0, 0, dev, states); hipMemcpy(host, dev, BLOCKS * THREADS * sizeof(double), hipMemcpyDeviceToHost); for(int i = 0; i < BLOCKS * THREADS; i++) { integral += host[i]; } integral /= BLOCKS * THREADS; for (int j = 0; j < D; j++) { vol *= 1.0; } integral *= vol; gettimeofday(&end, NULL); double elapsed = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6; ts = clock() - ts; printf("%ld clocks (%lf seconds)\n", ts, elapsed); printf("integral is: %lf\n", integral); hipFree(dev); hipFree(states); }
57497edb4e96a82d7f175581b7f66e52eb5bb64d.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <time.h> #include <sys/time.h> #include <curand_kernel.h> #define D 5 #define TRIALS_PER_THREAD 2048 #define BLOCKS 256 #define THREADS 256 __global__ void mc_int(double *res, curandState *states) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; double integral = 0.0; double X[D]; curand_init(tid, 0, 0, &states[tid]); for (int i = 0; i < TRIALS_PER_THREAD; i++) { for (int j = 0; j < D; j++) { X[j] = curand_uniform(&states[tid]); } double t = 0.0; for (int j = 0; j < D; j++) { t -= X[j] * X[j]; } integral += exp(t) / TRIALS_PER_THREAD; } res[tid] = integral; } int main(int argc, char **argv) { double host[BLOCKS * THREADS]; double *dev; curandState *states; double integral = 0.0; double vol = 1.0; clock_t ts = clock(); struct timeval start, end; gettimeofday(&start, NULL); cudaMalloc((void**) &dev, BLOCKS * THREADS * sizeof(double)); cudaMalloc((void**)&states, BLOCKS * THREADS * sizeof(curandState)); mc_int<<<BLOCKS, THREADS>>>(dev, states); cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(double), cudaMemcpyDeviceToHost); for(int i = 0; i < BLOCKS * THREADS; i++) { integral += host[i]; } integral /= BLOCKS * THREADS; for (int j = 0; j < D; j++) { vol *= 1.0; } integral *= vol; gettimeofday(&end, NULL); double elapsed = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6; ts = clock() - ts; printf("%ld clocks (%lf seconds)\n", ts, elapsed); printf("integral is: %lf\n", integral); cudaFree(dev); cudaFree(states); }
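The quantity being estimated has a closed form: the integrand exp(-(x1^2 + ... + x5^2)) over the unit hypercube factorizes, so the integral equals (integral from 0 to 1 of e^(-x^2) dx)^5 = ((sqrt(pi)/2) * erf(1))^5, roughly 0.2323. The small host program below prints that value so the Monte Carlo estimate can be checked against it.

// Closed-form value of the integral estimated by mc_int.
#include <cmath>
#include <cstdio>

int main() {
    const int dims = 5;                                    // must match D above
    double pi = std::acos(-1.0);
    double one_dim = std::sqrt(pi) / 2.0 * std::erf(1.0);  // integral of e^(-x^2) on [0,1]
    printf("exact value: %.6f\n", std::pow(one_dim, dims));
    return 0;
}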
5d6075ae064c1e6a472d40b3c1f8d90eccf99641.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define MAX_DELAY 30 #include <time.h> #include <sys/time.h> #define USECPSEC 1000000ULL unsigned long long dtime_usec(unsigned long long start){ timeval tv; gettimeofday(&tv, 0); return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start; } #define APPRX_CLKS_PER_SEC 1000000000ULL __global__ void delay_kernel(unsigned seconds){ unsigned long long dt = clock64(); while (clock64() < (dt + (seconds*APPRX_CLKS_PER_SEC))); } int main(int argc, char *argv[]){ unsigned delay_t = 5; // seconds, approximately // unsigned delay_t_r; // if (argc > 1) delay_t_r = atoi(argv[1]); // if ((delay_t_r > 0) && (delay_t_r < MAX_DELAY)) delay_t = delay_t_r; unsigned long long difft = dtime_usec(0); hipLaunchKernelGGL(( delay_kernel), dim3(1),dim3(1), 0, 0, delay_t); hipDeviceSynchronize(); difft = dtime_usec(difft); printf("kernel duration: %fs\n", difft/(float)USECPSEC); return 0; }
5d6075ae064c1e6a472d40b3c1f8d90eccf99641.cu
#include <stdio.h> #include <stdlib.h> #define MAX_DELAY 30 #include <time.h> #include <sys/time.h> #define USECPSEC 1000000ULL unsigned long long dtime_usec(unsigned long long start){ timeval tv; gettimeofday(&tv, 0); return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start; } #define APPRX_CLKS_PER_SEC 1000000000ULL __global__ void delay_kernel(unsigned seconds){ unsigned long long dt = clock64(); while (clock64() < (dt + (seconds*APPRX_CLKS_PER_SEC))); } int main(int argc, char *argv[]){ unsigned delay_t = 5; // seconds, approximately // unsigned delay_t_r; // if (argc > 1) delay_t_r = atoi(argv[1]); // if ((delay_t_r > 0) && (delay_t_r < MAX_DELAY)) delay_t = delay_t_r; unsigned long long difft = dtime_usec(0); delay_kernel<<<1,1>>>(delay_t); cudaDeviceSynchronize(); difft = dtime_usec(difft); printf("kernel duration: %fs\n", difft/(float)USECPSEC); return 0; }
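APPRX_CLKS_PER_SEC hard-codes roughly 1 GHz, so the busy-wait only approximates the requested number of seconds. If a closer match is wanted, one option is to derive the constant from the device's reported clock; the sketch below reads cudaDeviceProp::clockRate (reported in kHz) and prints the resulting tick budget. Treat it as a sketch: the field reflects the nominal clock, not whatever boost state the GPU happens to be in.

// Derive a clocks-per-second constant from the device's nominal clock rate.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    unsigned long long clks_per_sec = (unsigned long long)prop.clockRate * 1000ULL;  // clockRate is in kHz
    printf("%s: ~%llu clocks per second\n", prop.name, clks_per_sec);
    printf("a 5 s busy-wait would spin for ~%llu clock64() ticks\n", 5ULL * clks_per_sec);
    return 0;
}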
de73c170751bc7de853bbb26ea0d32844d60744e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> __global__ void checkIndex(void){ printf("threadIdx:(%d, %d, %d) \n blockIdx:(%d, %d, %d) \n blockDim:(%d, %d, %d) \n gridDim:(%d, %d, %d) \n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z ); } int main(int argc, char **argv){ // define total data element int nElem = 6; // define grid and block structure dim3 block(3); dim3 grid((nElem+block.x-1)/block.x); //check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // check grid and block dimension from device side hipLaunchKernelGGL(( checkIndex) , dim3(grid),dim3(block), 0, 0, ); // reset device before you leave hipDeviceReset(); return 0; }
de73c170751bc7de853bbb26ea0d32844d60744e.cu
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void checkIndex(void){
    printf("threadIdx:(%d, %d, %d) \n blockIdx:(%d, %d, %d) \n blockDim:(%d, %d, %d) \n gridDim:(%d, %d, %d) \n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}

int main(int argc, char **argv){
    // define total data element
    int nElem = 6;

    // define grid and block structure
    dim3 block(3);
    dim3 grid((nElem+block.x-1)/block.x);

    // check grid and block dimension from host side
    printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);

    // check grid and block dimension from device side
    checkIndex <<<grid,block>>>();

    // reset device before you leave
    cudaDeviceReset();

    return 0;
}
1d9e9d4202fb1604cd1c730ec185b71ce69122b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * commonCUDAKernels.cu * * Created on: Feb 8, 2010 * Author: wen */ #ifndef COMMONCUDAKERNELS_CU_ #define COMMONCUDAKERNELS_CU_ #include "../includes/commonCUDAKernels.h" template <unsigned int blockSize, typename Type> __global__ void sumInputs(Type *input, unsigned int inputPitch, Type *output, unsigned int outputPitch) { __shared__ Type sData[blockSize]; int tid=threadIdx.x; int index=blockIdx.x*(blockDim.x*2)+tid; Type *inputRow; if(inputPitch>1) { inputRow=(Type *)((char *)input+blockIdx.y*inputPitch); } else { inputRow=input+blockIdx.y; } sData[tid]=inputRow[index]+inputRow[index+blockDim.x]; __syncthreads(); if(blockSize>=512) { if(tid<256) sData[tid]+=sData[tid+256]; } __syncthreads(); if(blockSize>=256) { if(tid<128) sData[tid]+=sData[tid+128]; } __syncthreads(); if(blockSize>=128) { if(tid<64) sData[tid]+=sData[tid+64]; } __syncthreads(); if(tid<32) { if(blockSize>=64) sData[tid]+=sData[tid+32]; if(blockSize>=32) sData[tid]+=sData[tid+16]; if(blockSize>=16) sData[tid]+=sData[tid+8]; if(blockSize>=8) sData[tid]+=sData[tid+4]; if(blockSize>=4) sData[tid]+=sData[tid+2]; if(blockSize>=2) sData[tid]+=sData[tid+1]; } if(tid==0) { Type *outputRow; if(outputPitch>1) { outputRow=(Type *)((char *)output+blockIdx.y*outputPitch); } else { outputRow=output+blockIdx.y; } outputRow[blockIdx.x]=sData[0]; } } #endif
1d9e9d4202fb1604cd1c730ec185b71ce69122b8.cu
/* * commonCUDAKernels.cu * * Created on: Feb 8, 2010 * Author: wen */ #ifndef COMMONCUDAKERNELS_CU_ #define COMMONCUDAKERNELS_CU_ #include "../includes/commonCUDAKernels.h" template <unsigned int blockSize, typename Type> __global__ void sumInputs(Type *input, unsigned int inputPitch, Type *output, unsigned int outputPitch) { __shared__ Type sData[blockSize]; int tid=threadIdx.x; int index=blockIdx.x*(blockDim.x*2)+tid; Type *inputRow; if(inputPitch>1) { inputRow=(Type *)((char *)input+blockIdx.y*inputPitch); } else { inputRow=input+blockIdx.y; } sData[tid]=inputRow[index]+inputRow[index+blockDim.x]; __syncthreads(); if(blockSize>=512) { if(tid<256) sData[tid]+=sData[tid+256]; } __syncthreads(); if(blockSize>=256) { if(tid<128) sData[tid]+=sData[tid+128]; } __syncthreads(); if(blockSize>=128) { if(tid<64) sData[tid]+=sData[tid+64]; } __syncthreads(); if(tid<32) { if(blockSize>=64) sData[tid]+=sData[tid+32]; if(blockSize>=32) sData[tid]+=sData[tid+16]; if(blockSize>=16) sData[tid]+=sData[tid+8]; if(blockSize>=8) sData[tid]+=sData[tid+4]; if(blockSize>=4) sData[tid]+=sData[tid+2]; if(blockSize>=2) sData[tid]+=sData[tid+1]; } if(tid==0) { Type *outputRow; if(outputPitch>1) { outputRow=(Type *)((char *)output+blockIdx.y*outputPitch); } else { outputRow=output+blockIdx.y; } outputRow[blockIdx.x]=sData[0]; } } #endif
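Note on the sumInputs pair above: the final tid<32 branch performs the last reduction steps without any synchronization, relying on implicit warp-synchronous execution, which is no longer guaranteed on current architectures. One common fix is to route those steps through a volatile alias of the shared array; the sketch below (the name warpReduce is mine, not from the original file) shows that tail under the same blockSize/sData conventions:

// Sketch: warp-level tail of the reduction using a volatile pointer so each
// partial store is visible to the other lanes of the warp without relying on
// implicit warp-synchronous execution.
template <unsigned int blockSize, typename Type>
__device__ void warpReduce(volatile Type *sData, int tid)
{
    if (blockSize >= 64) sData[tid] += sData[tid + 32];
    if (blockSize >= 32) sData[tid] += sData[tid + 16];
    if (blockSize >= 16) sData[tid] += sData[tid + 8];
    if (blockSize >=  8) sData[tid] += sData[tid + 4];
    if (blockSize >=  4) sData[tid] += sData[tid + 2];
    if (blockSize >=  2) sData[tid] += sData[tid + 1];
}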
a0662713e7c964fedbc2c2029294e25110763c4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/dznrm2.cu normal z -> d, Tue Feb 9 16:05:32 2016 */ #include "magma_internal.h" #include "commonblas_d.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 #define REAL //============================================================================== __global__ void magmablas_dnrm2_kernel( int m, double *dA, int ldda, double *dxnorm ) { const int tx = threadIdx.x; double *dx = dA + blockIdx.x * ldda; __shared__ double sum[ BLOCK_SIZE ]; // get norm of dx double lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZE ) { #ifdef REAL double re = dx[j]; lsum += re*re; #else double re = MAGMA_D_REAL( dx[j] ); double im = MAGMA_D_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx] = lsum; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); if (tx == 0) dxnorm[blockIdx.x] = sqrt(sum[0]); } //============================================================================== __global__ void magmablas_dnrm2_check_kernel( int m, double *dA, int ldda, double *dxnorm, double *lsticc ) { const int tx = threadIdx.x; double *dx = dA + blockIdx.x * ldda; __shared__ double sum[ BLOCK_SIZE ]; // get norm of dx only if lsticc[blockIdx+1] != 0 if ( lsticc[blockIdx.x + 1] == 0 ) return; double lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZE ) { #ifdef REAL double re = dx[j]; lsum += re*re; #else double re = MAGMA_D_REAL( dx[j] ); double im = MAGMA_D_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx] = lsum; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); if (tx == 0) dxnorm[blockIdx.x] = sqrt(sum[0]); } extern "C" void magmablas_dnrm2_check_q( magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magmaDouble_ptr dxnorm, magmaDouble_ptr dlsticc, magma_queue_t queue ) { dim3 threads( BLOCK_SIZE ); dim3 blocks( n ); hipLaunchKernelGGL(( magmablas_dnrm2_check_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, dxnorm, dlsticc ); } //============================================================================== __global__ void magmablas_dnrm2_smkernel( int m, int n, double *dA, int ldda, double *dxnorm ) { const int tx = threadIdx.x; const int ty = threadIdx.y; __shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1]; for( int k = ty; k < n; k += BLOCK_SIZEy ) { double *dx = dA + k * ldda; // get norm of dx double lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZEx ) { #ifdef REAL double re = dx[j]; lsum += re*re; #else double re = MAGMA_D_REAL( dx[j] ); double im = MAGMA_D_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx][ty] = lsum; magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( tx, ty, sum ); if (tx == 0) dxnorm[k] = sqrt(sum[0][ty]); __syncthreads(); } } /* Compute the dnrm2 of each column of m-by-n matrix dA. The resulting norms are written in the dxnorm array. This routine uses only one SM (block). 
*/ extern "C" void magmablas_dnrm2_sm_q( magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magmaDouble_ptr dxnorm, magma_queue_t queue ) { dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy ); dim3 blocks( 1, 1 ); hipLaunchKernelGGL(( magmablas_dnrm2_smkernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dxnorm ); } //============================================================================== __global__ void magma_dnrm2_adjust_kernel(double *xnorm, double *c) { const int tx = threadIdx.x; __shared__ double sum[ BLOCK_SIZE ]; double temp; temp = MAGMA_D_ABS( c[tx] ) / xnorm[0]; sum[tx] = -temp * temp; magma_sum_reduce_n( blockDim.x, tx, sum ); __syncthreads(); if (tx == 0) xnorm[0] = xnorm[0] * sqrt(1+sum[0]); } /* Adjust the norm of c to give the norm of c[k+1:], assuming that c was changed with orthogonal transformations. */ extern "C" void magmablas_dnrm2_adjust_q( magma_int_t k, magmaDouble_ptr dxnorm, magmaDouble_ptr dc, magma_queue_t queue ) { dim3 threads( k ); dim3 blocks( 1 ); hipLaunchKernelGGL(( magma_dnrm2_adjust_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , dxnorm, dc); } //============================================================================== #define BS 256 __global__ void magma_dnrm2_row_check_adjust_kernel( int n, double tol, double *xnorm, double *xnorm2, double *C, int ldc, double *lsticc) { const int tx = threadIdx.x + blockIdx.x*BS; lsticc[tx+1] = 0; if (tx < n) { double temp = MAGMA_D_ABS( C[tx*ldc] ) / xnorm[tx]; temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) ); double temp2 = xnorm[tx] / xnorm2[tx]; temp2 = temp * (temp2 * temp2); if (temp2 <= tol) { lsticc[tx+1] = 1; } else { xnorm[tx] *= sqrt(temp); } } if (tx == 0) lsticc[0] = 0; magma_sum_reduce_n( blockDim.x, tx, lsticc ); } /* Adjust the norm of C[,1:k] to give the norm of C[k+1:,1:k], assuming that C was changed with orthogonal transformations. It also do checks for QP3 */ extern "C" void magmablas_dnrm2_row_check_adjust_q( magma_int_t k, double tol, magmaDouble_ptr dxnorm, magmaDouble_ptr dxnorm2, magmaDouble_ptr dC, magma_int_t lddc, magmaDouble_ptr dlsticc, magma_queue_t queue ) { dim3 threads( BS ); dim3 blocks( magma_ceildiv( k, BS ) ); hipLaunchKernelGGL(( magma_dnrm2_row_check_adjust_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , k, tol, dxnorm, dxnorm2, dC, lddc, dlsticc); } //============================================================================== /* Compute the dnrm2 of each column of m-by-n matrix dA. The resulting norms are written in the dxnorm array. The computation can be done using n blocks (default) or on one SM (commented). */ extern "C" void magmablas_dnrm2_cols_q( magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magmaDouble_ptr dxnorm, magma_queue_t queue ) { dim3 threads( BLOCK_SIZE ); dim3 blocks( n ); hipLaunchKernelGGL(( magmablas_dnrm2_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, dxnorm ); // The following would do the computation on one SM // magmablas_dnrm2_sm_q( m, n, dA, ldda, dxnorm, queue ); }
a0662713e7c964fedbc2c2029294e25110763c4c.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/dznrm2.cu normal z -> d, Tue Feb 9 16:05:32 2016 */ #include "magma_internal.h" #include "commonblas_d.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 #define REAL //============================================================================== __global__ void magmablas_dnrm2_kernel( int m, double *dA, int ldda, double *dxnorm ) { const int tx = threadIdx.x; double *dx = dA + blockIdx.x * ldda; __shared__ double sum[ BLOCK_SIZE ]; // get norm of dx double lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZE ) { #ifdef REAL double re = dx[j]; lsum += re*re; #else double re = MAGMA_D_REAL( dx[j] ); double im = MAGMA_D_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx] = lsum; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); if (tx == 0) dxnorm[blockIdx.x] = sqrt(sum[0]); } //============================================================================== __global__ void magmablas_dnrm2_check_kernel( int m, double *dA, int ldda, double *dxnorm, double *lsticc ) { const int tx = threadIdx.x; double *dx = dA + blockIdx.x * ldda; __shared__ double sum[ BLOCK_SIZE ]; // get norm of dx only if lsticc[blockIdx+1] != 0 if ( lsticc[blockIdx.x + 1] == 0 ) return; double lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZE ) { #ifdef REAL double re = dx[j]; lsum += re*re; #else double re = MAGMA_D_REAL( dx[j] ); double im = MAGMA_D_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx] = lsum; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); if (tx == 0) dxnorm[blockIdx.x] = sqrt(sum[0]); } extern "C" void magmablas_dnrm2_check_q( magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magmaDouble_ptr dxnorm, magmaDouble_ptr dlsticc, magma_queue_t queue ) { dim3 threads( BLOCK_SIZE ); dim3 blocks( n ); magmablas_dnrm2_check_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, dxnorm, dlsticc ); } //============================================================================== __global__ void magmablas_dnrm2_smkernel( int m, int n, double *dA, int ldda, double *dxnorm ) { const int tx = threadIdx.x; const int ty = threadIdx.y; __shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1]; for( int k = ty; k < n; k += BLOCK_SIZEy ) { double *dx = dA + k * ldda; // get norm of dx double lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZEx ) { #ifdef REAL double re = dx[j]; lsum += re*re; #else double re = MAGMA_D_REAL( dx[j] ); double im = MAGMA_D_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx][ty] = lsum; magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( tx, ty, sum ); if (tx == 0) dxnorm[k] = sqrt(sum[0][ty]); __syncthreads(); } } /* Compute the dnrm2 of each column of m-by-n matrix dA. The resulting norms are written in the dxnorm array. This routine uses only one SM (block). 
*/ extern "C" void magmablas_dnrm2_sm_q( magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magmaDouble_ptr dxnorm, magma_queue_t queue ) { dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy ); dim3 blocks( 1, 1 ); magmablas_dnrm2_smkernel <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dxnorm ); } //============================================================================== __global__ void magma_dnrm2_adjust_kernel(double *xnorm, double *c) { const int tx = threadIdx.x; __shared__ double sum[ BLOCK_SIZE ]; double temp; temp = MAGMA_D_ABS( c[tx] ) / xnorm[0]; sum[tx] = -temp * temp; magma_sum_reduce_n( blockDim.x, tx, sum ); __syncthreads(); if (tx == 0) xnorm[0] = xnorm[0] * sqrt(1+sum[0]); } /* Adjust the norm of c to give the norm of c[k+1:], assuming that c was changed with orthogonal transformations. */ extern "C" void magmablas_dnrm2_adjust_q( magma_int_t k, magmaDouble_ptr dxnorm, magmaDouble_ptr dc, magma_queue_t queue ) { dim3 threads( k ); dim3 blocks( 1 ); magma_dnrm2_adjust_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> (dxnorm, dc); } //============================================================================== #define BS 256 __global__ void magma_dnrm2_row_check_adjust_kernel( int n, double tol, double *xnorm, double *xnorm2, double *C, int ldc, double *lsticc) { const int tx = threadIdx.x + blockIdx.x*BS; lsticc[tx+1] = 0; if (tx < n) { double temp = MAGMA_D_ABS( C[tx*ldc] ) / xnorm[tx]; temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) ); double temp2 = xnorm[tx] / xnorm2[tx]; temp2 = temp * (temp2 * temp2); if (temp2 <= tol) { lsticc[tx+1] = 1; } else { xnorm[tx] *= sqrt(temp); } } if (tx == 0) lsticc[0] = 0; magma_sum_reduce_n( blockDim.x, tx, lsticc ); } /* Adjust the norm of C[,1:k] to give the norm of C[k+1:,1:k], assuming that C was changed with orthogonal transformations. It also do checks for QP3 */ extern "C" void magmablas_dnrm2_row_check_adjust_q( magma_int_t k, double tol, magmaDouble_ptr dxnorm, magmaDouble_ptr dxnorm2, magmaDouble_ptr dC, magma_int_t lddc, magmaDouble_ptr dlsticc, magma_queue_t queue ) { dim3 threads( BS ); dim3 blocks( magma_ceildiv( k, BS ) ); magma_dnrm2_row_check_adjust_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> (k, tol, dxnorm, dxnorm2, dC, lddc, dlsticc); } //============================================================================== /* Compute the dnrm2 of each column of m-by-n matrix dA. The resulting norms are written in the dxnorm array. The computation can be done using n blocks (default) or on one SM (commented). */ extern "C" void magmablas_dnrm2_cols_q( magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magmaDouble_ptr dxnorm, magma_queue_t queue ) { dim3 threads( BLOCK_SIZE ); dim3 blocks( n ); magmablas_dnrm2_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, dxnorm ); // The following would do the computation on one SM // magmablas_dnrm2_sm_q( m, n, dA, ldda, dxnorm, queue ); }
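A note on magma_dnrm2_adjust_kernel in the pair above (my paraphrase, not from the MAGMA sources): if a column with norm \|x\| has components c_i annihilated by orthogonal transformations, the remaining norm is

\|x'\| = \sqrt{\|x\|^2 - \sum_i |c_i|^2} = \|x\| \, \sqrt{1 - \sum_i \left( |c_i| / \|x\| \right)^2}

which matches the kernel's accumulation of -temp*temp into sum followed by xnorm[0] * sqrt(1 + sum[0]).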
4f9600f6e3b04835b41f0fcd394f1bd139db1ebf.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cugraph/algorithms.hpp> #include <cugraph/experimental/graph_view.hpp> #include <cugraph/patterns/reduce_op.cuh> #include <cugraph/patterns/update_frontier_v_push_if_out_nbr.cuh> #include <cugraph/patterns/vertex_frontier.cuh> #include <cugraph/utilities/error.hpp> #include <cugraph/vertex_partition_device.cuh> #include <rmm/thrust_rmm_allocator.h> #include <raft/handle.hpp> #include <thrust/fill.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/optional.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <limits> #include <type_traits> namespace cugraph { namespace experimental { namespace detail { template <typename GraphViewType, typename PredecessorIterator> void bfs(raft::handle_t const &handle, GraphViewType const &push_graph_view, typename GraphViewType::vertex_type *distances, PredecessorIterator predecessor_first, typename GraphViewType::vertex_type source_vertex, bool direction_optimizing, typename GraphViewType::vertex_type depth_limit, bool do_expensive_check) { using vertex_t = typename GraphViewType::vertex_type; static_assert(std::is_integral<vertex_t>::value, "GraphViewType::vertex_type should be integral."); static_assert(!GraphViewType::is_adj_matrix_transposed, "GraphViewType should support the push model."); auto const num_vertices = push_graph_view.get_number_of_vertices(); if (num_vertices == 0) { return; } // 1. check input arguments CUGRAPH_EXPECTS( push_graph_view.is_symmetric() || !direction_optimizing, "Invalid input argument: input graph should be symmetric for direction optimizing BFS."); CUGRAPH_EXPECTS(push_graph_view.is_valid_vertex(source_vertex), "Invalid input argument: source vertex out-of-range."); if (do_expensive_check) { // nothing to do } // 2. initialize distances and predecessors auto constexpr invalid_distance = std::numeric_limits<vertex_t>::max(); auto constexpr invalid_vertex = invalid_vertex_id<vertex_t>::value; auto val_first = thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)); thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), thrust::make_counting_iterator(push_graph_view.get_local_vertex_first()), thrust::make_counting_iterator(push_graph_view.get_local_vertex_last()), val_first, [source_vertex] __device__(auto val) { auto distance = invalid_distance; if (val == source_vertex) { distance = vertex_t{0}; } return thrust::make_tuple(distance, invalid_vertex); }); // 3. 
initialize BFS frontier enum class Bucket { cur, next, num_buckets }; VertexFrontier<vertex_t, void, GraphViewType::is_multi_gpu, static_cast<size_t>(Bucket::num_buckets)> vertex_frontier(handle); if (push_graph_view.is_local_vertex_nocheck(source_vertex)) { vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).insert(source_vertex); } // 4. BFS iteration vertex_t depth{0}; while (true) { if (direction_optimizing) { CUGRAPH_FAIL("unimplemented."); } else { vertex_partition_device_t<GraphViewType> vertex_partition(push_graph_view); update_frontier_v_push_if_out_nbr( handle, push_graph_view, vertex_frontier, static_cast<size_t>(Bucket::cur), std::vector<size_t>{static_cast<size_t>(Bucket::next)}, thrust::make_constant_iterator(0) /* dummy */, thrust::make_constant_iterator(0) /* dummy */, [vertex_partition, distances] __device__( vertex_t src, vertex_t dst, auto src_val, auto dst_val) { auto push = true; if (vertex_partition.is_local_vertex_nocheck(dst)) { auto distance = *(distances + vertex_partition.get_local_vertex_offset_from_vertex_nocheck(dst)); if (distance != invalid_distance) { push = false; } } return push ? thrust::optional<vertex_t>{src} : thrust::nullopt; }, reduce_op::any<vertex_t>(), distances, thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)), [depth] __device__(auto v, auto v_val, auto pushed_val) { return (v_val == invalid_distance) ? thrust::optional< thrust::tuple<size_t, thrust::tuple<vertex_t, vertex_t>>>{thrust::make_tuple( static_cast<size_t>(Bucket::next), thrust::make_tuple(depth + 1, pushed_val))} : thrust::nullopt; }); vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).clear(); vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).shrink_to_fit(); vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur), static_cast<size_t>(Bucket::next)); if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).aggregate_size() == 0) { break; } } depth++; if (depth >= depth_limit) { break; } } CUDA_TRY(hipStreamSynchronize( handle.get_stream())); // this is as necessary vertex_frontier will become out-of-scope once // this function returns (FIXME: should I stream sync in VertexFrontier // destructor?) 
} } // namespace detail template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu> void bfs(raft::handle_t const &handle, graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const &graph_view, vertex_t *distances, vertex_t *predecessors, vertex_t source_vertex, bool direction_optimizing, vertex_t depth_limit, bool do_expensive_check) { if (predecessors != nullptr) { detail::bfs(handle, graph_view, distances, predecessors, source_vertex, direction_optimizing, depth_limit, do_expensive_check); } else { detail::bfs(handle, graph_view, distances, thrust::make_discard_iterator(), source_vertex, direction_optimizing, depth_limit, do_expensive_check); } } // explicit instantiation template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int32_t, float, false, true> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int32_t, double, false, true> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int64_t, float, false, true> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int64_t, double, false, true> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int64_t, int64_t, float, false, true> const &graph_view, int64_t *distances, int64_t *predecessors, int64_t source_vertex, bool direction_optimizing, int64_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int64_t, int64_t, double, false, true> const &graph_view, int64_t *distances, int64_t *predecessors, int64_t source_vertex, bool direction_optimizing, int64_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int32_t, float, false, false> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int32_t, double, false, false> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int64_t, float, false, false> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int64_t, double, false, false> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int64_t, int64_t, float, false, false> const &graph_view, int64_t *distances, int64_t *predecessors, int64_t source_vertex, bool direction_optimizing, int64_t depth_limit, bool 
do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int64_t, int64_t, double, false, false> const &graph_view, int64_t *distances, int64_t *predecessors, int64_t source_vertex, bool direction_optimizing, int64_t depth_limit, bool do_expensive_check); } // namespace experimental } // namespace cugraph
4f9600f6e3b04835b41f0fcd394f1bd139db1ebf.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cugraph/algorithms.hpp> #include <cugraph/experimental/graph_view.hpp> #include <cugraph/patterns/reduce_op.cuh> #include <cugraph/patterns/update_frontier_v_push_if_out_nbr.cuh> #include <cugraph/patterns/vertex_frontier.cuh> #include <cugraph/utilities/error.hpp> #include <cugraph/vertex_partition_device.cuh> #include <rmm/thrust_rmm_allocator.h> #include <raft/handle.hpp> #include <thrust/fill.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/optional.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <limits> #include <type_traits> namespace cugraph { namespace experimental { namespace detail { template <typename GraphViewType, typename PredecessorIterator> void bfs(raft::handle_t const &handle, GraphViewType const &push_graph_view, typename GraphViewType::vertex_type *distances, PredecessorIterator predecessor_first, typename GraphViewType::vertex_type source_vertex, bool direction_optimizing, typename GraphViewType::vertex_type depth_limit, bool do_expensive_check) { using vertex_t = typename GraphViewType::vertex_type; static_assert(std::is_integral<vertex_t>::value, "GraphViewType::vertex_type should be integral."); static_assert(!GraphViewType::is_adj_matrix_transposed, "GraphViewType should support the push model."); auto const num_vertices = push_graph_view.get_number_of_vertices(); if (num_vertices == 0) { return; } // 1. check input arguments CUGRAPH_EXPECTS( push_graph_view.is_symmetric() || !direction_optimizing, "Invalid input argument: input graph should be symmetric for direction optimizing BFS."); CUGRAPH_EXPECTS(push_graph_view.is_valid_vertex(source_vertex), "Invalid input argument: source vertex out-of-range."); if (do_expensive_check) { // nothing to do } // 2. initialize distances and predecessors auto constexpr invalid_distance = std::numeric_limits<vertex_t>::max(); auto constexpr invalid_vertex = invalid_vertex_id<vertex_t>::value; auto val_first = thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)); thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), thrust::make_counting_iterator(push_graph_view.get_local_vertex_first()), thrust::make_counting_iterator(push_graph_view.get_local_vertex_last()), val_first, [source_vertex] __device__(auto val) { auto distance = invalid_distance; if (val == source_vertex) { distance = vertex_t{0}; } return thrust::make_tuple(distance, invalid_vertex); }); // 3. initialize BFS frontier enum class Bucket { cur, next, num_buckets }; VertexFrontier<vertex_t, void, GraphViewType::is_multi_gpu, static_cast<size_t>(Bucket::num_buckets)> vertex_frontier(handle); if (push_graph_view.is_local_vertex_nocheck(source_vertex)) { vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).insert(source_vertex); } // 4. 
BFS iteration vertex_t depth{0}; while (true) { if (direction_optimizing) { CUGRAPH_FAIL("unimplemented."); } else { vertex_partition_device_t<GraphViewType> vertex_partition(push_graph_view); update_frontier_v_push_if_out_nbr( handle, push_graph_view, vertex_frontier, static_cast<size_t>(Bucket::cur), std::vector<size_t>{static_cast<size_t>(Bucket::next)}, thrust::make_constant_iterator(0) /* dummy */, thrust::make_constant_iterator(0) /* dummy */, [vertex_partition, distances] __device__( vertex_t src, vertex_t dst, auto src_val, auto dst_val) { auto push = true; if (vertex_partition.is_local_vertex_nocheck(dst)) { auto distance = *(distances + vertex_partition.get_local_vertex_offset_from_vertex_nocheck(dst)); if (distance != invalid_distance) { push = false; } } return push ? thrust::optional<vertex_t>{src} : thrust::nullopt; }, reduce_op::any<vertex_t>(), distances, thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)), [depth] __device__(auto v, auto v_val, auto pushed_val) { return (v_val == invalid_distance) ? thrust::optional< thrust::tuple<size_t, thrust::tuple<vertex_t, vertex_t>>>{thrust::make_tuple( static_cast<size_t>(Bucket::next), thrust::make_tuple(depth + 1, pushed_val))} : thrust::nullopt; }); vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).clear(); vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).shrink_to_fit(); vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur), static_cast<size_t>(Bucket::next)); if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).aggregate_size() == 0) { break; } } depth++; if (depth >= depth_limit) { break; } } CUDA_TRY(cudaStreamSynchronize( handle.get_stream())); // this is as necessary vertex_frontier will become out-of-scope once // this function returns (FIXME: should I stream sync in VertexFrontier // destructor?) 
} } // namespace detail template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu> void bfs(raft::handle_t const &handle, graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const &graph_view, vertex_t *distances, vertex_t *predecessors, vertex_t source_vertex, bool direction_optimizing, vertex_t depth_limit, bool do_expensive_check) { if (predecessors != nullptr) { detail::bfs(handle, graph_view, distances, predecessors, source_vertex, direction_optimizing, depth_limit, do_expensive_check); } else { detail::bfs(handle, graph_view, distances, thrust::make_discard_iterator(), source_vertex, direction_optimizing, depth_limit, do_expensive_check); } } // explicit instantiation template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int32_t, float, false, true> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int32_t, double, false, true> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int64_t, float, false, true> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int64_t, double, false, true> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int64_t, int64_t, float, false, true> const &graph_view, int64_t *distances, int64_t *predecessors, int64_t source_vertex, bool direction_optimizing, int64_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int64_t, int64_t, double, false, true> const &graph_view, int64_t *distances, int64_t *predecessors, int64_t source_vertex, bool direction_optimizing, int64_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int32_t, float, false, false> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int32_t, double, false, false> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int64_t, float, false, false> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int32_t, int64_t, double, false, false> const &graph_view, int32_t *distances, int32_t *predecessors, int32_t source_vertex, bool direction_optimizing, int32_t depth_limit, bool do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int64_t, int64_t, float, false, false> const &graph_view, int64_t *distances, int64_t *predecessors, int64_t source_vertex, bool direction_optimizing, int64_t depth_limit, bool 
do_expensive_check); template void bfs(raft::handle_t const &handle, graph_view_t<int64_t, int64_t, double, false, false> const &graph_view, int64_t *distances, int64_t *predecessors, int64_t source_vertex, bool direction_optimizing, int64_t depth_limit, bool do_expensive_check); } // namespace experimental } // namespace cugraph
d4d88c88da6e903c077e9936172f802141161741.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "histogram_equalization.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *lut = NULL; hipMalloc(&lut, XSIZE*YSIZE); unsigned char *img_out = NULL; hipMalloc(&img_out, XSIZE*YSIZE); unsigned char *img_in = NULL; hipMalloc(&img_in, XSIZE*YSIZE); int *hist_in = NULL; hipMalloc(&hist_in, XSIZE*YSIZE); int img_size = XSIZE*YSIZE; int nbr_bin = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( histogram_equalization), dim3(gridBlock),dim3(threadBlock), 0, 0, lut,img_out,img_in,hist_in,img_size,nbr_bin); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( histogram_equalization), dim3(gridBlock),dim3(threadBlock), 0, 0, lut,img_out,img_in,hist_in,img_size,nbr_bin); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( histogram_equalization), dim3(gridBlock),dim3(threadBlock), 0, 0, lut,img_out,img_in,hist_in,img_size,nbr_bin); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d4d88c88da6e903c077e9936172f802141161741.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "histogram_equalization.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *lut = NULL; cudaMalloc(&lut, XSIZE*YSIZE); unsigned char *img_out = NULL; cudaMalloc(&img_out, XSIZE*YSIZE); unsigned char *img_in = NULL; cudaMalloc(&img_in, XSIZE*YSIZE); int *hist_in = NULL; cudaMalloc(&hist_in, XSIZE*YSIZE); int img_size = XSIZE*YSIZE; int nbr_bin = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); histogram_equalization<<<gridBlock,threadBlock>>>(lut,img_out,img_in,hist_in,img_size,nbr_bin); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { histogram_equalization<<<gridBlock,threadBlock>>>(lut,img_out,img_in,hist_in,img_size,nbr_bin); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { histogram_equalization<<<gridBlock,threadBlock>>>(lut,img_out,img_in,hist_in,img_size,nbr_bin); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
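The benchmark harness in the pair above grows iXSIZE and iYSIZE in while loops until they divide evenly by the block dimensions before computing the grid. A hedged equivalent using the usual ceiling division gives the same grid for positive sizes without the loops (names mirror the file):

// Sketch: ceil-division grid sizing, equivalent to the padding loops above.
dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);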
c7d25035d41fa4d1edf87e6bbd03be5c4893a7b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void WCAForce(float4 *r, float4 *forces, InteractionList<int> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; float4 f=forces[i]; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); //Sigma of the i-th bead int Nneib=list.count_d[i]; //Number of neighbors of the i-th bead for (int ineib=0;ineib<Nneib;ineib++) { //Loop over neighbors of the i-th bead int j=list.map_d[ineib*list.N+i]; //Look up neibor in the neibor list //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); //float4 r2=tex1Dfetch(r_t,tex1Dfetch(neibmap_t,ineib*list.N+i); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; // sigma of the other bead, and mixed into sigma_ij sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); float r6inv=r2.w*r2.w*r2.w; r2.w=12.*ss_c.eps/sigma2*r2.w*r6inv*(1-r6inv); f.x+=r2.w*r2.x; f.y+=r2.w*r2.y; f.z+=r2.w*r2.z; } forces[i]=f; } __global__ void WCAEnergy(float4 *r, InteractionList<int> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); //Sigma of the i-th bead int Nneib=list.count_d[i]; //Number of neighbors of the i-th bead for (int ineib=0;ineib<Nneib;ineib++) { //Loop over neighbors of the i-th bead int j=list.map_d[ineib*list.N+i]; //Look up neibor in the neibor list //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); //float4 r2=tex1Dfetch(r_t,tex1Dfetch(neibmap_t,ineib*list.N+i); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared float r6inv=r2.w*r2.w*r2.w; energy+=ss_c.eps*(r6inv*(r6inv-2.0f)+1.0f); } r[i].w=energy; } __global__ void NativeSubtractWCAForce(float4* r, float4* forces, InteractionList<nc> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared float r6inv=r2.w*r2.w*r2.w; r2.w=12.*ss_c.eps/sigma2*r2.w*r6inv*(1-r6inv); f.x-=r2.w*r2.x; f.y-=r2.w*r2.y; f.z-=r2.w*r2.z; } forces[i]=f; } __global__ void NativeSubtractWCAForce(float4* r, float4* forces, InteractionList<nc> list, float *sig, float Delta) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared float r6inv=r2.w*r2.w*r2.w; r2.w=Delta*12.*ss_c.eps/sigma2*r2.w*r6inv*(1-r6inv); f.x-=r2.w*r2.x; f.y-=r2.w*r2.y; f.z-=r2.w*r2.z; } forces[i]=f; } 
__global__ void NativeSubtractWCAEnergy(float4 *r, InteractionList<nc> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared float r6inv=r2.w*r2.w*r2.w; energy-=ss_c.eps*(r6inv*(r6inv-2.0f)+1.0f); } r[i].w=energy; } __global__ void WCANeighborList(float4* r, InteractionList<int> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int neighbors=0; for (int j=0;j<list.N;j++) { //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if ( (r2.w<ss_c.Rcut2) and ( (abs(j-i)>1) or ((abs(j-i)>0) and ((i>=list.N/2) or (j>=list.N/2))) //bb with ss or ss with ss on neighboring residues (this actually excludes terminal beads of different chains, that are not bound) ) and ((j+list.N/2)!=i) and //exclude covalently bonded bb and ss beads ((i+list.N/2)!=j) ) { list.map_d[neighbors*list.N+i]=j; neighbors++; } } list.count_d[i]=neighbors; } __global__ void WCANeighborListMultTraj(float4* r, InteractionList<int> list, int Ntraj) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int neighbors=0; for (int j=0;j<list.N;j++) { //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if ( (r2.w<ss_c.Rcut2) and ( (abs(j-i)>1) or ((abs(j-i)>0) and ((i>=list.N/2) or (j>=list.N/2))) //bb with ss or ss with ss on neighboring residues (this actually excludes terminal beads of different chains, that are not bound) ) and ((j+list.N/2)!=i) and //exclude covalently bonded bb and ss beads ((i+list.N/2)!=j) and ((i/Ntraj)==(j/Ntraj)) //make sure beads belong to the same trajectory/replica ) { list.map_d[neighbors*list.N+i]=j; neighbors++; } } list.count_d[i]=neighbors; } __global__ void WCANeighborList(float4* r, InteractionList<int> intlist, InteractionList<int> neiblist) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=intlist.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int Npartners=intlist.count_d[i]; int neighbors=0; for (int ip=0;ip<Npartners;ip++) { int j=intlist.map_d[ip*intlist.N+i]; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if (r2.w<ss_c.Rcut2) { neiblist.map_d[neighbors*neiblist.N+i]=j; neighbors++; } } neiblist.count_d[i]=neighbors; }
c7d25035d41fa4d1edf87e6bbd03be5c4893a7b1.cu
__global__ void WCAForce(float4 *r, float4 *forces, InteractionList<int> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; float4 f=forces[i]; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); //Sigma of the i-th bead int Nneib=list.count_d[i]; //Number of neighbors of the i-th bead for (int ineib=0;ineib<Nneib;ineib++) { //Loop over neighbors of the i-th bead int j=list.map_d[ineib*list.N+i]; //Look up neibor in the neibor list //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); //float4 r2=tex1Dfetch(r_t,tex1Dfetch(neibmap_t,ineib*list.N+i); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; // sigma of the other bead, and mixed into sigma_ij sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); float r6inv=r2.w*r2.w*r2.w; r2.w=12.*ss_c.eps/sigma2*r2.w*r6inv*(1-r6inv); f.x+=r2.w*r2.x; f.y+=r2.w*r2.y; f.z+=r2.w*r2.z; } forces[i]=f; } __global__ void WCAEnergy(float4 *r, InteractionList<int> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); //Sigma of the i-th bead int Nneib=list.count_d[i]; //Number of neighbors of the i-th bead for (int ineib=0;ineib<Nneib;ineib++) { //Loop over neighbors of the i-th bead int j=list.map_d[ineib*list.N+i]; //Look up neibor in the neibor list //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); //float4 r2=tex1Dfetch(r_t,tex1Dfetch(neibmap_t,ineib*list.N+i); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared float r6inv=r2.w*r2.w*r2.w; energy+=ss_c.eps*(r6inv*(r6inv-2.0f)+1.0f); } r[i].w=energy; } __global__ void NativeSubtractWCAForce(float4* r, float4* forces, InteractionList<nc> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared float r6inv=r2.w*r2.w*r2.w; r2.w=12.*ss_c.eps/sigma2*r2.w*r6inv*(1-r6inv); f.x-=r2.w*r2.x; f.y-=r2.w*r2.y; f.z-=r2.w*r2.z; } forces[i]=f; } __global__ void NativeSubtractWCAForce(float4* r, float4* forces, InteractionList<nc> list, float *sig, float Delta) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared float r6inv=r2.w*r2.w*r2.w; r2.w=Delta*12.*ss_c.eps/sigma2*r2.w*r6inv*(1-r6inv); f.x-=r2.w*r2.x; f.y-=r2.w*r2.y; f.z-=r2.w*r2.z; } forces[i]=f; } __global__ void NativeSubtractWCAEnergy(float4 *r, InteractionList<nc> list, float *sig) { 
int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared float r6inv=r2.w*r2.w*r2.w; energy-=ss_c.eps*(r6inv*(r6inv-2.0f)+1.0f); } r[i].w=energy; } __global__ void WCANeighborList(float4* r, InteractionList<int> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int neighbors=0; for (int j=0;j<list.N;j++) { //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if ( (r2.w<ss_c.Rcut2) and ( (abs(j-i)>1) or ((abs(j-i)>0) and ((i>=list.N/2) or (j>=list.N/2))) //bb with ss or ss with ss on neighboring residues (this actually excludes terminal beads of different chains, that are not bound) ) and ((j+list.N/2)!=i) and //exclude covalently bonded bb and ss beads ((i+list.N/2)!=j) ) { list.map_d[neighbors*list.N+i]=j; neighbors++; } } list.count_d[i]=neighbors; } __global__ void WCANeighborListMultTraj(float4* r, InteractionList<int> list, int Ntraj) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int neighbors=0; for (int j=0;j<list.N;j++) { //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if ( (r2.w<ss_c.Rcut2) and ( (abs(j-i)>1) or ((abs(j-i)>0) and ((i>=list.N/2) or (j>=list.N/2))) //bb with ss or ss with ss on neighboring residues (this actually excludes terminal beads of different chains, that are not bound) ) and ((j+list.N/2)!=i) and //exclude covalently bonded bb and ss beads ((i+list.N/2)!=j) and ((i/Ntraj)==(j/Ntraj)) //make sure beads belong to the same trajectory/replica ) { list.map_d[neighbors*list.N+i]=j; neighbors++; } } list.count_d[i]=neighbors; } __global__ void WCANeighborList(float4* r, InteractionList<int> intlist, InteractionList<int> neiblist) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=intlist.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int Npartners=intlist.count_d[i]; int neighbors=0; for (int ip=0;ip<Npartners;ip++) { int j=intlist.map_d[ip*intlist.N+i]; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if (r2.w<ss_c.Rcut2) { neiblist.map_d[neighbors*neiblist.N+i]=j; neighbors++; } } neiblist.count_d[i]=neighbors; }
22af6fef7b0270ab43cf6034884f71af059435cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; template <typename T> static __global__ void sequence_expand_as_kernel(const T *in_data, const size_t *expand_offset, const size_t src_hight, const size_t src_widht, T *out_data) { for (int h_id = blockIdx.x; h_id < src_hight; h_id += gridDim.x) { int span = expand_offset[h_id + 1] - expand_offset[h_id]; if (span == 0) continue; const T *src = in_data + h_id * src_widht; for (int w_id = threadIdx.x; w_id < src_widht; w_id += blockDim.x) { T ele = src[w_id]; int offset = expand_offset[h_id] * src_widht; for (int k = 0; k < span; ++k) { out_data[offset + k * src_widht + w_id] = ele; } } } } template <typename T> static __global__ void sequence_expand_as_grad_kernel( const T *dout_data, const size_t *expand_offset, const size_t dst_hight, const size_t dst_width, T *dx_data) { for (int h_id = blockIdx.x; h_id < dst_hight; h_id += gridDim.x) { T *dst = dx_data + h_id * dst_width; int span = expand_offset[h_id + 1] - expand_offset[h_id]; for (int w_id = threadIdx.x; w_id < dst_width; w_id += blockDim.x) { T result = 0; for (int k = 0; k < span; ++k) { int offset = (expand_offset[h_id] + k) * dst_width; const T *src = dout_data + offset; result += src[w_id]; } dst[w_id] = result; } } } template <typename T> struct SequenceExpandAsFunctor<platform::CUDADeviceContext, T> { void operator()( const platform::CUDADeviceContext &context, const LoDTensor &x, const framework::Vector<size_t> &ref_lod, /*expand referenced lod*/ LoDTensor *out) { int height = x.dims()[0]; int width = phi::product(x.dims()) / height; const int kThreadsPerBlock = 1024; int thread_x = kThreadsPerBlock; if (width < kThreadsPerBlock) { // block_cols is aligned by 32. thread_x = ((width + 31) >> 5) << 5; } int max_threads = context.GetMaxPhysicalThreadCount(); int block_x = ::max(max_threads / thread_x, 1); dim3 block_size(thread_x); dim3 grid_size(block_x); paddle::framework::MixVector<size_t> mixv_ref_lod(&ref_lod); hipLaunchKernelGGL(( sequence_expand_as_kernel), dim3(grid_size), dim3(block_size), 0, context.stream(), x.data<T>(), mixv_ref_lod.CUDAData(context.GetPlace()), height, width, out->mutable_data<T>(context.GetPlace())); } }; template <typename T> struct SequenceExpandAsGradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext &context, const LoDTensor &dout, const framework::Vector<size_t> &ref_lod, /*expand based lod*/ LoDTensor *dx) { int height = dx->dims()[0]; int width = phi::product(dx->dims()) / height; const int kThreadsPerBlock = 1024; int thread_x = kThreadsPerBlock; if (width < kThreadsPerBlock) { // block_cols is aligned by 32. 
thread_x = ((width + 31) >> 5) << 5; } int max_threads = context.GetMaxPhysicalThreadCount(); int block_x = ::max(max_threads / thread_x, 1); dim3 block_size(thread_x); dim3 grid_size(block_x); paddle::framework::MixVector<size_t> mixv_ref_lod(&ref_lod); hipLaunchKernelGGL(( sequence_expand_as_grad_kernel), dim3(grid_size), dim3(block_size), 0, context.stream(), dout.data<T>(), mixv_ref_lod.CUDAData(context.GetPlace()), height, width, dx->mutable_data<T>(context.GetPlace())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( sequence_expand_as, ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, float>, ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, double>, ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, int>, ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( sequence_expand_as_grad, ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, float>, ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, double>, ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, int>, ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
22af6fef7b0270ab43cf6034884f71af059435cc.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; template <typename T> static __global__ void sequence_expand_as_kernel(const T *in_data, const size_t *expand_offset, const size_t src_hight, const size_t src_widht, T *out_data) { for (int h_id = blockIdx.x; h_id < src_hight; h_id += gridDim.x) { int span = expand_offset[h_id + 1] - expand_offset[h_id]; if (span == 0) continue; const T *src = in_data + h_id * src_widht; for (int w_id = threadIdx.x; w_id < src_widht; w_id += blockDim.x) { T ele = src[w_id]; int offset = expand_offset[h_id] * src_widht; for (int k = 0; k < span; ++k) { out_data[offset + k * src_widht + w_id] = ele; } } } } template <typename T> static __global__ void sequence_expand_as_grad_kernel( const T *dout_data, const size_t *expand_offset, const size_t dst_hight, const size_t dst_width, T *dx_data) { for (int h_id = blockIdx.x; h_id < dst_hight; h_id += gridDim.x) { T *dst = dx_data + h_id * dst_width; int span = expand_offset[h_id + 1] - expand_offset[h_id]; for (int w_id = threadIdx.x; w_id < dst_width; w_id += blockDim.x) { T result = 0; for (int k = 0; k < span; ++k) { int offset = (expand_offset[h_id] + k) * dst_width; const T *src = dout_data + offset; result += src[w_id]; } dst[w_id] = result; } } } template <typename T> struct SequenceExpandAsFunctor<platform::CUDADeviceContext, T> { void operator()( const platform::CUDADeviceContext &context, const LoDTensor &x, const framework::Vector<size_t> &ref_lod, /*expand referenced lod*/ LoDTensor *out) { int height = x.dims()[0]; int width = phi::product(x.dims()) / height; const int kThreadsPerBlock = 1024; int thread_x = kThreadsPerBlock; if (width < kThreadsPerBlock) { // block_cols is aligned by 32. thread_x = ((width + 31) >> 5) << 5; } int max_threads = context.GetMaxPhysicalThreadCount(); int block_x = std::max(max_threads / thread_x, 1); dim3 block_size(thread_x); dim3 grid_size(block_x); paddle::framework::MixVector<size_t> mixv_ref_lod(&ref_lod); sequence_expand_as_kernel<<<grid_size, block_size, 0, context.stream()>>>( x.data<T>(), mixv_ref_lod.CUDAData(context.GetPlace()), height, width, out->mutable_data<T>(context.GetPlace())); } }; template <typename T> struct SequenceExpandAsGradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext &context, const LoDTensor &dout, const framework::Vector<size_t> &ref_lod, /*expand based lod*/ LoDTensor *dx) { int height = dx->dims()[0]; int width = phi::product(dx->dims()) / height; const int kThreadsPerBlock = 1024; int thread_x = kThreadsPerBlock; if (width < kThreadsPerBlock) { // block_cols is aligned by 32. 
thread_x = ((width + 31) >> 5) << 5; } int max_threads = context.GetMaxPhysicalThreadCount(); int block_x = std::max(max_threads / thread_x, 1); dim3 block_size(thread_x); dim3 grid_size(block_x); paddle::framework::MixVector<size_t> mixv_ref_lod(&ref_lod); sequence_expand_as_grad_kernel<<<grid_size, block_size, 0, context.stream()>>>( dout.data<T>(), mixv_ref_lod.CUDAData(context.GetPlace()), height, width, dx->mutable_data<T>(context.GetPlace())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( sequence_expand_as, ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, float>, ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, double>, ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, int>, ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( sequence_expand_as_grad, ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, float>, ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, double>, ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, int>, ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
eb9591d0090a07c3d5823084394833eae427ddaa.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <string.h> #include <stdlib.h> #include "include\externs.h" #include "include\cephes.h" #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> // threadIdx #include <hip/device_functions.h> // __syncthreads() #include <ctime> #include <time.h> #include <Windows.h> #pragma comment(lib, "cudart") // CUDA runtime API () /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * R U N S T E S T * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define BLOCK_SIZE 1024 // . 128, 256, 512, 1024 // __global__ void funcR (int nn, int * inData, int * outData) { int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > 0) { if (inData[i] != inData [i - 1]) outData[i] = 1; else outData[i] = 0; } else outData[i] = 0; } __global__ void reduce1 (int nn, int * inData, int * outData) { //__shared__ short int data[BLOCK_SIZE]; __shared__ int data[BLOCK_SIZE]; int tid = threadIdx.x; int i = 2 * blockIdx.x * blockDim.x + threadIdx.x; if (i + blockDim.x < nn) // , .. 1024 data[tid] = inData[i] + inData[i + blockDim.x]; else data[tid] = inData[i]; __syncthreads(); for (int s = blockDim.x / 2; s > 32; s = s / 2 ) { if (tid < s) if (i + s < nn) // , .. 1024. data[tid] += data [tid +s]; __syncthreads(); } if (tid < 32) // .. 32 , { data[tid] += data[tid + 32]; data[tid] += data[tid + 16]; data[tid] += data[tid + 8]; data[tid] += data[tid + 4]; data[tid] += data[tid + 2]; data[tid] += data[tid + 1]; } if (tid == 0) // outData [blockIdx.x] = data [0]; } int reduce1 (int * data, int n) { int numBytes = n * sizeof (int); int NumThreads = BLOCK_SIZE; // int NumBloks = ceil((float) n / NumThreads); // n int sum = 0; // size_t free = 0; size_t total = 0; // GPU hipSetDevice(0); // CPU int * inD = new int [n]; int * outD = new int [n]; // GPU int * inDev = NULL; int * outDev = NULL; // GPU hipMalloc ((void**)&inDev, numBytes); hipMalloc ((void**)&outDev, numBytes); // GPU hipMemcpyAsync (inDev, data, numBytes, hipMemcpyHostToDevice); // int NewNumBloks = ceil((float) NumBloks / 2 ); // n dim3 threads = dim3(NumThreads); dim3 bloks = dim3(NewNumBloks); // hipEvent_t start, stop; float gpuTime = 0.0f; // hipEventCreate (&start); hipEventCreate (&stop); // strat hipEventRecord (start, 0); // hipLaunchKernelGGL(( reduce1), dim3(bloks), dim3(threads), 0, 0, n, inDev, outDev); // // stop hipEventRecord (stop, 0); // , stop hipEventSynchronize (stop); // start stop hipEventElapsedTime ( &gpuTime, start, stop); printf("GPU compute time ( ): %.9f millseconds\n", gpuTime); // hipEventDestroy (start); hipEventDestroy (stop); // CPU hipMemcpy (outD, outDev, numBytes, hipMemcpyDeviceToHost); // hipFree (inDev); hipFree (outDev); printf("Sums of ones:\n"); for (int i = 0; i < NewNumBloks; i++) printf("%d) %d\n", i, outD[i]); sum = 0; if (NewNumBloks > BLOCK_SIZE) // , GPU sum = reduce1(outD, NewNumBloks); else // , CPU { for (int i = 0; i < NewNumBloks; i++) sum += outD[i]; delete [] outD; } return sum; } int funcR (int * data, int n) { int numBytes = n * sizeof (int); int NumThreads = BLOCK_SIZE; // int NumBloks = ceil((float) n / NumThreads); // n int sum = 0; // size_t free = 0; size_t total = 0; // GPU hipSetDevice(0); // CPU int * inD = new int [n]; int * outD = new int [n]; // GPU int * inDev = NULL; int * outDev = NULL; // GPU hipMalloc ((void**)&inDev, numBytes); 
hipMalloc ((void**)&outDev, numBytes); // GPU hipMemcpyAsync (inDev, data, numBytes, hipMemcpyHostToDevice); // dim3 threads = dim3(NumThreads); dim3 bloks = dim3(NumBloks); // hipEvent_t start, stop; float gpuTime = 0.0f; // hipEventCreate (&start); hipEventCreate (&stop); // strat hipEventRecord (start, 0); // hipLaunchKernelGGL(( funcR), dim3(bloks), dim3(threads), 0, 0, n, inDev, outDev); // // stop hipEventRecord (stop, 0); // , stop hipEventSynchronize (stop); // start stop hipEventElapsedTime ( &gpuTime, start, stop); printf("GPU compute time ( ): %.9f millseconds\n", gpuTime); // hipEventDestroy (start); hipEventDestroy (stop); // CPU hipMemcpy (outD, outDev, numBytes, hipMemcpyDeviceToHost); // hipFree (inDev); hipFree (outDev); /* printf(" 1 0 0 1:\n"); for (int i = 0; i < n; i++) { if (i % BLOCK_SIZE == 0) printf("\n"); printf("%d) %d ", i, outD[i]); } printf("\n"); */ sum = reduce1(outD, n); printf("sum = %d\n", sum); delete [] outD; return sum; } void Runs(int n) { int S, S2, k; double pi, pi2, V, V2, erfc_arg, p_value; int * a = new int [n]; for (int i = 0; i < n; i++) a[i] = epsilon[i]; S = reduce1 (a, n); pi = (double)S / (double)n; // LARGE_INTEGER frequency0; // ticks per second LARGE_INTEGER t10, t20; // ticks double elapsedTime0; //// get ticks per second QueryPerformanceFrequency(&frequency0); // start timer QueryPerformanceCounter(&t10); S2 = 0; for (k = 0; k < n; k++) if (epsilon[k]) S2++; // stop timer QueryPerformanceCounter(&t20); // compute and print the elapsed time in millisec elapsedTime0 = (t20.QuadPart - t10.QuadPart) * 1000.0 / frequency0.QuadPart; // CPU printf ("CPU compute time ( ): %f\n", elapsedTime0); pi2 = (double)S2 / (double)n; printf ("GPU: S = %d, pi = %lf\n", S, pi); printf ("CPU: S = %d, pi2 = %lf\n", S2, pi2); if ( fabs(pi - 0.5) > (2.0 / sqrt((double)n)) ) { fprintf(stats[TEST_RUNS], "\t\t\t\tRUNS TEST\n"); fprintf(stats[TEST_RUNS], "\t\t------------------------------------------\n"); fprintf(stats[TEST_RUNS], "\t\tPI ESTIMATOR CRITERIA NOT MET! 
PI = %f\n", pi); p_value = 0.0; } else { V = 1; V2 = 1; // LARGE_INTEGER frequency1; // ticks per second LARGE_INTEGER t11, t21; // ticks double elapsedTime1; //// get ticks per second QueryPerformanceFrequency(&frequency1); // start timer QueryPerformanceCounter(&t11); for (k = 1; k < n; k++) if (epsilon[k] != epsilon[k-1]) V2++; // stop timer QueryPerformanceCounter(&t21); // compute and print the elapsed time in millisec elapsedTime1 = (t21.QuadPart - t11.QuadPart) * 1000.0 / frequency1.QuadPart; // CPU printf ("CPU compute time ( ): %f\n", elapsedTime1); V += funcR(a, n); printf ("GPU: V = %lf\n", V); printf ("CPU: V2 = %lf\n", V2); erfc_arg = fabs(V2 - 2.0 * n * pi2 * (1-pi2)) / (2.0 * pi2 * (1-pi2) * sqrt(2.0*n)); p_value = cephes_erfc(erfc_arg); fprintf(stats[TEST_RUNS], "\t\t\t\tRUNS TEST\n"); fprintf(stats[TEST_RUNS], "\t\t------------------------------------------\n"); fprintf(stats[TEST_RUNS], "\t\tCOMPUTATIONAL INFORMATION:\n"); fprintf(stats[TEST_RUNS], "\t\t------------------------------------------\n"); fprintf(stats[TEST_RUNS], "\t\t(a) Pi = %f\n", pi); fprintf(stats[TEST_RUNS], "\t\t(b) V_n_obs (Total # of runs) = %d\n", (int)V); fprintf(stats[TEST_RUNS], "\t\t(c) V_n_obs - 2 n pi (1-pi)\n"); fprintf(stats[TEST_RUNS], "\t\t ----------------------- = %f\n", erfc_arg); fprintf(stats[TEST_RUNS], "\t\t 2 sqrt(2n) pi (1-pi)\n"); fprintf(stats[TEST_RUNS], "\t\t------------------------------------------\n"); if ( isNegative(p_value) || isGreaterThanOne(p_value) ) fprintf(stats[TEST_RUNS], "WARNING: P_VALUE IS OUT OF RANGE.\n"); fprintf(stats[TEST_RUNS], "%s\t\tp_value = %f\n\n", p_value < ALPHA ? "FAILURE" : "SUCCESS", p_value); fflush(stats[TEST_RUNS]); } fprintf(results[TEST_RUNS], "%f\n", p_value); fflush(results[TEST_RUNS]); }
eb9591d0090a07c3d5823084394833eae427ddaa.cu
#include <stdio.h> #include <math.h> #include <string.h> #include <stdlib.h> #include "include\externs.h" #include "include\cephes.h" #include <iostream> #include <cuda_runtime.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> // for threadIdx #include <device_functions.h> // for __syncthreads() #include <ctime> #include <time.h> #include <Windows.h> #pragma comment(lib, "cudart") // dynamic library for the CUDA runtime API (high-level) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * R U N S T E S T * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define BLOCK_SIZE 1024 // this is the maximum possible number of threads in a block. 128, 256, 512, 1024 // Compute the sums for each block __global__ void funcR (int nn, int * inData, int * outData) { int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > 0) { if (inData[i] != inData [i - 1]) outData[i] = 1; else outData[i] = 0; } else outData[i] = 0; } __global__ void reduce1 (int nn, int * inData, int * outData) { //__shared__ short int data[BLOCK_SIZE]; __shared__ int data[BLOCK_SIZE]; int tid = threadIdx.x; int i = 2 * blockIdx.x * blockDim.x + threadIdx.x; if (i + blockDim.x < nn) // this check is needed because not all input sequences are multiples of 1024 data[tid] = inData[i] + inData[i + blockDim.x]; else data[tid] = inData[i]; __syncthreads(); for (int s = blockDim.x / 2; s > 32; s = s / 2 ) { if (tid < s) if (i + s < nn) // this check is needed because not all input sequences are multiples of 1024, so nothing extra gets summed data[tid] += data [tid +s]; __syncthreads(); } if (tid < 32) // since a warp has 32 threads, no synchronization is required { data[tid] += data[tid + 32]; data[tid] += data[tid + 16]; data[tid] += data[tid + 8]; data[tid] += data[tid + 4]; data[tid] += data[tid + 2]; data[tid] += data[tid + 1]; } if (tid == 0) // store the sum of the block's elements outData [blockIdx.x] = data [0]; }
int reduce1 (int * data, int n) { int numBytes = n * sizeof (int); int NumThreads = BLOCK_SIZE; // number of threads in a block int NumBloks = ceil((float) n / NumThreads); // compute the number of blocks for the given n and block size, rounding up int sum = 0; // timing counters size_t free = 0; size_t total = 0; // Select the first GPU to work with cudaSetDevice(0); // Allocate memory on the CPU int * inD = new int [n]; int * outD = new int [n]; // allocate memory on the GPU int * inDev = NULL; int * outDev = NULL; // allocate memory on the GPU cudaMalloc ((void**)&inDev, numBytes); cudaMalloc ((void**)&outDev, numBytes); // copy the data to the GPU cudaMemcpyAsync (inDev, data, numBytes, cudaMemcpyHostToDevice); // kernel launch int NewNumBloks = ceil((float) NumBloks / 2 ); // compute the number of blocks for the given n and block size, rounding up dim3 threads = dim3(NumThreads); dim3 bloks = dim3(NewNumBloks); // Start timing cudaEvent_t start, stop; float gpuTime = 0.0f; // create the events for the start and end of kernel execution cudaEventCreate (&start); cudaEventCreate (&stop); // Bind the strat event to the current point cudaEventRecord (start, 0); // kernel launch reduce1<<<bloks, threads>>> (n, inDev, outDev); // compute the sum // bind the stop event to this point cudaEventRecord (stop, 0); // Wait for the kernel to really finish, using synchronization on the stop event cudaEventSynchronize (stop); // Query the time between the start and stop events cudaEventElapsedTime ( &gpuTime, start, stop); printf("GPU compute time (computing the sum): %.9f milliseconds\n", gpuTime); // Destroy the created events cudaEventDestroy (start); cudaEventDestroy (stop); // copy the result back to the CPU cudaMemcpy (outD, outDev, numBytes, cudaMemcpyDeviceToHost); // free the memory cudaFree (inDev); cudaFree (outDev); printf("Sums of ones:\n"); for (int i = 0; i < NewNumBloks; i++) printf("%d) %d\n", i, outD[i]); sum = 0; if (NewNumBloks > BLOCK_SIZE) // if there are many blocks, run the summation again on the GPU sum = reduce1(outD, NewNumBloks); else // if there are few, compute the result on the CPU { for (int i = 0; i < NewNumBloks; i++) sum += outD[i]; delete [] outD; } return sum; }
int funcR (int * data, int n) { int numBytes = n * sizeof (int); int NumThreads = BLOCK_SIZE; // number of threads in a block int NumBloks = ceil((float) n / NumThreads); // compute the number of blocks for the given n and block size, rounding up int sum = 0; // timing counters size_t free = 0; size_t total = 0; // Select the first GPU to work with cudaSetDevice(0); // Allocate memory on the CPU int * inD = new int [n]; int * outD = new int [n]; // allocate memory on the GPU int * inDev = NULL; int * outDev = NULL; // allocate memory on the GPU cudaMalloc ((void**)&inDev, numBytes); cudaMalloc ((void**)&outDev, numBytes); // copy the data to the GPU cudaMemcpyAsync (inDev, data, numBytes, cudaMemcpyHostToDevice); // kernel launch dim3 threads = dim3(NumThreads); dim3 bloks = dim3(NumBloks); // Start timing cudaEvent_t start, stop; float gpuTime = 0.0f; // create the events for the start and end of kernel execution cudaEventCreate (&start); cudaEventCreate (&stop); // Bind the strat event to the current point cudaEventRecord (start, 0); // kernel launch funcR<<<bloks, threads>>> (n, inDev, outDev); // compute the sum // bind the stop event to this point cudaEventRecord (stop, 0); // Wait for the kernel to really finish, using synchronization on the stop event cudaEventSynchronize (stop); // Query the time between the start and stop events cudaEventElapsedTime ( &gpuTime, start, stop); printf("GPU compute time (counting the transitions): %.9f milliseconds\n", gpuTime); // Destroy the created events cudaEventDestroy (start); cudaEventDestroy (stop); // copy the result back to the CPU cudaMemcpy (outD, outDev, numBytes, cudaMemcpyDeviceToHost); // free the memory cudaFree (inDev); cudaFree (outDev); /* printf("Array of transitions from 1 to 0 and from 0 to 1:\n"); for (int i = 0; i < n; i++) { if (i % BLOCK_SIZE == 0) printf("\n"); printf("%d) %d ", i, outD[i]); } printf("\n"); */ sum = reduce1(outD, n); printf("sum = %d\n", sum); delete [] outD; return sum; }
void Runs(int n) { int S, S2, k; double pi, pi2, V, V2, erfc_arg, p_value; int * a = new int [n]; for (int i = 0; i < n; i++) a[i] = epsilon[i]; S = reduce1 (a, n); pi = (double)S / (double)n; // Start the timer LARGE_INTEGER frequency0; // ticks per second LARGE_INTEGER t10, t20; // ticks double elapsedTime0; //// get ticks per second QueryPerformanceFrequency(&frequency0); // start timer QueryPerformanceCounter(&t10); S2 = 0; for (k = 0; k < n; k++) if (epsilon[k]) S2++; // stop timer QueryPerformanceCounter(&t20); // compute and print the elapsed time in millisec elapsedTime0 = (t20.QuadPart - t10.QuadPart) * 1000.0 / frequency0.QuadPart; // Print the CPU execution time of the function printf ("CPU compute time (counting the ones): %f\n", elapsedTime0); pi2 = (double)S2 / (double)n; printf ("GPU: S = %d, pi = %lf\n", S, pi); printf ("CPU: S = %d, pi2 = %lf\n", S2, pi2); if ( fabs(pi - 0.5) > (2.0 / sqrt((double)n)) ) { fprintf(stats[TEST_RUNS], "\t\t\t\tRUNS TEST\n"); fprintf(stats[TEST_RUNS], "\t\t------------------------------------------\n"); fprintf(stats[TEST_RUNS], "\t\tPI ESTIMATOR CRITERIA NOT MET! PI = %f\n", pi); p_value = 0.0; } else { V = 1; V2 = 1; // Start the timer LARGE_INTEGER frequency1; // ticks per second LARGE_INTEGER t11, t21; // ticks double elapsedTime1; //// get ticks per second QueryPerformanceFrequency(&frequency1); // start timer QueryPerformanceCounter(&t11); for (k = 1; k < n; k++) if (epsilon[k] != epsilon[k-1]) V2++; // stop timer QueryPerformanceCounter(&t21); // compute and print the elapsed time in millisec elapsedTime1 = (t21.QuadPart - t11.QuadPart) * 1000.0 / frequency1.QuadPart; // Print the CPU execution time of the function printf ("CPU compute time (counting the transitions): %f\n", elapsedTime1); V += funcR(a, n); printf ("GPU: V = %lf\n", V); printf ("CPU: V2 = %lf\n", V2); erfc_arg = fabs(V2 - 2.0 * n * pi2 * (1-pi2)) / (2.0 * pi2 * (1-pi2) * sqrt(2.0*n)); p_value = cephes_erfc(erfc_arg); fprintf(stats[TEST_RUNS], "\t\t\t\tRUNS TEST\n"); fprintf(stats[TEST_RUNS], "\t\t------------------------------------------\n"); fprintf(stats[TEST_RUNS], "\t\tCOMPUTATIONAL INFORMATION:\n"); fprintf(stats[TEST_RUNS], "\t\t------------------------------------------\n"); fprintf(stats[TEST_RUNS], "\t\t(a) Pi = %f\n", pi); fprintf(stats[TEST_RUNS], "\t\t(b) V_n_obs (Total # of runs) = %d\n", (int)V); fprintf(stats[TEST_RUNS], "\t\t(c) V_n_obs - 2 n pi (1-pi)\n"); fprintf(stats[TEST_RUNS], "\t\t ----------------------- = %f\n", erfc_arg); fprintf(stats[TEST_RUNS], "\t\t 2 sqrt(2n) pi (1-pi)\n"); fprintf(stats[TEST_RUNS], "\t\t------------------------------------------\n"); if ( isNegative(p_value) || isGreaterThanOne(p_value) ) fprintf(stats[TEST_RUNS], "WARNING: P_VALUE IS OUT OF RANGE.\n"); fprintf(stats[TEST_RUNS], "%s\t\tp_value = %f\n\n", p_value < ALPHA ? "FAILURE" : "SUCCESS", p_value); fflush(stats[TEST_RUNS]); } fprintf(results[TEST_RUNS], "%f\n", p_value); fflush(results[TEST_RUNS]); }
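For reference, the statistics that the GPU paths in this file (reduce1 for S, funcR plus reduce1 for V) and the CPU loops in Runs() both compute can be stated as a short host-side sketch; epsilon stands for the same global bit sequence the test reads:

// S = number of ones in the sequence; V = 1 + number of adjacent positions
// that differ, i.e. the total number of runs. Mirrors the CPU loops in Runs().
void runs_reference(const int* epsilon, int n, int* S_out, int* V_out) {
    int S = 0, V = 1;
    for (int k = 0; k < n; ++k)
        if (epsilon[k]) S++;                    // count of ones
    for (int k = 1; k < n; ++k)
        if (epsilon[k] != epsilon[k - 1]) V++;  // count of transitions
    *S_out = S;
    *V_out = V;
}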
d103fc74008c6956ae884f00ed31141b40f3ed18.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <sstream> #include <string> #include "Matrix.h" #include "KernelMatrix.h" #include <windows.h> #include <ctime> using namespace std; int main() { hipEvent_t start, stop; float rumtime; hipEventCreate(&start); hipEventCreate(&stop); Matrix* matrix; // input sample matrix Matrix* outMatrix; // output kernel matrix int errcode; // error code int cylceTimes = 1; // number of loop iterations time_t start_1,end_1,time_1; // timing based on ctime // create the operation class KernelMatrix km = KernelMatrix(); // create the matrices MatrixBasicOp::newMatrix(&matrix); MatrixBasicOp::newMatrix(&outMatrix); // read the matrix from a file // MatrixBasicOp::readFromFile("a1_raw.csv",18,100,matrix); MatrixBasicOp::readFromFile("a2_raw.csv",18,100,matrix); // create the matrix on the current device MatrixBasicOp::makeAtCurrentDevice(outMatrix, 100,100); // create the matrix on the host // MatrixBasicOp::makeAtHost(outMatrix, 1000,1000); // copy the input matrix to the current device MatrixBasicOp::copyToCurrentDevice(matrix); // GPU starts timing hipEventRecord(start, 0); // CPU starts timing // DWORD start_time = GetTickCount(); //start_1 = clock(); // call the function to compute the kernel matrix for (int i = 0; i < cylceTimes; i++) { errcode = km.calcKernelMatrix(matrix, outMatrix); //errcode = km.calcKernelMatrixSeriel(matrix, outMatrix); } // CPU stops timing //end_1 = clock(); //DWORD end_time = GetTickCount(); //DWORD used_time = end_time - start_time; // cout << used_time << endl; //cout << end_1 - start_1 << endl; // GPU stops timing hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&rumtime, start, stop); if (errcode != NO_ERROR) { cout << "kernel matrix error:" << errcode << endl; } else { cout << "kernel matrix success" << endl; cout << "the time is " << (rumtime) / 100 << " ms" << endl; } // copy the result to host memory MatrixBasicOp::copyToHost(outMatrix); // write the matrix to a file //MatrixBasicOp::writeToFile("output1.csv",outMatrix); MatrixBasicOp::writeToFile("output2.csv",outMatrix); }
d103fc74008c6956ae884f00ed31141b40f3ed18.cu
#include <iostream> #include <fstream> #include <sstream> #include <string> #include "Matrix.h" #include "KernelMatrix.h" #include <windows.h> #include <ctime> using namespace std; int main() { cudaEvent_t start, stop; float rumtime; cudaEventCreate(&start); cudaEventCreate(&stop); Matrix* matrix; // input sample matrix Matrix* outMatrix; // output kernel matrix int errcode; // error code int cylceTimes = 1; // number of loop iterations time_t start_1,end_1,time_1; // timing based on ctime // create the operation class KernelMatrix km = KernelMatrix(); // create the matrices MatrixBasicOp::newMatrix(&matrix); MatrixBasicOp::newMatrix(&outMatrix); // read the matrix from a file // MatrixBasicOp::readFromFile("a1_raw.csv",18,100,matrix); MatrixBasicOp::readFromFile("a2_raw.csv",18,100,matrix); // create the matrix on the current device MatrixBasicOp::makeAtCurrentDevice(outMatrix, 100,100); // create the matrix on the host // MatrixBasicOp::makeAtHost(outMatrix, 1000,1000); // copy the input matrix to the current device MatrixBasicOp::copyToCurrentDevice(matrix); // GPU starts timing cudaEventRecord(start, 0); // CPU starts timing // DWORD start_time = GetTickCount(); //start_1 = clock(); // call the function to compute the kernel matrix for (int i = 0; i < cylceTimes; i++) { errcode = km.calcKernelMatrix(matrix, outMatrix); //errcode = km.calcKernelMatrixSeriel(matrix, outMatrix); } // CPU stops timing //end_1 = clock(); //DWORD end_time = GetTickCount(); //DWORD used_time = end_time - start_time; // cout << used_time << endl; //cout << end_1 - start_1 << endl; // GPU stops timing cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&rumtime, start, stop); if (errcode != NO_ERROR) { cout << "kernel matrix error:" << errcode << endl; } else { cout << "kernel matrix success" << endl; cout << "the time is " << (rumtime) / 100 << " ms" << endl; } // copy the result to host memory MatrixBasicOp::copyToHost(outMatrix); // write the matrix to a file //MatrixBasicOp::writeToFile("output1.csv",outMatrix); MatrixBasicOp::writeToFile("output2.csv",outMatrix); }
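The main() above wraps the kernel-matrix call in cudaEvent-based timing; a self-contained sketch of that pattern follows (work_kernel is a hypothetical placeholder, not part of KernelMatrix):

#include <cuda_runtime.h>

__global__ void work_kernel() {}   // stands in for the timed computation

float time_kernel_ms() {
    cudaEvent_t start, stop;
    float ms = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);       // mark the start on the default stream
    work_kernel<<<1, 1>>>();         // the work being measured
    cudaEventRecord(stop, 0);        // mark the end
    cudaEventSynchronize(stop);      // wait until the stop event has completed
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}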
cc5449afb0352606c222f76f43f7a06afadd90ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @author Azzam Haidar @author Ahmad Abdelfattah @generated from magmablas/zgetrf_batched_smallsq_shfl.cu, normal z -> d, Thu Oct 8 23:05:37 2020 */ #include "magma_internal.h" #include "magma_templates.h" #include "sync.cuh" #include "shuffle.cuh" #include "batched_kernel_param.h" // This kernel uses registers for matrix storage, shared mem. and shuffle for communication. // It also uses lazy swap. extern __shared__ double ddata[]; template<int N, int NSHFL> __global__ void dgetrf_batched_smallsq_shfl_kernel( double** dA_array, int ldda, magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int batchid = blockIdx.x * blockDim.y + ty; if(batchid >= batchCount) return; double* dA = dA_array[batchid]; magma_int_t* ipiv = ipiv_array[batchid]; magma_int_t* info = &info_array[batchid]; double rA[N] = {MAGMA_D_ZERO}; double y[N] = {MAGMA_D_ZERO}; double reg = MAGMA_D_ZERO; double update = MAGMA_D_ZERO; int max_id, current_piv_tx, rowid = tx, linfo = 0; double rx_abs_max = MAGMA_D_ZERO; // shared memory pointers double* sx = (double*)(ddata); int* sipiv = (int*)(sx + blockDim.y * NSHFL); sx += ty * NSHFL; sipiv += ty * (NSHFL+1); volatile int* scurrent_piv_tx = (volatile int*)(sipiv + NSHFL); // read if( tx < N ){ #pragma unroll for(int i = 0; i < N; i++){ rA[i] = dA[ i * ldda + tx ]; } } #pragma unroll for(int i = 0; i < N; i++){ sx[ rowid ] = fabs(MAGMA_D_REAL( rA[i] )) + fabs(MAGMA_D_IMAG( rA[i] )); magmablas_syncwarp(); rx_abs_max = sx[i]; max_id = i; #pragma unroll for(int j = i; j < N; j++){ if( sx[j] > rx_abs_max){ max_id = j; rx_abs_max = sx[j]; } } linfo = ( rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (i+1) : linfo; update = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_D_ZERO : MAGMA_D_ONE; if(rowid == max_id){ sipiv[i] = max_id; rowid = i; (*scurrent_piv_tx) = tx; } else if(rowid == i){ rowid = max_id; } current_piv_tx = (*scurrent_piv_tx); magmablas_syncwarp(); #pragma unroll for(int j = i; j < N; j++){ y[j] = update * magmablas_dshfl( rA[j], current_piv_tx, NSHFL); } reg = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_D_ONE : MAGMA_D_DIV(MAGMA_D_ONE, y[i] ); // scal and ger if( rowid > i ){ rA[i] *= reg; #pragma unroll for(int j = i+1; j < N; j++){ rA[j] -= rA[i] * y[j]; } } } // write if( tx == 0 ){ (*info) = (magma_int_t)linfo; } if(tx < N) { ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1); #pragma unroll for(int i = 0; i < N; i++){ dA[ i * ldda + rowid ] = rA[i]; } } } /***************************************************************************//** Purpose ------- dgetrf_batched_smallsq_noshfl computes the LU factorization of a square N-by-N matrix A using partial pivoting with row interchanges. This routine can deal only with square matrices of size up to 32 The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 3 BLAS version of the algorithm. This is a batched version that factors batchCount M-by-N matrices in parallel. dA, ipiv, and info become arrays with one entry per matrix. Arguments --------- @param[in] n INTEGER The size of each matrix A. N >= 0. 
@param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a DOUBLE PRECISION array on the GPU, dimension (LDDA,N). On entry, each pointer is an M-by-N matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. @param[in] ldda INTEGER The leading dimension of each array A. LDDA >= max(1,M). @param[out] ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices. Each is an INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[out] info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices. - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value or another error occured, such as memory allocation failed. - > 0: if INFO = i, U(i,i) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_getrf_batched *******************************************************************************/ extern "C" magma_int_t magma_dgetrf_batched_smallsq_shfl( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t arginfo = 0; magma_int_t m = n; if( (m < 0) || ( m > 32 ) ){ arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( m == 0) return 0; const magma_int_t ntcol = magma_get_dgetrf_batched_ntcol(m, n); magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int); shmem += ntcol * magma_ceilpow2(m) * sizeof(double); shmem += ntcol * 1 * sizeof(int); dim3 threads(magma_ceilpow2(m), ntcol, 1); const magma_int_t gridx = magma_ceildiv(batchCount, ntcol); dim3 grid(gridx, 1, 1); switch(m){ case 1:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 1, magma_ceilpow2( 1)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 2:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 2, magma_ceilpow2( 2)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 3:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 3, magma_ceilpow2( 3)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 4:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 4, magma_ceilpow2( 4)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 5:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 5, magma_ceilpow2( 5)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 6:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 6, magma_ceilpow2( 6)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 7:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 7, magma_ceilpow2( 7)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 8:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 8, magma_ceilpow2( 
8)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 9:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 9, magma_ceilpow2( 9)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 10:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<10, magma_ceilpow2(10)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 11:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<11, magma_ceilpow2(11)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 12:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<12, magma_ceilpow2(12)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 13:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<13, magma_ceilpow2(13)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 14:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<14, magma_ceilpow2(14)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 15:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<15, magma_ceilpow2(15)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 16:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<16, magma_ceilpow2(16)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 17:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<17, magma_ceilpow2(17)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 18:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<18, magma_ceilpow2(18)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 19:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<19, magma_ceilpow2(19)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 20:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<20, magma_ceilpow2(20)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 21:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<21, magma_ceilpow2(21)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 22:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<22, magma_ceilpow2(22)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 23:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<23, magma_ceilpow2(23)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 24:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<24, magma_ceilpow2(24)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 25:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<25, magma_ceilpow2(25)>), dim3(grid), dim3(threads), shmem, 
queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 26:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<26, magma_ceilpow2(26)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 27:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<27, magma_ceilpow2(27)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 28:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<28, magma_ceilpow2(28)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 29:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<29, magma_ceilpow2(29)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 30:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<30, magma_ceilpow2(30)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 31:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<31, magma_ceilpow2(31)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 32:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<32, magma_ceilpow2(32)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; default: printf("error: size %lld is not supported\n", (long long) m); } return arginfo; }
cc5449afb0352606c222f76f43f7a06afadd90ff.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @author Azzam Haidar @author Ahmad Abdelfattah @generated from magmablas/zgetrf_batched_smallsq_shfl.cu, normal z -> d, Thu Oct 8 23:05:37 2020 */ #include "magma_internal.h" #include "magma_templates.h" #include "sync.cuh" #include "shuffle.cuh" #include "batched_kernel_param.h" // This kernel uses registers for matrix storage, shared mem. and shuffle for communication. // It also uses lazy swap. extern __shared__ double ddata[]; template<int N, int NSHFL> __global__ void dgetrf_batched_smallsq_shfl_kernel( double** dA_array, int ldda, magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int batchid = blockIdx.x * blockDim.y + ty; if(batchid >= batchCount) return; double* dA = dA_array[batchid]; magma_int_t* ipiv = ipiv_array[batchid]; magma_int_t* info = &info_array[batchid]; double rA[N] = {MAGMA_D_ZERO}; double y[N] = {MAGMA_D_ZERO}; double reg = MAGMA_D_ZERO; double update = MAGMA_D_ZERO; int max_id, current_piv_tx, rowid = tx, linfo = 0; double rx_abs_max = MAGMA_D_ZERO; // shared memory pointers double* sx = (double*)(ddata); int* sipiv = (int*)(sx + blockDim.y * NSHFL); sx += ty * NSHFL; sipiv += ty * (NSHFL+1); volatile int* scurrent_piv_tx = (volatile int*)(sipiv + NSHFL); // read if( tx < N ){ #pragma unroll for(int i = 0; i < N; i++){ rA[i] = dA[ i * ldda + tx ]; } } #pragma unroll for(int i = 0; i < N; i++){ sx[ rowid ] = fabs(MAGMA_D_REAL( rA[i] )) + fabs(MAGMA_D_IMAG( rA[i] )); magmablas_syncwarp(); rx_abs_max = sx[i]; max_id = i; #pragma unroll for(int j = i; j < N; j++){ if( sx[j] > rx_abs_max){ max_id = j; rx_abs_max = sx[j]; } } linfo = ( rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (i+1) : linfo; update = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_D_ZERO : MAGMA_D_ONE; if(rowid == max_id){ sipiv[i] = max_id; rowid = i; (*scurrent_piv_tx) = tx; } else if(rowid == i){ rowid = max_id; } current_piv_tx = (*scurrent_piv_tx); magmablas_syncwarp(); #pragma unroll for(int j = i; j < N; j++){ y[j] = update * magmablas_dshfl( rA[j], current_piv_tx, NSHFL); } reg = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_D_ONE : MAGMA_D_DIV(MAGMA_D_ONE, y[i] ); // scal and ger if( rowid > i ){ rA[i] *= reg; #pragma unroll for(int j = i+1; j < N; j++){ rA[j] -= rA[i] * y[j]; } } } // write if( tx == 0 ){ (*info) = (magma_int_t)linfo; } if(tx < N) { ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1); #pragma unroll for(int i = 0; i < N; i++){ dA[ i * ldda + rowid ] = rA[i]; } } } /***************************************************************************//** Purpose ------- dgetrf_batched_smallsq_noshfl computes the LU factorization of a square N-by-N matrix A using partial pivoting with row interchanges. This routine can deal only with square matrices of size up to 32 The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 3 BLAS version of the algorithm. This is a batched version that factors batchCount M-by-N matrices in parallel. dA, ipiv, and info become arrays with one entry per matrix. Arguments --------- @param[in] n INTEGER The size of each matrix A. N >= 0. @param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a DOUBLE PRECISION array on the GPU, dimension (LDDA,N). 
On entry, each pointer is an M-by-N matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. @param[in] ldda INTEGER The leading dimension of each array A. LDDA >= max(1,M). @param[out] ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices. Each is an INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[out] info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices. - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value or another error occured, such as memory allocation failed. - > 0: if INFO = i, U(i,i) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_getrf_batched *******************************************************************************/ extern "C" magma_int_t magma_dgetrf_batched_smallsq_shfl( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t arginfo = 0; magma_int_t m = n; if( (m < 0) || ( m > 32 ) ){ arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( m == 0) return 0; const magma_int_t ntcol = magma_get_dgetrf_batched_ntcol(m, n); magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int); shmem += ntcol * magma_ceilpow2(m) * sizeof(double); shmem += ntcol * 1 * sizeof(int); dim3 threads(magma_ceilpow2(m), ntcol, 1); const magma_int_t gridx = magma_ceildiv(batchCount, ntcol); dim3 grid(gridx, 1, 1); switch(m){ case 1: dgetrf_batched_smallsq_shfl_kernel< 1, magma_ceilpow2( 1)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 2: dgetrf_batched_smallsq_shfl_kernel< 2, magma_ceilpow2( 2)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 3: dgetrf_batched_smallsq_shfl_kernel< 3, magma_ceilpow2( 3)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 4: dgetrf_batched_smallsq_shfl_kernel< 4, magma_ceilpow2( 4)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 5: dgetrf_batched_smallsq_shfl_kernel< 5, magma_ceilpow2( 5)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 6: dgetrf_batched_smallsq_shfl_kernel< 6, magma_ceilpow2( 6)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 7: dgetrf_batched_smallsq_shfl_kernel< 7, magma_ceilpow2( 7)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 8: dgetrf_batched_smallsq_shfl_kernel< 8, magma_ceilpow2( 8)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 9: dgetrf_batched_smallsq_shfl_kernel< 9, magma_ceilpow2( 9)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 10: dgetrf_batched_smallsq_shfl_kernel<10, magma_ceilpow2(10)><<<grid, 
threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 11: dgetrf_batched_smallsq_shfl_kernel<11, magma_ceilpow2(11)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 12: dgetrf_batched_smallsq_shfl_kernel<12, magma_ceilpow2(12)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 13: dgetrf_batched_smallsq_shfl_kernel<13, magma_ceilpow2(13)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 14: dgetrf_batched_smallsq_shfl_kernel<14, magma_ceilpow2(14)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 15: dgetrf_batched_smallsq_shfl_kernel<15, magma_ceilpow2(15)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 16: dgetrf_batched_smallsq_shfl_kernel<16, magma_ceilpow2(16)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 17: dgetrf_batched_smallsq_shfl_kernel<17, magma_ceilpow2(17)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 18: dgetrf_batched_smallsq_shfl_kernel<18, magma_ceilpow2(18)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 19: dgetrf_batched_smallsq_shfl_kernel<19, magma_ceilpow2(19)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 20: dgetrf_batched_smallsq_shfl_kernel<20, magma_ceilpow2(20)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 21: dgetrf_batched_smallsq_shfl_kernel<21, magma_ceilpow2(21)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 22: dgetrf_batched_smallsq_shfl_kernel<22, magma_ceilpow2(22)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 23: dgetrf_batched_smallsq_shfl_kernel<23, magma_ceilpow2(23)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 24: dgetrf_batched_smallsq_shfl_kernel<24, magma_ceilpow2(24)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 25: dgetrf_batched_smallsq_shfl_kernel<25, magma_ceilpow2(25)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 26: dgetrf_batched_smallsq_shfl_kernel<26, magma_ceilpow2(26)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 27: dgetrf_batched_smallsq_shfl_kernel<27, magma_ceilpow2(27)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 28: dgetrf_batched_smallsq_shfl_kernel<28, magma_ceilpow2(28)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 29: dgetrf_batched_smallsq_shfl_kernel<29, magma_ceilpow2(29)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 30: dgetrf_batched_smallsq_shfl_kernel<30, magma_ceilpow2(30)><<<grid, threads, shmem, 
queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 31: dgetrf_batched_smallsq_shfl_kernel<31, magma_ceilpow2(31)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 32: dgetrf_batched_smallsq_shfl_kernel<32, magma_ceilpow2(32)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; default: printf("error: size %lld is not supported\n", (long long) m); } return arginfo; }
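magma_dgetrf_batched_smallsq_shfl above selects a kernel instantiation from the runtime size m through a switch over compile-time template arguments, so each supported size gets fully specialized code. A stripped-down sketch of that dispatch pattern, with hypothetical names rather than MAGMA's API:

#include <cstdio>
#include <cuda_runtime.h>

template<int N>
__global__ void small_lu_kernel(double** dA_array, int ldda) {
    // N is a compile-time constant here, so per-row loops of length N
    // can be kept in registers and fully unrolled.
}

void dispatch_small_lu(int m, double** dA_array, int ldda, dim3 grid, dim3 threads,
                       size_t shmem, cudaStream_t stream) {
    switch (m) {
    case 1: small_lu_kernel<1><<<grid, threads, shmem, stream>>>(dA_array, ldda); break;
    case 2: small_lu_kernel<2><<<grid, threads, shmem, stream>>>(dA_array, ldda); break;
    // ... one case per supported size; the routine above covers 1 through 32 ...
    default: printf("error: size %d is not supported\n", m);
    }
}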
e0974f938ece0c4aeb091104d3da863d3383c970.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <iomanip> #include "image.h" #define BUF_SIZE 256 #define MAX_ITER 10000 using namespace std; class errorPNM { }; struct Color { unsigned char r; unsigned char g; unsigned char b; }; void readPNM(ifstream &file, char* buf); image<unsigned char>* loadPGM(const char* name); void savePPM(image<Color>* im, const char* name); Color randomColor(); __global__ void evolveContour(unsigned char* intensityDev, unsigned char* labelsDev, signed char* speedDev, signed char* phiDev, int HEIGHT, int WIDTH, int* targetLabels, int kernelID, int numLabels, int* lowerIntensityBounds, int* upperIntensityBounds); __global__ void initSpeedPhi(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int targetLabel, int lowerIntensityBound, int upperIntensityBound); __global__ void switchIn(signed char* speed, signed char* phi, int HEIGHT, int WIDTH); __global__ void switchOut(signed char* speed, signed char* phi, int HEIGHT, int WIDTH); __global__ void checkStopCondition(signed char* speed, signed char* phi, int parentThreadID, int HEIGHT, int WIDTH); __device__ volatile int stopCondition[1024]; int main(int argc, char* argv[]) { // Hard Coding Struct to print same way as other implementations (to compare images) struct Color colors[32]; for (int i = 0; i < 32; i++){ colors[i] = randomColor(); } colors[0].r = 204; colors[0].g = 0; colors[0].b = 0; colors[1].r = 204; colors[1].g = 102; colors[1].b = 0; colors[2].r = 204; colors[2].g = 204; colors[2].b = 0; colors[3].r = 102; colors[3].g = 204; colors[3].b = 0; colors[4].r = 0; colors[4].g = 204; colors[4].b = 0; colors[5].r = 0; colors[5].g = 204; colors[5].b = 102; colors[6].r = 0; colors[6].g = 204; colors[6].b = 204; colors[7].r = 0; colors[7].g = 102; colors[7].b = 204; colors[8].r = 0; colors[8].g = 0; colors[8].b = 204; colors[9].r = 102; colors[9].g = 0; colors[9].b = 204; // Parse command line arguments char* imageFile = NULL; char* labelFile = NULL; char* paramFile = NULL; int numRepetitions = 1; //bool produceOutput = false; for(int i=1; i<argc; i++) { if(strcmp(argv[i], "--image") == 0) { if(i+1 < argc) imageFile = argv[++i]; else { cerr << "Expected a filename after '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } } else if(strcmp(argv[i], "--labels") == 0) { if(i+1 < argc) labelFile = argv[++i]; else { cerr << "Expected a filename after '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } } else if(strcmp(argv[i], "--params") == 0) { if(i+1 < argc) paramFile = argv[++i]; else { cerr << "Expected a filename after '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } } else if(strcmp(argv[i], "--reps") == 0) { if(i+1 < argc) { numRepetitions = atoi(argv[++i]); if(numRepetitions < 1) { cerr << "Number of repetitions must be greater than 0." << endl; exit(1); } } else { cerr << "Expected a filename after '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." 
<< endl; exit(1); } } //else if(strcmp(argv[i], "-o") == 0 || strcmp(argv[i], "--output") == 0) //produceOutput = true; else if(strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) { cout << "Usage: " << argv[0] << " [OPTIONS] --image <file> --labels <file> --params <file>" << endl; cout << "The order of switches does not matter so long as each one is immediately followed by its appropriate" << endl; cout << "argument (if one is required).\n" << endl; cout << "Utilizes a massively parallelized level set algorithm to segment the desired region of interest in" << endl; cout << "a grayscale image based on a given intensity range. Developed by Brett Daley as part of the NUPAR" << endl; cout << "benchmark suite.\n" << endl; cout << "Required arguments:" << endl; cout << " --image <file> Grayscale image to be segmented (intensities must be between 0 and 255)." << endl; cout << " --labels <file> Stripe-Based Connected Components Labeling output file. Used to seed the" << endl; cout << " initial contour." << endl; cout << " --params <file> Text file requiring the following format:" << endl; cout << " <Target label>" << endl; cout << " <Lower intensity bound>" << endl; cout << " <Upper intensity bound>" << endl; cout << " ..." << endl; cout << " Having multiple sets of three lines will segment the image multiple times according" << endl; cout << " to the different parameters. Utilizes dynamic parallelism." << endl; cout << "Options:" << endl; cout << " --reps <number> Run the program the specified number of times, enabling concurrent kernel execution" << endl; cout << " via Hyper-Q. Useful for performance benchmarking. [Default: 1]" << endl; cout << " -o, --output Output an RGB image for each target label specified in the params file. Use MATLAB's" << endl; cout << " 'imshow' command to view." << endl; cout << " -h, --help Display this information and exit." << endl; exit(0); } else { cerr << "Did not recognize '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } } if(imageFile == NULL || labelFile == NULL || paramFile == NULL) { cerr << "Missing one or more arguments. Try '" << argv[0] << " --help' for additional information." 
<< endl; exit(1); } // Initialize timers, start the runtime timer hipEvent_t startTime1, startTime2, stopTime1, stopTime2; hipEventCreate(&startTime1); hipEventCreate(&startTime2); hipEventCreate(&stopTime1); hipEventCreate(&stopTime2); float elapsedTime1, elapsedTime2; hipEventRecord(startTime1, 0); // Load image, send to GPU image<unsigned char>* input = loadPGM(imageFile); const int HEIGHT = input->height(); const int WIDTH = input->width(); const int SIZE = HEIGHT*WIDTH*sizeof(char); unsigned char* intensity = new unsigned char[numRepetitions*HEIGHT*WIDTH]; for(int i=0; i<numRepetitions; i++) memcpy(&intensity[i*HEIGHT*WIDTH], input->data, SIZE); unsigned char* intensityDev = NULL; hipMalloc((void**)&intensityDev, numRepetitions*SIZE); hipMemcpyAsync(intensityDev, intensity, numRepetitions*SIZE, hipMemcpyHostToDevice); // Load connected component labels, send to GPU input = loadPGM(labelFile); unsigned char* labels = new unsigned char[numRepetitions*HEIGHT*WIDTH]; for(int i=0; i<numRepetitions; i++) memcpy(&labels[i*HEIGHT*WIDTH], input->data, SIZE); unsigned char* labelsDev = NULL; hipMalloc((void **)&labelsDev, numRepetitions*SIZE); hipMemcpyAsync(labelsDev, labels, numRepetitions*SIZE, hipMemcpyHostToDevice); // Load parameters, send to GPU ifstream paramStream; paramStream.open(paramFile); if(paramStream.is_open() != true) { cerr << "Could not open '" << paramFile << "'." << endl; exit(1); } int targetLabels[1024]; int lowerIntensityBounds[1024]; int upperIntensityBounds[1024]; int numLabels = 0; while(paramStream.eof() == false) { char line[16]; paramStream.getline(line, 16); if(paramStream.eof() == true) break; if(numLabels % 3 == 0) targetLabels[numLabels/3] = strtol(line, NULL, 10); else if(numLabels % 3 == 1) lowerIntensityBounds[numLabels/3] = strtol(line, NULL, 10); else upperIntensityBounds[numLabels/3] = strtol(line, NULL, 10); numLabels++; } if(numLabels % 3 == 0) numLabels /= 3; else { cerr << "Number of lines in " << paramFile << " is not divisible by 3. Try '" << argv[0] << " --help' for additional information." 
<< endl; exit(1); } paramStream.close(); int* targetLabelsDev = NULL; hipMalloc((void**)&targetLabelsDev, numLabels*sizeof(int)); hipMemcpyAsync(targetLabelsDev, targetLabels, numLabels*sizeof(int), hipMemcpyHostToDevice); int* lowerIntensityBoundsDev = NULL; hipMalloc((void**)&lowerIntensityBoundsDev, numLabels*sizeof(int)); hipMemcpyAsync(lowerIntensityBoundsDev, lowerIntensityBounds, numLabels*sizeof(int), hipMemcpyHostToDevice); int* upperIntensityBoundsDev = NULL; hipMalloc((void**)&upperIntensityBoundsDev, numLabels*sizeof(int)); hipMemcpyAsync(upperIntensityBoundsDev, upperIntensityBounds, numLabels*sizeof(int), hipMemcpyHostToDevice); // Allocate arrays for speed and phi in GPU memory signed char* speedDev = NULL; signed char* phiDev = NULL; hipMalloc((void**)&speedDev, numRepetitions*numLabels*SIZE); hipMalloc((void**)&phiDev, numRepetitions*numLabels*SIZE); hipDeviceSynchronize(); // Start the segmentation timer hipEventRecord(startTime2, 0); // Launch kernel to begin image segmenation for(int i=0; i<numRepetitions; i++) { hipLaunchKernelGGL(( evolveContour), dim3(1), dim3(numLabels), 0, 0, intensityDev, labelsDev, speedDev, phiDev, HEIGHT, WIDTH, targetLabelsDev, i, numLabels, lowerIntensityBoundsDev, upperIntensityBoundsDev); } hipDeviceSynchronize(); // Stop the segmentation timer hipEventRecord(stopTime2, 0); // Retrieve results from the GPU signed char* phi = new signed char[numRepetitions*numLabels*HEIGHT*WIDTH]; hipMemcpy(phi, phiDev, numRepetitions*numLabels*SIZE, hipMemcpyDeviceToHost); // Stop the runtime timer hipEventRecord(stopTime1, 0); // Output RGB images (if command line switch was present) //if(produceOutput == true) if (true) { Color color; srand(1000); // Create 1 image for all labels image<Color> output = image<Color>(WIDTH, HEIGHT, true); image<Color>* im = &output; // Initialize image (same as input) for(int i = 0 ; i < HEIGHT ; i++) { for(int j = 0 ; j < WIDTH ; j++){ color.r = intensity[i*WIDTH+j]; color.g = intensity[i*WIDTH+j]; color.b = intensity[i*WIDTH+j]; im->access[i][j] = color; } } for(int k = 0 ; k < numLabels ; k++) { Color randomcolor = colors[k]; for(int i = 0 ; i < HEIGHT ; i++) { for(int j = 0 ; j < WIDTH ; j++){ if (phi[k*HEIGHT*WIDTH+i*WIDTH+j] == -1){ color = randomcolor; im->access[i][j] = color; } else if (phi[k*HEIGHT*WIDTH+i*WIDTH+j] == -3){ color.r = randomcolor.r + 50; color.g = randomcolor.g + 50; color.b = randomcolor.b + 50; im->access[i][j] = color; } } } } char filename[64]; sprintf(filename, "result.ppm"); savePPM(im, filename); } // Stop runtime timer and print times hipEventElapsedTime(&elapsedTime1, startTime1, stopTime1); hipEventElapsedTime(&elapsedTime2, startTime2, stopTime2); cout << "Kernel Execution Time: " << setprecision(6) << elapsedTime2 << " ms"<< endl; cout << "Total GPU Execution Time: " << setprecision(6) << elapsedTime1 << " ms"<< endl; // Free resources and end the program hipEventDestroy(startTime1); hipEventDestroy(stopTime1); hipEventDestroy(startTime2); hipEventDestroy(stopTime2); hipFree(intensityDev); hipFree(labelsDev); hipFree(speedDev); hipFree(phiDev); hipFree(targetLabelsDev); hipFree(lowerIntensityBoundsDev); hipFree(upperIntensityBoundsDev); return 0; } image<unsigned char>* loadPGM(const char* name) { char buf[BUF_SIZE]; // Read header ifstream file(name, ios::in | ios::binary); readPNM(file, buf); if(strncmp(buf, "P5", 2)) { cerr << "Unable to open '" << name << "'." 
<< endl; throw errorPNM(); } readPNM(file, buf); int width = atoi(buf); readPNM(file, buf); int height = atoi(buf); readPNM(file, buf); if(atoi(buf) > UCHAR_MAX) { cerr << "Unable to open '" << name << "'." << endl; throw errorPNM(); } // Read data image<unsigned char>* im = new image<unsigned char>(width, height); file.read((char*)imPtr(im, 0, 0), width*height*sizeof(unsigned char)); return im; } void readPNM(ifstream &file, char* buf) { char doc[BUF_SIZE]; char c; file >> c; while (c == '#') { file.getline(doc, BUF_SIZE); file >> c; } file.putback(c); file.width(BUF_SIZE); file >> buf; file.ignore(); } void savePPM(image<Color>* im, const char* name) { int width = im->width(); int height = im->height(); ofstream file(name, ios::out | ios::binary); file << "P6\n" << width << " " << height << "\n" << UCHAR_MAX << "\n"; file.write((char*)imPtr(im, 0, 0), width*height*sizeof(Color)); } Color randomColor() { Color c; c.r = (unsigned char) rand(); c.g = (unsigned char) rand(); c.b = (unsigned char) rand(); return c; } __global__ void evolveContour(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int* targetLabels, int kernelID, int numLabels, int* lowerIntensityBounds, int* upperIntensityBounds) { int tid = threadIdx.x; intensity = &intensity[kernelID*HEIGHT*WIDTH]; labels = &labels[kernelID*HEIGHT*WIDTH]; speed = &speed[(kernelID*numLabels+tid)*HEIGHT*WIDTH]; phi = &phi[(kernelID*numLabels+tid)*HEIGHT*WIDTH]; dim3 dimGrid(WIDTH/30+1, HEIGHT/30+1); dim3 dimBlock(32, 32); hipLaunchKernelGGL(( initSpeedPhi), dim3(dimGrid), dim3(dimBlock), 0, 0, intensity, labels, speed, phi, HEIGHT, WIDTH, targetLabels[tid], lowerIntensityBounds[tid], upperIntensityBounds[tid]); int numIterations = 0; stopCondition[tid] = 1; while(stopCondition[tid] && numIterations < MAX_ITER) { stopCondition[tid] = 0; numIterations++; dimGrid.x = WIDTH/30+1; dimGrid.y = HEIGHT/30+1; // Outward evolution hipLaunchKernelGGL(( switchIn), dim3(dimGrid), dim3(dimBlock), 0, 0, speed, phi, HEIGHT, WIDTH); // Inward evolution hipLaunchKernelGGL(( switchOut), dim3(dimGrid), dim3(dimBlock), 0, 0, speed, phi, HEIGHT, WIDTH); // Check stopping condition on every third iteration if(numIterations % 3 == 0) { dimGrid.x = WIDTH/32+1; dimGrid.y = HEIGHT/32+1; hipLaunchKernelGGL(( checkStopCondition), dim3(dimGrid), dim3(dimBlock), 0, 0, speed, phi, tid, HEIGHT, WIDTH); hipDeviceSynchronize(); } else { stopCondition[tid] = 1; } } printf("Target label %d (intensities: %d-%d) converged in %d iterations.\n", targetLabels[tid], lowerIntensityBounds[tid], upperIntensityBounds[tid], numIterations); } __global__ void initSpeedPhi(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int targetLabel, int lowerIntensityBound, int upperIntensityBound) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int intensityReg; int speedReg; int phiReg; __shared__ int labelsTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { labelsTile[ty][tx] = labels[yPos*WIDTH+xPos]; intensityReg = intensity[yPos*WIDTH+xPos]; } // Initialization if(tx > 0 && tx < 31 && ty > 0 && ty < 31 && xPos < WIDTH-1 && yPos < HEIGHT-1) { // Phi if(labelsTile[ty][tx] != targetLabel) { if(labelsTile[ty][tx-1] != targetLabel && labelsTile[ty][tx+1] != targetLabel && labelsTile[ty-1][tx] != targetLabel && labelsTile[ty+1][tx] != targetLabel) phiReg = 3; 
else phiReg = 1; } else { if(labelsTile[ty][tx-1] != targetLabel || labelsTile[ty][tx+1] != targetLabel || labelsTile[ty-1][tx] != targetLabel || labelsTile[ty+1][tx] != targetLabel) phiReg = -1; else phiReg = -3; } // Speed if(intensityReg >= lowerIntensityBound && intensityReg <= upperIntensityBound) speedReg = 1; else speedReg = -1; // Load data back into global memory speed[yPos*WIDTH+xPos] = speedReg; phi[yPos*WIDTH+xPos] = phiReg; } } __global__ void switchIn(signed char* speed, signed char* phi, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int speedReg; __shared__ int phiTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiTile[ty][tx] = phi[yPos*WIDTH+xPos]; } if(xPos > 0 && xPos < WIDTH-1 && yPos > 0 && yPos < HEIGHT-1) { // Delete points from Lout and add them to Lin if(phiTile[ty][tx] == 1 && speedReg > 0) phiTile[ty][tx] = -1; if(tx > 0 && tx < 31 && ty > 0 && ty < 31) { // Update neighborhood if(phiTile[ty][tx] == 3) { if(phiTile[ty][tx-1] == -1 || phiTile[ty][tx+1] == -1 || phiTile[ty-1][tx] == -1 || phiTile[ty+1][tx] == -1) phiTile[ty][tx] = 1; } // Eliminate redundant points in Lin if(phiTile[ty][tx] == -1) { if(phiTile[ty][tx-1] < 0 && phiTile[ty][tx+1] < 0 && phiTile[ty-1][tx] < 0 && phiTile[ty+1][tx] < 0) phiTile[ty][tx] = -3; } // Load data back into global memory phi[yPos*WIDTH+xPos] = phiTile[ty][tx]; } } } __global__ void switchOut(signed char* speed, signed char* phi, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int speedReg; __shared__ int phiTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiTile[ty][tx] = phi[yPos*WIDTH+xPos]; } if(xPos > 0 && xPos < WIDTH-1 && yPos > 0 && yPos < HEIGHT-1) { // Delete points from Lin and add them to Lout if(phiTile[ty][tx] == -1 && speedReg < 0) phiTile[ty][tx] = 1; if(tx > 0 && tx < 31 && ty > 0 && ty < 31) { // Update neighborhood if(phiTile[ty][tx] == -3) { if(phiTile[ty][tx-1] == 1 || phiTile[ty][tx+1] == 1 || phiTile[ty-1][tx] == 1 || phiTile[ty+1][tx] == 1) phiTile[ty][tx] = -1; } // Eliminate redundant points if(phiTile[ty][tx] == 1) { if(phiTile[ty][tx-1] > 0 && phiTile[ty][tx+1] > 0 && phiTile[ty-1][tx] > 0 && phiTile[ty+1][tx] > 0) phiTile[ty][tx] = 3; } // Load data back into global memory phi[yPos*WIDTH+xPos] = phiTile[ty][tx]; } } } __global__ void checkStopCondition(signed char* speed, signed char* phi, int parentThreadID, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 32*bx + tx; int yPos = 32*by + ty; signed char speedReg; signed char phiReg; __shared__ int stop; stop = 0; __syncthreads(); // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiReg = phi[yPos*WIDTH+xPos]; // Falsify stop condition if criteria are not met if(phiReg == 1 && speedReg > 0) { stop = 1; } else if(phiReg == -1 && speedReg < 0) { stop = 1; } } __syncthreads(); if (tx==0 && ty ==0 && stop && xPos < WIDTH && yPos < HEIGHT){ stopCondition[parentThreadID] = 1; __threadfence(); } }
e0974f938ece0c4aeb091104d3da863d3383c970.cu
#include <iostream> #include <fstream> #include <iomanip> #include "image.h" #define BUF_SIZE 256 #define MAX_ITER 10000 using namespace std; class errorPNM { }; struct Color { unsigned char r; unsigned char g; unsigned char b; }; void readPNM(ifstream &file, char* buf); image<unsigned char>* loadPGM(const char* name); void savePPM(image<Color>* im, const char* name); Color randomColor(); __global__ void evolveContour(unsigned char* intensityDev, unsigned char* labelsDev, signed char* speedDev, signed char* phiDev, int HEIGHT, int WIDTH, int* targetLabels, int kernelID, int numLabels, int* lowerIntensityBounds, int* upperIntensityBounds); __global__ void initSpeedPhi(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int targetLabel, int lowerIntensityBound, int upperIntensityBound); __global__ void switchIn(signed char* speed, signed char* phi, int HEIGHT, int WIDTH); __global__ void switchOut(signed char* speed, signed char* phi, int HEIGHT, int WIDTH); __global__ void checkStopCondition(signed char* speed, signed char* phi, int parentThreadID, int HEIGHT, int WIDTH); __device__ volatile int stopCondition[1024]; int main(int argc, char* argv[]) { // Hard Coding Struct to print same way as other implementations (to compare images) struct Color colors[32]; for (int i = 0; i < 32; i++){ colors[i] = randomColor(); } colors[0].r = 204; colors[0].g = 0; colors[0].b = 0; colors[1].r = 204; colors[1].g = 102; colors[1].b = 0; colors[2].r = 204; colors[2].g = 204; colors[2].b = 0; colors[3].r = 102; colors[3].g = 204; colors[3].b = 0; colors[4].r = 0; colors[4].g = 204; colors[4].b = 0; colors[5].r = 0; colors[5].g = 204; colors[5].b = 102; colors[6].r = 0; colors[6].g = 204; colors[6].b = 204; colors[7].r = 0; colors[7].g = 102; colors[7].b = 204; colors[8].r = 0; colors[8].g = 0; colors[8].b = 204; colors[9].r = 102; colors[9].g = 0; colors[9].b = 204; // Parse command line arguments char* imageFile = NULL; char* labelFile = NULL; char* paramFile = NULL; int numRepetitions = 1; //bool produceOutput = false; for(int i=1; i<argc; i++) { if(strcmp(argv[i], "--image") == 0) { if(i+1 < argc) imageFile = argv[++i]; else { cerr << "Expected a filename after '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } } else if(strcmp(argv[i], "--labels") == 0) { if(i+1 < argc) labelFile = argv[++i]; else { cerr << "Expected a filename after '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } } else if(strcmp(argv[i], "--params") == 0) { if(i+1 < argc) paramFile = argv[++i]; else { cerr << "Expected a filename after '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } } else if(strcmp(argv[i], "--reps") == 0) { if(i+1 < argc) { numRepetitions = atoi(argv[++i]); if(numRepetitions < 1) { cerr << "Number of repetitions must be greater than 0." << endl; exit(1); } } else { cerr << "Expected a filename after '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." 
<< endl; exit(1); } } //else if(strcmp(argv[i], "-o") == 0 || strcmp(argv[i], "--output") == 0) //produceOutput = true; else if(strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) { cout << "Usage: " << argv[0] << " [OPTIONS] --image <file> --labels <file> --params <file>" << endl; cout << "The order of switches does not matter so long as each one is immediately followed by its appropriate" << endl; cout << "argument (if one is required).\n" << endl; cout << "Utilizes a massively parallelized level set algorithm to segment the desired region of interest in" << endl; cout << "a grayscale image based on a given intensity range. Developed by Brett Daley as part of the NUPAR" << endl; cout << "benchmark suite.\n" << endl; cout << "Required arguments:" << endl; cout << " --image <file> Grayscale image to be segmented (intensities must be between 0 and 255)." << endl; cout << " --labels <file> Stripe-Based Connected Components Labeling output file. Used to seed the" << endl; cout << " initial contour." << endl; cout << " --params <file> Text file requiring the following format:" << endl; cout << " <Target label>" << endl; cout << " <Lower intensity bound>" << endl; cout << " <Upper intensity bound>" << endl; cout << " ..." << endl; cout << " Having multiple sets of three lines will segment the image multiple times according" << endl; cout << " to the different parameters. Utilizes dynamic parallelism." << endl; cout << "Options:" << endl; cout << " --reps <number> Run the program the specified number of times, enabling concurrent kernel execution" << endl; cout << " via Hyper-Q. Useful for performance benchmarking. [Default: 1]" << endl; cout << " -o, --output Output an RGB image for each target label specified in the params file. Use MATLAB's" << endl; cout << " 'imshow' command to view." << endl; cout << " -h, --help Display this information and exit." << endl; exit(0); } else { cerr << "Did not recognize '" << argv[i] << "'. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } } if(imageFile == NULL || labelFile == NULL || paramFile == NULL) { cerr << "Missing one or more arguments. Try '" << argv[0] << " --help' for additional information." 
<< endl; exit(1); } // Initialize timers, start the runtime timer cudaEvent_t startTime1, startTime2, stopTime1, stopTime2; cudaEventCreate(&startTime1); cudaEventCreate(&startTime2); cudaEventCreate(&stopTime1); cudaEventCreate(&stopTime2); float elapsedTime1, elapsedTime2; cudaEventRecord(startTime1, 0); // Load image, send to GPU image<unsigned char>* input = loadPGM(imageFile); const int HEIGHT = input->height(); const int WIDTH = input->width(); const int SIZE = HEIGHT*WIDTH*sizeof(char); unsigned char* intensity = new unsigned char[numRepetitions*HEIGHT*WIDTH]; for(int i=0; i<numRepetitions; i++) memcpy(&intensity[i*HEIGHT*WIDTH], input->data, SIZE); unsigned char* intensityDev = NULL; cudaMalloc((void**)&intensityDev, numRepetitions*SIZE); cudaMemcpyAsync(intensityDev, intensity, numRepetitions*SIZE, cudaMemcpyHostToDevice); // Load connected component labels, send to GPU input = loadPGM(labelFile); unsigned char* labels = new unsigned char[numRepetitions*HEIGHT*WIDTH]; for(int i=0; i<numRepetitions; i++) memcpy(&labels[i*HEIGHT*WIDTH], input->data, SIZE); unsigned char* labelsDev = NULL; cudaMalloc((void **)&labelsDev, numRepetitions*SIZE); cudaMemcpyAsync(labelsDev, labels, numRepetitions*SIZE, cudaMemcpyHostToDevice); // Load parameters, send to GPU ifstream paramStream; paramStream.open(paramFile); if(paramStream.is_open() != true) { cerr << "Could not open '" << paramFile << "'." << endl; exit(1); } int targetLabels[1024]; int lowerIntensityBounds[1024]; int upperIntensityBounds[1024]; int numLabels = 0; while(paramStream.eof() == false) { char line[16]; paramStream.getline(line, 16); if(paramStream.eof() == true) break; if(numLabels % 3 == 0) targetLabels[numLabels/3] = strtol(line, NULL, 10); else if(numLabels % 3 == 1) lowerIntensityBounds[numLabels/3] = strtol(line, NULL, 10); else upperIntensityBounds[numLabels/3] = strtol(line, NULL, 10); numLabels++; } if(numLabels % 3 == 0) numLabels /= 3; else { cerr << "Number of lines in " << paramFile << " is not divisible by 3. Try '" << argv[0] << " --help' for additional information." 
<< endl; exit(1); } paramStream.close(); int* targetLabelsDev = NULL; cudaMalloc((void**)&targetLabelsDev, numLabels*sizeof(int)); cudaMemcpyAsync(targetLabelsDev, targetLabels, numLabels*sizeof(int), cudaMemcpyHostToDevice); int* lowerIntensityBoundsDev = NULL; cudaMalloc((void**)&lowerIntensityBoundsDev, numLabels*sizeof(int)); cudaMemcpyAsync(lowerIntensityBoundsDev, lowerIntensityBounds, numLabels*sizeof(int), cudaMemcpyHostToDevice); int* upperIntensityBoundsDev = NULL; cudaMalloc((void**)&upperIntensityBoundsDev, numLabels*sizeof(int)); cudaMemcpyAsync(upperIntensityBoundsDev, upperIntensityBounds, numLabels*sizeof(int), cudaMemcpyHostToDevice); // Allocate arrays for speed and phi in GPU memory signed char* speedDev = NULL; signed char* phiDev = NULL; cudaMalloc((void**)&speedDev, numRepetitions*numLabels*SIZE); cudaMalloc((void**)&phiDev, numRepetitions*numLabels*SIZE); cudaDeviceSynchronize(); // Start the segmentation timer cudaEventRecord(startTime2, 0); // Launch kernel to begin image segmenation for(int i=0; i<numRepetitions; i++) { evolveContour<<<1, numLabels>>>(intensityDev, labelsDev, speedDev, phiDev, HEIGHT, WIDTH, targetLabelsDev, i, numLabels, lowerIntensityBoundsDev, upperIntensityBoundsDev); } cudaDeviceSynchronize(); // Stop the segmentation timer cudaEventRecord(stopTime2, 0); // Retrieve results from the GPU signed char* phi = new signed char[numRepetitions*numLabels*HEIGHT*WIDTH]; cudaMemcpy(phi, phiDev, numRepetitions*numLabels*SIZE, cudaMemcpyDeviceToHost); // Stop the runtime timer cudaEventRecord(stopTime1, 0); // Output RGB images (if command line switch was present) //if(produceOutput == true) if (true) { Color color; srand(1000); // Create 1 image for all labels image<Color> output = image<Color>(WIDTH, HEIGHT, true); image<Color>* im = &output; // Initialize image (same as input) for(int i = 0 ; i < HEIGHT ; i++) { for(int j = 0 ; j < WIDTH ; j++){ color.r = intensity[i*WIDTH+j]; color.g = intensity[i*WIDTH+j]; color.b = intensity[i*WIDTH+j]; im->access[i][j] = color; } } for(int k = 0 ; k < numLabels ; k++) { Color randomcolor = colors[k]; for(int i = 0 ; i < HEIGHT ; i++) { for(int j = 0 ; j < WIDTH ; j++){ if (phi[k*HEIGHT*WIDTH+i*WIDTH+j] == -1){ color = randomcolor; im->access[i][j] = color; } else if (phi[k*HEIGHT*WIDTH+i*WIDTH+j] == -3){ color.r = randomcolor.r + 50; color.g = randomcolor.g + 50; color.b = randomcolor.b + 50; im->access[i][j] = color; } } } } char filename[64]; sprintf(filename, "result.ppm"); savePPM(im, filename); } // Stop runtime timer and print times cudaEventElapsedTime(&elapsedTime1, startTime1, stopTime1); cudaEventElapsedTime(&elapsedTime2, startTime2, stopTime2); cout << "Kernel Execution Time: " << setprecision(6) << elapsedTime2 << " ms"<< endl; cout << "Total GPU Execution Time: " << setprecision(6) << elapsedTime1 << " ms"<< endl; // Free resources and end the program cudaEventDestroy(startTime1); cudaEventDestroy(stopTime1); cudaEventDestroy(startTime2); cudaEventDestroy(stopTime2); cudaFree(intensityDev); cudaFree(labelsDev); cudaFree(speedDev); cudaFree(phiDev); cudaFree(targetLabelsDev); cudaFree(lowerIntensityBoundsDev); cudaFree(upperIntensityBoundsDev); return 0; } image<unsigned char>* loadPGM(const char* name) { char buf[BUF_SIZE]; // Read header ifstream file(name, ios::in | ios::binary); readPNM(file, buf); if(strncmp(buf, "P5", 2)) { cerr << "Unable to open '" << name << "'." 
<< endl; throw errorPNM(); } readPNM(file, buf); int width = atoi(buf); readPNM(file, buf); int height = atoi(buf); readPNM(file, buf); if(atoi(buf) > UCHAR_MAX) { cerr << "Unable to open '" << name << "'." << endl; throw errorPNM(); } // Read data image<unsigned char>* im = new image<unsigned char>(width, height); file.read((char*)imPtr(im, 0, 0), width*height*sizeof(unsigned char)); return im; } void readPNM(ifstream &file, char* buf) { char doc[BUF_SIZE]; char c; file >> c; while (c == '#') { file.getline(doc, BUF_SIZE); file >> c; } file.putback(c); file.width(BUF_SIZE); file >> buf; file.ignore(); } void savePPM(image<Color>* im, const char* name) { int width = im->width(); int height = im->height(); ofstream file(name, ios::out | ios::binary); file << "P6\n" << width << " " << height << "\n" << UCHAR_MAX << "\n"; file.write((char*)imPtr(im, 0, 0), width*height*sizeof(Color)); } Color randomColor() { Color c; c.r = (unsigned char) rand(); c.g = (unsigned char) rand(); c.b = (unsigned char) rand(); return c; } __global__ void evolveContour(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int* targetLabels, int kernelID, int numLabels, int* lowerIntensityBounds, int* upperIntensityBounds) { int tid = threadIdx.x; intensity = &intensity[kernelID*HEIGHT*WIDTH]; labels = &labels[kernelID*HEIGHT*WIDTH]; speed = &speed[(kernelID*numLabels+tid)*HEIGHT*WIDTH]; phi = &phi[(kernelID*numLabels+tid)*HEIGHT*WIDTH]; dim3 dimGrid(WIDTH/30+1, HEIGHT/30+1); dim3 dimBlock(32, 32); initSpeedPhi<<<dimGrid, dimBlock>>>(intensity, labels, speed, phi, HEIGHT, WIDTH, targetLabels[tid], lowerIntensityBounds[tid], upperIntensityBounds[tid]); int numIterations = 0; stopCondition[tid] = 1; while(stopCondition[tid] && numIterations < MAX_ITER) { stopCondition[tid] = 0; numIterations++; dimGrid.x = WIDTH/30+1; dimGrid.y = HEIGHT/30+1; // Outward evolution switchIn<<<dimGrid, dimBlock>>>(speed, phi, HEIGHT, WIDTH); // Inward evolution switchOut<<<dimGrid, dimBlock>>>(speed, phi, HEIGHT, WIDTH); // Check stopping condition on every third iteration if(numIterations % 3 == 0) { dimGrid.x = WIDTH/32+1; dimGrid.y = HEIGHT/32+1; checkStopCondition<<<dimGrid, dimBlock>>>(speed, phi, tid, HEIGHT, WIDTH); cudaDeviceSynchronize(); } else { stopCondition[tid] = 1; } } printf("Target label %d (intensities: %d-%d) converged in %d iterations.\n", targetLabels[tid], lowerIntensityBounds[tid], upperIntensityBounds[tid], numIterations); } __global__ void initSpeedPhi(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int targetLabel, int lowerIntensityBound, int upperIntensityBound) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int intensityReg; int speedReg; int phiReg; __shared__ int labelsTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { labelsTile[ty][tx] = labels[yPos*WIDTH+xPos]; intensityReg = intensity[yPos*WIDTH+xPos]; } // Initialization if(tx > 0 && tx < 31 && ty > 0 && ty < 31 && xPos < WIDTH-1 && yPos < HEIGHT-1) { // Phi if(labelsTile[ty][tx] != targetLabel) { if(labelsTile[ty][tx-1] != targetLabel && labelsTile[ty][tx+1] != targetLabel && labelsTile[ty-1][tx] != targetLabel && labelsTile[ty+1][tx] != targetLabel) phiReg = 3; else phiReg = 1; } else { if(labelsTile[ty][tx-1] != targetLabel || labelsTile[ty][tx+1] != targetLabel || labelsTile[ty-1][tx] != targetLabel || 
labelsTile[ty+1][tx] != targetLabel) phiReg = -1; else phiReg = -3; } // Speed if(intensityReg >= lowerIntensityBound && intensityReg <= upperIntensityBound) speedReg = 1; else speedReg = -1; // Load data back into global memory speed[yPos*WIDTH+xPos] = speedReg; phi[yPos*WIDTH+xPos] = phiReg; } } __global__ void switchIn(signed char* speed, signed char* phi, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int speedReg; __shared__ int phiTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiTile[ty][tx] = phi[yPos*WIDTH+xPos]; } if(xPos > 0 && xPos < WIDTH-1 && yPos > 0 && yPos < HEIGHT-1) { // Delete points from Lout and add them to Lin if(phiTile[ty][tx] == 1 && speedReg > 0) phiTile[ty][tx] = -1; if(tx > 0 && tx < 31 && ty > 0 && ty < 31) { // Update neighborhood if(phiTile[ty][tx] == 3) { if(phiTile[ty][tx-1] == -1 || phiTile[ty][tx+1] == -1 || phiTile[ty-1][tx] == -1 || phiTile[ty+1][tx] == -1) phiTile[ty][tx] = 1; } // Eliminate redundant points in Lin if(phiTile[ty][tx] == -1) { if(phiTile[ty][tx-1] < 0 && phiTile[ty][tx+1] < 0 && phiTile[ty-1][tx] < 0 && phiTile[ty+1][tx] < 0) phiTile[ty][tx] = -3; } // Load data back into global memory phi[yPos*WIDTH+xPos] = phiTile[ty][tx]; } } } __global__ void switchOut(signed char* speed, signed char* phi, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int speedReg; __shared__ int phiTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiTile[ty][tx] = phi[yPos*WIDTH+xPos]; } if(xPos > 0 && xPos < WIDTH-1 && yPos > 0 && yPos < HEIGHT-1) { // Delete points from Lin and add them to Lout if(phiTile[ty][tx] == -1 && speedReg < 0) phiTile[ty][tx] = 1; if(tx > 0 && tx < 31 && ty > 0 && ty < 31) { // Update neighborhood if(phiTile[ty][tx] == -3) { if(phiTile[ty][tx-1] == 1 || phiTile[ty][tx+1] == 1 || phiTile[ty-1][tx] == 1 || phiTile[ty+1][tx] == 1) phiTile[ty][tx] = -1; } // Eliminate redundant points if(phiTile[ty][tx] == 1) { if(phiTile[ty][tx-1] > 0 && phiTile[ty][tx+1] > 0 && phiTile[ty-1][tx] > 0 && phiTile[ty+1][tx] > 0) phiTile[ty][tx] = 3; } // Load data back into global memory phi[yPos*WIDTH+xPos] = phiTile[ty][tx]; } } } __global__ void checkStopCondition(signed char* speed, signed char* phi, int parentThreadID, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 32*bx + tx; int yPos = 32*by + ty; signed char speedReg; signed char phiReg; __shared__ int stop; stop = 0; __syncthreads(); // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiReg = phi[yPos*WIDTH+xPos]; // Falsify stop condition if criteria are not met if(phiReg == 1 && speedReg > 0) { stop = 1; } else if(phiReg == -1 && speedReg < 0) { stop = 1; } } __syncthreads(); if (tx==0 && ty ==0 && stop && xPos < WIDTH && yPos < HEIGHT){ stopCondition[parentThreadID] = 1; __threadfence(); } }
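/* --- Illustrative sketch, not part of the original file above ---
   The initSpeedPhi/switchIn/switchOut kernels map a 32x32 thread block onto a
   30x30 output tile: xPos = 30*bx + tx, so neighbouring blocks overlap by two
   rows/columns and the border threads (tx==0, tx==31, ty==0, ty==31) only stage
   halo data in shared memory; they never write results. A minimal stand-alone
   kernel using the same indexing for a 4-neighbour averaging stencil (the kernel
   name and the averaging operation are placeholders, not taken from the file)
   could look like this; launch it with grid dim3(WIDTH/30+1, HEIGHT/30+1) and
   block dim3(32, 32): */
__global__ void haloTileSketch(const signed char* in, signed char* out, int HEIGHT, int WIDTH) {
    int tx = threadIdx.x, ty = threadIdx.y;
    int xPos = 30*blockIdx.x + tx;   // 30-pixel stride between blocks -> 1-pixel halo on each side
    int yPos = 30*blockIdx.y + ty;

    __shared__ signed char tile[32][32];
    if (xPos < WIDTH && yPos < HEIGHT)
        tile[ty][tx] = in[yPos*WIDTH + xPos];
    __syncthreads();   // barrier before interior threads read neighbours loaded by border threads

    // Only interior threads write, so every neighbour they touch is resident in the tile.
    if (tx > 0 && tx < 31 && ty > 0 && ty < 31 &&
        xPos > 0 && xPos < WIDTH-1 && yPos > 0 && yPos < HEIGHT-1) {
        int sum = tile[ty][tx-1] + tile[ty][tx+1] + tile[ty-1][tx] + tile[ty+1][tx];
        out[yPos*WIDTH + xPos] = (signed char)(sum / 4);
    }
}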
c0acbfb20f80378e47c9b18995817dfb273f34e6.hip
// !!! This is a file automatically generated by hipify!!! // compile: system('nvcc -c ADMMcublasOver.cu'); // alternatively (on Windows): system('nvcc -c ADMMcublasOver.cu -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64"'); #include "ADMMcublasOver.h" #include "math.h" #include "hip/hip_runtime.h" #include "rocblas.h" __global__ void soft_thres(float *x_out, float *u_out, float *z_out, float const * const lambda_value_in, int N_n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N_n){ z_out[i] = fmaxf(x_out[i] + u_out[i] - lambda_value_in[0], 0.0f) - fmaxf(-x_out[i] - u_out[i] - lambda_value_in[0], 0.0f); } } __global__ void delta_abs_value(float *delta_abs_out, int N_n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N_n){ delta_abs_out[i] = fabsf(delta_abs_out[i]); } } __global__ void determine_convergence(float *delta_abs_out, int max_index, float *z_old_out, int max_index_dual, float const * const tol_value_in, bool *conv_bool_out) { if (delta_abs_out[max_index-1] < tol_value_in[0] && z_old_out[max_index_dual-1] < tol_value_in[0]){ // 1-based index returned by hipblasIsamax conv_bool_out[0] = true; } else { conv_bool_out[0] = false; } } void ADMM_cublas_over(int N_i, int N_j, int N_n, int N_batch, int n_iter_max, float *z_in_host, float *u_in_host, float *lambda_value_in_host, float *Atb_active_in_host, float *At_LU_inv_in_host, float *A_in_host, float *tol_value_in_host, float *z_host_out, float *u_host_out, bool *error_flag_max_iter) { float *dz_out; hipMalloc(&dz_out, N_n*sizeof(float)); float *du_out; hipMalloc(&du_out, N_n*sizeof(float)); float *dlambda_value_in; hipMalloc(&dlambda_value_in, sizeof(float)); float *dAtb_active_in; hipMalloc(&dAtb_active_in, N_n*sizeof(float)); float *dAt_LU_inv_in; hipMalloc(&dAt_LU_inv_in, (N_i*N_j)*sizeof(float)); float *dA_in; hipMalloc(&dA_in, (N_i*N_j)*sizeof(float)); float *dtol_value_in; hipMalloc(&dtol_value_in, sizeof(float)); hipMemcpy(dz_out, z_in_host, N_n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(du_out, u_in_host, N_n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dlambda_value_in, lambda_value_in_host, sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dAtb_active_in, Atb_active_in_host, N_n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dAt_LU_inv_in, At_LU_inv_in_host, (N_i*N_j)*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dA_in, A_in_host, (N_i*N_j)*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dtol_value_in, tol_value_in_host, sizeof(float), hipMemcpyHostToDevice); float *ddelta_abs_out; hipMalloc(&ddelta_abs_out, N_n*sizeof(float)); float *dq_out; hipMalloc(&dq_out, N_n*sizeof(float)); float *dAq_out; hipMalloc(&dAq_out, (N_i*N_batch)*sizeof(float)); float *dx_out; hipMalloc(&dx_out, N_n*sizeof(float)); float *dz_old_out; hipMalloc(&dz_old_out, N_n*sizeof(float)); bool *dconv_bool_out; hipMalloc(&dconv_bool_out, sizeof(bool)); float scalar_p1; float scalar_0; float scalar_m1; int const threadsPerBlock = 256; int blocksPerGrid; int max_index; int max_index_dual; bool *conv_bool_host = new bool[1]; int iter_no; scalar_p1 = 1.0f; scalar_0 = 0.0f; scalar_m1 = -1.0f; blocksPerGrid = (N_n + threadsPerBlock - 1) / threadsPerBlock; hipblasHandle_t handle; hipblasCreate(&handle); for (iter_no = 0; iter_no < n_iter_max; iter_no++) { // ADMM hipblasScopy(handle, N_n, dz_out, 1, dq_out, 1); hipblasSaxpy(handle, N_n, &scalar_m1, du_out, 1, dq_out, 1); hipblasSaxpy(handle, N_n, &scalar_p1, dAtb_active_in, 1, dq_out, 1); hipblasSgemm(handle, 
HIPBLAS_OP_N, HIPBLAS_OP_N, N_i, N_batch, N_j, &scalar_p1, dA_in, N_i, dq_out, N_j, &scalar_0, dAq_out, N_i); hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N_j, N_batch, N_i, &scalar_m1, dAt_LU_inv_in, N_j, dAq_out, N_i, &scalar_0, dx_out, N_j); hipblasSaxpy(handle, N_n, &scalar_p1, dq_out, 1, dx_out, 1); hipblasScopy(handle, N_n, dz_out, 1, dz_old_out, 1); hipLaunchKernelGGL(( soft_thres), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dx_out, du_out, dz_out, dlambda_value_in, N_n); hipblasSaxpy(handle, N_n, &scalar_p1, dx_out, 1, du_out, 1); hipblasSaxpy(handle, N_n, &scalar_m1, dz_out, 1, du_out, 1); // prim. conv hipblasScopy(handle, N_n, dx_out, 1, ddelta_abs_out, 1); hipblasSaxpy(handle, N_n, &scalar_m1, dz_out, 1, ddelta_abs_out, 1); hipLaunchKernelGGL(( delta_abs_value), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, ddelta_abs_out, N_n); hipblasIsamax(handle, N_n, ddelta_abs_out, 1, &max_index); // dual conv hipblasSaxpy(handle, N_n, &scalar_m1, dz_out, 1, dz_old_out, 1); hipLaunchKernelGGL(( delta_abs_value), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dz_old_out, N_n); hipblasIsamax(handle, N_n, dz_old_out, 1, &max_index_dual); // both conv hipLaunchKernelGGL(( determine_convergence), dim3(1),dim3(1), 0, 0, ddelta_abs_out, max_index, dz_old_out, max_index_dual, dtol_value_in, dconv_bool_out); hipMemcpy(conv_bool_host, dconv_bool_out, sizeof(bool), hipMemcpyDeviceToHost); if (conv_bool_host[0]) { break; } if (iter_no == (n_iter_max - 1)){ error_flag_max_iter[0] = true; } } hipMemcpy(z_host_out, dz_out, N_n*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(u_host_out, du_out, N_n*sizeof(float), hipMemcpyDeviceToHost); hipFree(dz_out); hipFree(du_out); hipFree(ddelta_abs_out); hipFree(dq_out); hipFree(dAq_out); hipFree(dx_out); hipFree(dz_old_out); hipFree(dconv_bool_out); hipFree(dlambda_value_in); hipFree(dAtb_active_in); hipFree(dAt_LU_inv_in); hipFree(dA_in); hipFree(dtol_value_in); }
c0acbfb20f80378e47c9b18995817dfb273f34e6.cu
// compile: system('nvcc -c ADMMcublasOver.cu'); // alternatively (on Windows): system('nvcc -c ADMMcublasOver.cu -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64"'); #include "ADMMcublasOver.h" #include "math.h" #include "cuda_runtime.h" #include "cublas_v2.h" __global__ void soft_thres(float *x_out, float *u_out, float *z_out, float const * const lambda_value_in, int N_n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N_n){ z_out[i] = fmaxf(x_out[i] + u_out[i] - lambda_value_in[0], 0.0f) - fmaxf(-x_out[i] - u_out[i] - lambda_value_in[0], 0.0f); } } __global__ void delta_abs_value(float *delta_abs_out, int N_n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N_n){ delta_abs_out[i] = fabsf(delta_abs_out[i]); } } __global__ void determine_convergence(float *delta_abs_out, int max_index, float *z_old_out, int max_index_dual, float const * const tol_value_in, bool *conv_bool_out) { if (delta_abs_out[max_index-1] < tol_value_in[0] && z_old_out[max_index_dual-1] < tol_value_in[0]){ // 1-based index returned by cublasIsamax conv_bool_out[0] = true; } else { conv_bool_out[0] = false; } } void ADMM_cublas_over(int N_i, int N_j, int N_n, int N_batch, int n_iter_max, float *z_in_host, float *u_in_host, float *lambda_value_in_host, float *Atb_active_in_host, float *At_LU_inv_in_host, float *A_in_host, float *tol_value_in_host, float *z_host_out, float *u_host_out, bool *error_flag_max_iter) { float *dz_out; cudaMalloc(&dz_out, N_n*sizeof(float)); float *du_out; cudaMalloc(&du_out, N_n*sizeof(float)); float *dlambda_value_in; cudaMalloc(&dlambda_value_in, sizeof(float)); float *dAtb_active_in; cudaMalloc(&dAtb_active_in, N_n*sizeof(float)); float *dAt_LU_inv_in; cudaMalloc(&dAt_LU_inv_in, (N_i*N_j)*sizeof(float)); float *dA_in; cudaMalloc(&dA_in, (N_i*N_j)*sizeof(float)); float *dtol_value_in; cudaMalloc(&dtol_value_in, sizeof(float)); cudaMemcpy(dz_out, z_in_host, N_n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(du_out, u_in_host, N_n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dlambda_value_in, lambda_value_in_host, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dAtb_active_in, Atb_active_in_host, N_n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dAt_LU_inv_in, At_LU_inv_in_host, (N_i*N_j)*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dA_in, A_in_host, (N_i*N_j)*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dtol_value_in, tol_value_in_host, sizeof(float), cudaMemcpyHostToDevice); float *ddelta_abs_out; cudaMalloc(&ddelta_abs_out, N_n*sizeof(float)); float *dq_out; cudaMalloc(&dq_out, N_n*sizeof(float)); float *dAq_out; cudaMalloc(&dAq_out, (N_i*N_batch)*sizeof(float)); float *dx_out; cudaMalloc(&dx_out, N_n*sizeof(float)); float *dz_old_out; cudaMalloc(&dz_old_out, N_n*sizeof(float)); bool *dconv_bool_out; cudaMalloc(&dconv_bool_out, sizeof(bool)); float scalar_p1; float scalar_0; float scalar_m1; int const threadsPerBlock = 256; int blocksPerGrid; int max_index; int max_index_dual; bool *conv_bool_host = new bool[1]; int iter_no; scalar_p1 = 1.0f; scalar_0 = 0.0f; scalar_m1 = -1.0f; blocksPerGrid = (N_n + threadsPerBlock - 1) / threadsPerBlock; cublasHandle_t handle; cublasCreate(&handle); for (iter_no = 0; iter_no < n_iter_max; iter_no++) { // ADMM cublasScopy(handle, N_n, dz_out, 1, dq_out, 1); cublasSaxpy(handle, N_n, &scalar_m1, du_out, 1, dq_out, 1); cublasSaxpy(handle, N_n, &scalar_p1, dAtb_active_in, 1, dq_out, 1); cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N_i, N_batch, N_j, 
&scalar_p1, dA_in, N_i, dq_out, N_j, &scalar_0, dAq_out, N_i); cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N_j, N_batch, N_i, &scalar_m1, dAt_LU_inv_in, N_j, dAq_out, N_i, &scalar_0, dx_out, N_j); cublasSaxpy(handle, N_n, &scalar_p1, dq_out, 1, dx_out, 1); cublasScopy(handle, N_n, dz_out, 1, dz_old_out, 1); soft_thres<<<blocksPerGrid, threadsPerBlock>>>(dx_out, du_out, dz_out, dlambda_value_in, N_n); cublasSaxpy(handle, N_n, &scalar_p1, dx_out, 1, du_out, 1); cublasSaxpy(handle, N_n, &scalar_m1, dz_out, 1, du_out, 1); // prim. conv cublasScopy(handle, N_n, dx_out, 1, ddelta_abs_out, 1); cublasSaxpy(handle, N_n, &scalar_m1, dz_out, 1, ddelta_abs_out, 1); delta_abs_value<<<blocksPerGrid, threadsPerBlock>>>(ddelta_abs_out, N_n); cublasIsamax(handle, N_n, ddelta_abs_out, 1, &max_index); // dual conv cublasSaxpy(handle, N_n, &scalar_m1, dz_out, 1, dz_old_out, 1); delta_abs_value<<<blocksPerGrid, threadsPerBlock>>>(dz_old_out, N_n); cublasIsamax(handle, N_n, dz_old_out, 1, &max_index_dual); // both conv determine_convergence<<<1,1>>>(ddelta_abs_out, max_index, dz_old_out, max_index_dual, dtol_value_in, dconv_bool_out); cudaMemcpy(conv_bool_host, dconv_bool_out, sizeof(bool), cudaMemcpyDeviceToHost); if (conv_bool_host[0]) { break; } if (iter_no == (n_iter_max - 1)){ error_flag_max_iter[0] = true; } } cudaMemcpy(z_host_out, dz_out, N_n*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(u_host_out, du_out, N_n*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dz_out); cudaFree(du_out); cudaFree(ddelta_abs_out); cudaFree(dq_out); cudaFree(dAq_out); cudaFree(dx_out); cudaFree(dz_old_out); cudaFree(dconv_bool_out); cudaFree(dlambda_value_in); cudaFree(dAtb_active_in); cudaFree(dAt_LU_inv_in); cudaFree(dA_in); cudaFree(dtol_value_in); }
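/* --- Illustrative sketch, not part of the original file above ---
   The soft_thres kernel implements the usual ADMM z-update for an l1 penalty,
   z = S_lambda(x + u) with S_lambda(v) = max(v - lambda, 0) - max(-v - lambda, 0),
   i.e. shrink v towards zero by lambda and return exactly zero when |v| <= lambda.
   A scalar host-side reference (the helper name is a placeholder): */
#include <algorithm>
static inline float softThresholdRef(float v, float lambda) {
    return std::max(v - lambda, 0.0f) - std::max(-v - lambda, 0.0f);
}
// softThresholdRef( 3.0f, 1.0f) ==  2.0f
// softThresholdRef(-0.5f, 1.0f) ==  0.0f
// softThresholdRef(-3.0f, 1.0f) == -2.0f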
ae7ac296c062b8d0900c0cb81296c3c575e7beb1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <hiprand/hiprand.h>

using namespace std;

// Device array of n floats filled with uniform random numbers on construction.
struct random_d_array {
    float *data;
    int n;
    random_d_array(int n) :n{n} {
        hipMalloc((void**)&data, n*sizeof(float));
        hiprandGenerator_t gen;
        hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
        hiprandGenerateUniform(gen, data, n);
    }
    ~random_d_array() {
        hipFree(data);
    }
};

// Coalesced copy: thread id reads element id.
__global__ void copy1(float *a, float *b, int n) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if(id < n)
        a[id] = b[id];
}

// Offset copy: thread id reads element (id + offset) % n.
__global__ void copy2(float *a, float *b, int n, int offset) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if(id < n)
        a[id] = b[(id + offset) % n];
}

// Strided copy: thread id reads element (id * stride) % n.
__global__ void copy3(float *a, float *b, int n, int stride) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if(id < n)
        a[id] = b[(id * stride) % n];
}

// Time one launch of copy1 (coalesced read) with hip events and print the result.
float call1(float *a, float *b, int n, int blockSize, int gridSize, int i) {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    hipLaunchKernelGGL(( copy1), dim3(gridSize),dim3(blockSize), 0, 0, a, b, n);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    cout << "copy1: " << i << ": " << milliseconds << " ms" << endl;
    return milliseconds;
}

// Time one launch of copy2 (offset read) and print the result.
float call2(float *a, float *b, int n, int offset, int blockSize, int gridSize, int i) {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    hipLaunchKernelGGL(( copy2), dim3(gridSize),dim3(blockSize), 0, 0, a, b, n, offset);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    cout << "copy2: " << i << ": " << milliseconds << " ms" << endl;
    return milliseconds;
}

// Time one launch of copy3 (strided read) and print the result.
float call3(float *a, float *b, int n, int stride, int blockSize, int gridSize, int i) {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    hipLaunchKernelGGL(( copy3), dim3(gridSize),dim3(blockSize), 0, 0, a, b, n, stride);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    cout << "copy3: " << i << ": " << milliseconds << " ms" << endl;
    return milliseconds;
}

int main() {
    const int N = 1024*1024*1024;   // 2^30 floats, i.e. 4 GiB per array
    const int iterations = 10;
    const int blockSize = 32;
    const int gridSize = (N + blockSize - 1)/blockSize;
    random_d_array a(N);
    random_d_array b(N);
    float average = 0.0;

    cout << "============= coalesced read ==============" << endl;
    call1(a.data, b.data, N, blockSize, gridSize, -1);   // warm-up launch, not averaged
    for(int i = 0; i < iterations; ++i)
        average += call1(a.data, b.data, N, blockSize, gridSize, i);
    average /= iterations;
    cout << "Average = " << average << endl;

    cout << "============= offset read =================" << endl;
    average = 0.0;
    const int offset = 17;
    call2(a.data, b.data, N, offset, blockSize, gridSize, -1);
    for(int i = 0; i < iterations; ++i)
        average += call2(a.data, b.data, N, offset, blockSize, gridSize, i);
    average /= iterations;
    cout << "Average = " << average << endl;

    cout << "============= strided read ================" << endl;
    average = 0.0;
    const int stride = 17;
    call3(a.data, b.data, N, stride, blockSize, gridSize, -1);
    for(int i = 0; i < iterations; ++i)
        average += call3(a.data, b.data, N, stride, blockSize, gridSize, i);
    average /= iterations;
    cout << "Average = " << average << endl;
}
ae7ac296c062b8d0900c0cb81296c3c575e7beb1.cu
#include <iostream>
#include <curand.h>

using namespace std;

// Device array of n floats filled with uniform random numbers on construction.
struct random_d_array {
    float *data;
    int n;
    random_d_array(int n) :n{n} {
        cudaMalloc((void**)&data, n*sizeof(float));
        curandGenerator_t gen;
        curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
        curandGenerateUniform(gen, data, n);
    }
    ~random_d_array() {
        cudaFree(data);
    }
};

// Coalesced copy: thread id reads element id.
__global__ void copy1(float *a, float *b, int n) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if(id < n)
        a[id] = b[id];
}

// Offset copy: thread id reads element (id + offset) % n.
__global__ void copy2(float *a, float *b, int n, int offset) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if(id < n)
        a[id] = b[(id + offset) % n];
}

// Strided copy: thread id reads element (id * stride) % n.
__global__ void copy3(float *a, float *b, int n, int stride) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if(id < n)
        a[id] = b[(id * stride) % n];
}

// Time one launch of copy1 (coalesced read) with CUDA events and print the result.
float call1(float *a, float *b, int n, int blockSize, int gridSize, int i) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    copy1<<<gridSize,blockSize>>>(a, b, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "copy1: " << i << ": " << milliseconds << " ms" << endl;
    return milliseconds;
}

// Time one launch of copy2 (offset read) and print the result.
float call2(float *a, float *b, int n, int offset, int blockSize, int gridSize, int i) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    copy2<<<gridSize,blockSize>>>(a, b, n, offset);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "copy2: " << i << ": " << milliseconds << " ms" << endl;
    return milliseconds;
}

// Time one launch of copy3 (strided read) and print the result.
float call3(float *a, float *b, int n, int stride, int blockSize, int gridSize, int i) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    copy3<<<gridSize,blockSize>>>(a, b, n, stride);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "copy3: " << i << ": " << milliseconds << " ms" << endl;
    return milliseconds;
}

int main() {
    const int N = 1024*1024*1024;   // 2^30 floats, i.e. 4 GiB per array
    const int iterations = 10;
    const int blockSize = 32;
    const int gridSize = (N + blockSize - 1)/blockSize;
    random_d_array a(N);
    random_d_array b(N);
    float average = 0.0;

    cout << "============= coalesced read ==============" << endl;
    call1(a.data, b.data, N, blockSize, gridSize, -1);   // warm-up launch, not averaged
    for(int i = 0; i < iterations; ++i)
        average += call1(a.data, b.data, N, blockSize, gridSize, i);
    average /= iterations;
    cout << "Average = " << average << endl;

    cout << "============= offset read =================" << endl;
    average = 0.0;
    const int offset = 17;
    call2(a.data, b.data, N, offset, blockSize, gridSize, -1);
    for(int i = 0; i < iterations; ++i)
        average += call2(a.data, b.data, N, offset, blockSize, gridSize, i);
    average /= iterations;
    cout << "Average = " << average << endl;

    cout << "============= strided read ================" << endl;
    average = 0.0;
    const int stride = 17;
    call3(a.data, b.data, N, stride, blockSize, gridSize, -1);
    for(int i = 0; i < iterations; ++i)
        average += call3(a.data, b.data, N, stride, blockSize, gridSize, i);
    average /= iterations;
    cout << "Average = " << average << endl;
}
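/* --- Illustrative sketch, not part of the original file above ---
   The call1/call2/call3 helpers only print elapsed times. To turn a measured
   time into an effective bandwidth figure, note that each copy kernel reads n
   floats and writes n floats; the helper below is an assumption added for
   illustration, not part of the benchmark: */
static inline double effectiveBandwidthGBs(int n, float milliseconds) {
    double bytes = 2.0 * static_cast<double>(n) * sizeof(float);   // n reads + n writes
    return bytes / (milliseconds * 1.0e-3) / 1.0e9;                // GB/s
}
// Example: n = 1<<28 elements copied in 10 ms  ->  about 215 GB/s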
f8228ae92e32e22f58f96719bfd8ba87598f2717.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // Created by raver119 on 30.11.17. // #include <ops/declarable/helpers/im2col.h> namespace nd4j { namespace ops { namespace helpers { ////////////////////////////////////////////////////////////////////////// // input [bS, iC, iH, iW] is convoluted to output [bS, iC, kH, kW, oH, oW] template <typename T> __global__ static void im2colCuda(const void *in, void *out, const Nd4jLong *inShapeInfo, const Nd4jLong *outShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const double zeroPadValD) { T zeroPadVal = static_cast<T>(zeroPadValD); //Value to use when value is padding. Usually 0 but not always const auto im = reinterpret_cast<const T*>(in); auto col = reinterpret_cast<T*>(out); __shared__ Nd4jLong colLen, *colStrides, *imStrides, *colShape, *colIndices; __shared__ int iH, iW, colRank; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; colIndices = reinterpret_cast<Nd4jLong*>(shmem); colRank = shape::rank(outShapeInfo); colLen = shape::length(outShapeInfo); colShape = shape::shapeOf(const_cast<Nd4jLong*>(outShapeInfo)); colStrides = shape::stride(outShapeInfo); imStrides = shape::stride(inShapeInfo); iH = inShapeInfo[3]; iW = inShapeInfo[4]; } __syncthreads(); const auto colInd = blockIdx.x * blockDim.x + threadIdx.x; if(colInd >= colLen) return; const auto indexes = colIndices + threadIdx.x * colRank; shape::index2coords(colRank, colShape, colInd, colLen, indexes); const auto imh = (-pH + indexes[2] * dH) + indexes[4] * sH; const auto imw = (-pW + indexes[3] * dW) + indexes[5] * sW; const auto colBuff = col + indexes[0]*colStrides[0] + indexes[1]*colStrides[1] + indexes[2]*colStrides[2] + indexes[3]*colStrides[3] + indexes[4]*colStrides[4] + indexes[5]*colStrides[5]; const auto imBuff = im + indexes[0]*imStrides[0] + indexes[1]*imStrides[1] + imh*imStrides[2] + imw*imStrides[3]; if (static_cast<unsigned>(imh) >= static_cast<unsigned>(iH) || static_cast<unsigned>(imw) >= static_cast<unsigned>(iW)) *colBuff = zeroPadVal; else *colBuff = *imBuff; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void im2colCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, nd4j::LaunchContext & context, const void *in, void *out, const Nd4jLong *inShapeInfo, const Nd4jLong *outShapeInfo, int kY, int kX, int sH, int sW, int pH, int pW, int dH, int dW, double zeroPadVal) { hipLaunchKernelGGL(( im2colCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), threadsPerBlock * sizeof(Nd4jLong) * 6 /* rank of out = 6 */, *context.getCudaStream(), in, out, inShapeInfo, outShapeInfo, kY, kX, sH, sW, pH, pW, dH, dW, zeroPadVal); } 
////////////////////////////////////////////////////////////////////////// void im2col(nd4j::LaunchContext & context, const NDArray& in, NDArray& out, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const NDArray& arrZeroPadVal) { if(!in.isActualOnDeviceSide()) in.syncToDevice(); const int threadsPerBlock = 512; const int blocksPerGrid = (out.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; BUILD_SINGLE_SELECTOR(out.dataType(), im2colCudaLauncher, (blocksPerGrid, threadsPerBlock, context, in.getSpecialBuffer(), out.getSpecialBuffer(), in.getSpecialShapeInfo(), out.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, arrZeroPadVal.e<double>(0)), FLOAT_TYPES); in.tickReadDevice(); out.tickWriteDevice(); } BUILD_SINGLE_TEMPLATE(template void im2colCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, nd4j::LaunchContext & context, const void *in, void *out, const Nd4jLong *inShapeInfo, const Nd4jLong *outShapeInfo, const int kY, const int kX, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const double zeroPadVal), FLOAT_TYPES); } } }
f8228ae92e32e22f58f96719bfd8ba87598f2717.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // Created by raver119 on 30.11.17. // #include <ops/declarable/helpers/im2col.h> namespace nd4j { namespace ops { namespace helpers { ////////////////////////////////////////////////////////////////////////// // input [bS, iC, iH, iW] is convoluted to output [bS, iC, kH, kW, oH, oW] template <typename T> __global__ static void im2colCuda(const void *in, void *out, const Nd4jLong *inShapeInfo, const Nd4jLong *outShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const double zeroPadValD) { T zeroPadVal = static_cast<T>(zeroPadValD); //Value to use when value is padding. Usually 0 but not always const auto im = reinterpret_cast<const T*>(in); auto col = reinterpret_cast<T*>(out); __shared__ Nd4jLong colLen, *colStrides, *imStrides, *colShape, *colIndices; __shared__ int iH, iW, colRank; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; colIndices = reinterpret_cast<Nd4jLong*>(shmem); colRank = shape::rank(outShapeInfo); colLen = shape::length(outShapeInfo); colShape = shape::shapeOf(const_cast<Nd4jLong*>(outShapeInfo)); colStrides = shape::stride(outShapeInfo); imStrides = shape::stride(inShapeInfo); iH = inShapeInfo[3]; iW = inShapeInfo[4]; } __syncthreads(); const auto colInd = blockIdx.x * blockDim.x + threadIdx.x; if(colInd >= colLen) return; const auto indexes = colIndices + threadIdx.x * colRank; shape::index2coords(colRank, colShape, colInd, colLen, indexes); const auto imh = (-pH + indexes[2] * dH) + indexes[4] * sH; const auto imw = (-pW + indexes[3] * dW) + indexes[5] * sW; const auto colBuff = col + indexes[0]*colStrides[0] + indexes[1]*colStrides[1] + indexes[2]*colStrides[2] + indexes[3]*colStrides[3] + indexes[4]*colStrides[4] + indexes[5]*colStrides[5]; const auto imBuff = im + indexes[0]*imStrides[0] + indexes[1]*imStrides[1] + imh*imStrides[2] + imw*imStrides[3]; if (static_cast<unsigned>(imh) >= static_cast<unsigned>(iH) || static_cast<unsigned>(imw) >= static_cast<unsigned>(iW)) *colBuff = zeroPadVal; else *colBuff = *imBuff; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void im2colCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, nd4j::LaunchContext & context, const void *in, void *out, const Nd4jLong *inShapeInfo, const Nd4jLong *outShapeInfo, int kY, int kX, int sH, int sW, int pH, int pW, int dH, int dW, double zeroPadVal) { im2colCuda<T><<<blocksPerGrid, threadsPerBlock, threadsPerBlock * sizeof(Nd4jLong) * 6 /* rank of out = 6 */, *context.getCudaStream()>>>(in, out, inShapeInfo, outShapeInfo, kY, kX, sH, sW, pH, pW, dH, dW, zeroPadVal); } ////////////////////////////////////////////////////////////////////////// void im2col(nd4j::LaunchContext & context, const NDArray& 
in, NDArray& out, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const NDArray& arrZeroPadVal) { if(!in.isActualOnDeviceSide()) in.syncToDevice(); const int threadsPerBlock = 512; const int blocksPerGrid = (out.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; BUILD_SINGLE_SELECTOR(out.dataType(), im2colCudaLauncher, (blocksPerGrid, threadsPerBlock, context, in.getSpecialBuffer(), out.getSpecialBuffer(), in.getSpecialShapeInfo(), out.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, arrZeroPadVal.e<double>(0)), FLOAT_TYPES); in.tickReadDevice(); out.tickWriteDevice(); } BUILD_SINGLE_TEMPLATE(template void im2colCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, nd4j::LaunchContext & context, const void *in, void *out, const Nd4jLong *inShapeInfo, const Nd4jLong *outShapeInfo, const int kY, const int kX, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const double zeroPadVal), FLOAT_TYPES); } } }
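/* --- Illustrative sketch, not part of the original file above ---
   shape::index2coords in the kernel turns the flat column index into the six
   coordinates [b, c, kh, kw, oh, ow] of the row-major (c-ordered) column array,
   from which imh and imw are derived. The stand-alone helper below performs the
   same kind of row-major decomposition for an arbitrary rank; it is written in
   the same spirit as the library routine but is not the library routine itself,
   and the name is a placeholder: */
#include <cstdint>
static void flatIndexToCoords(int rank, const int64_t* shape, int64_t idx, int64_t* coords) {
    // Peel off dimensions from the innermost (fastest varying) one outwards.
    for (int i = rank - 1; i >= 0; --i) {
        coords[i] = idx % shape[i];
        idx /= shape[i];
    }
}
// With shape {bS, iC, kH, kW, oH, oW} the recovered coordinates feed directly into
// imh = -pH + kh*dH + oh*sH and imw = -pW + kw*dW + ow*sW as in the kernel above.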
7e8e68e0cdd29ece6078543eaf37aca9f65b2da8.hip
// !!! This is a file automatically generated by hipify!!! #include "trivialbinpackerPeakFinder.cuh" #define MATH_PI 3.14159265358979323846 #define MIN_FIRST_HARMONIC 3 #define MAX_FIRST_HARMONIC 20 #define FIRST_HARMONIC_MIN_HEIGHT 0.25 #define HARMONIC_WIDTH 2 #define HARMONIC_FALLOFF 0.9 #define DEFAULT_FIRST_HARMONIC 5 #define PEAK_FIND_DERIV_SMOOTHING_RANGE 15 #define HARMONIC_PERIOD_SEARCH_START 0.05f #define HARMONIC_PERIOD_SHORT_SEARCH_END 0.5f #define HARMONIC_PERIOD_LONG_OVERRIDE_BEGIN 0.8f #define HARMONIC_PERIOD_LONG_SEARCH_END 1.3f #define HARMONIC_PERIOD_SHORT_LONG_RATIO_THRESH 0.80 #define PROHIBIT_TWO_SHORTS_IN_ROW 1 #define ADJUST_CLOSE_VALLEY_THRESHOLD 1.5f #define STARTING_PEAK_THRESHOLD 0.5 #define EDGE_EXCLUSION 0//(1.1*PEAK_FIND_DERIV_SMOOTHING_RANGE) std::vector<float> peakFindDeriv1; std::vector<float> peakFindDeriv2; #define FOURIER_PATCH_WIDTH_POW 9 #define FOURIER_PATCH_WIDTH (pow(2,FOURIER_PATCH_WIDTH_POW)) std::vector<float> fourierRe; std::vector<float> fourierIm; std::vector<float> fourierAvg; /** * C++ implementation of FFT * * Source: http://paulbourke.net/miscellaneous/dft/ */ /* This computes an in-place complex-to-complex FFT x and y are the real and imaginary arrays of 2^m points. dir = 1 gives forward transform dir = -1 gives reverse transform */ inline void FFT(short int dir, long m, float *x, float *y) { long n, i, i1, j, k, i2, l, l1, l2; float c1, c2, tx, ty, t1, t2, u1, u2, z; /* Calculate the number of points */ n = 1; for (i = 0; i<m; i++) n *= 2; /* Do the bit reversal */ i2 = n >> 1; j = 0; for (i = 0; i<n - 1; i++) { if (i < j) { tx = x[i]; ty = y[i]; x[i] = x[j]; y[i] = y[j]; x[j] = tx; y[j] = ty; } k = i2; while (k <= j) { j -= k; k >>= 1; } j += k; } /* Compute the FFT */ c1 = -1.0; c2 = 0.0; l2 = 1; for (l = 0; l<m; l++) { l1 = l2; l2 <<= 1; u1 = 1.0; u2 = 0.0; for (j = 0; j<l1; j++) { for (i = j; i<n; i += l2) { i1 = i + l1; t1 = u1 * x[i1] - u2 * y[i1]; t2 = u1 * y[i1] + u2 * x[i1]; x[i1] = x[i] - t1; y[i1] = y[i] - t2; x[i] += t1; y[i] += t2; } z = u1 * c1 - u2 * c2; u2 = u1 * c2 + u2 * c1; u1 = z; } c2 = sqrt((1.0 - c1) / 2.0); if (dir == 1) c2 = -c2; c1 = sqrt((1.0 + c1) / 2.0); } /* Scaling for forward transform */ if (dir == 1) { for (i = 0; i<n; i++) { x[i] /= n; y[i] /= n; } } } size_t findFirstHarmonic(std::vector<float>* freq) { float maxAmp = 0; for (size_t i = MIN_FIRST_HARMONIC; i < MAX_FIRST_HARMONIC && i < freq->size(); i++) { maxAmp = ::max(maxAmp, (*freq)[i]); } for (size_t i = MIN_FIRST_HARMONIC; i < MAX_FIRST_HARMONIC && i < freq->size(); i++) { if ((*freq)[i] >= FIRST_HARMONIC_MIN_HEIGHT*maxAmp) { float backMin = 99999; float forwardMin = 99999; float backMax = -99999; float forwardMax = -99999; for (size_t j = (i > HARMONIC_WIDTH ? 
i - HARMONIC_WIDTH : 0); j < i; j++) { backMin = ::min(backMin, (*freq)[j]); backMax = ::max(backMax, (*freq)[j]); } for (size_t j = i + 1; j < i + HARMONIC_WIDTH + 1 && j < freq->size(); j++) { forwardMin = ::min(forwardMin, (*freq)[j]); forwardMax = ::max(forwardMax, (*freq)[j]); } if (backMin <= HARMONIC_FALLOFF*(*freq)[i] && forwardMin <= HARMONIC_FALLOFF*(*freq)[i] && backMax < (*freq)[i] && forwardMax < (*freq)[i]) return i; } } return 0; } //peakSaveFactor is the number to store in the peaks array; we can set it to different values on the forward and backwards passes to tell them apart float searchForPeaksAndValleys(std::vector<float>* deriv, std::vector<float>* peaks, size_t harmonicPeriod, int direction, float peakSaveFactor) { bool searchingForPeak = false; float begAbsMax = 0; float begMin = 99999; float begMax = -99999; for (size_t i = EDGE_EXCLUSION; i < harmonicPeriod; i++) { size_t pos = (direction > 0 ? i : deriv->size() - i - 1); begAbsMax = ::max(abs((*deriv)[pos]), begAbsMax); begMax = ::max((*deriv)[pos], begMax); begMin = ::min((*deriv)[pos], begMin); } size_t curIndex = EDGE_EXCLUSION; size_t lastCritPoint = EDGE_EXCLUSION; for (size_t i = EDGE_EXCLUSION; i < harmonicPeriod; i++) { size_t pos = (direction > 0 ? i : deriv->size() - i - 1); if (abs((*deriv)[pos]) > STARTING_PEAK_THRESHOLD*begAbsMax) { float localMax = abs((*deriv)[pos]); float originalHit = (*deriv)[pos]; float maxLoc = i; pos += direction; i++; while ((*deriv)[pos] * originalHit > 0) { if (abs((*deriv)[pos]) > localMax) { localMax = abs((*deriv)[pos]); maxLoc = i; } pos += direction; i++; } curIndex = maxLoc + HARMONIC_PERIOD_SEARCH_START*harmonicPeriod; lastCritPoint = maxLoc; size_t maxPos = (direction > 0 ? maxLoc : deriv->size() - maxLoc - 1); (*peaks)[maxPos] += ((*deriv)[maxPos] > 0 ? -peakSaveFactor : peakSaveFactor); searchingForPeak = ((*deriv)[maxPos] > 0 ? true : false); break; } } float begFauxCritVal = (searchingForPeak ? begMax : begMin); size_t currentBestIndex = 0; float currentBest = 0; size_t currentShortBestIndex = 0; float currentShortBest = 0; float periodMean = 0; float periodStdev = 0; size_t lastPeak = 0; size_t lastValley = 0; size_t countPeriods = 0; bool onLastInterval = false; bool foundCritPoint = false; bool lastIntervalWasShort = false; while (curIndex < (*deriv).size()) { size_t curPos = (direction > 0 ? curIndex : deriv->size() - curIndex - 1); //peaks are valleys in second derivative if ((searchingForPeak && (*deriv)[curPos] < currentBest) || (!searchingForPeak && (*deriv)[curPos] > currentBest)) { currentBest = (*deriv)[curPos]; currentBestIndex = curIndex; } if (curIndex < lastCritPoint + HARMONIC_PERIOD_SHORT_SEARCH_END*harmonicPeriod && ((searchingForPeak && (*deriv)[curPos] < currentShortBest) || (!searchingForPeak && (*deriv)[curPos] > currentShortBest))) { currentShortBest = (*deriv)[curPos]; currentShortBestIndex = curIndex; } curIndex++; if (curIndex > ::min((size_t)(lastCritPoint + HARMONIC_PERIOD_LONG_SEARCH_END*harmonicPeriod), (size_t)((*deriv).size() - 1))) { size_t lastCritPos = (direction > 0 ? 
lastCritPoint : deriv->size() - lastCritPoint - 1); float lastCritVal; if (lastCritPoint == 0) lastCritVal = begFauxCritVal; else lastCritVal = (*deriv)[lastCritPos]; if (currentShortBest != 0 && abs(currentShortBest - lastCritVal) > HARMONIC_PERIOD_SHORT_LONG_RATIO_THRESH*abs(currentBest - lastCritVal) && currentBestIndex > lastCritPoint + HARMONIC_PERIOD_LONG_OVERRIDE_BEGIN*harmonicPeriod && !(PROHIBIT_TWO_SHORTS_IN_ROW && lastIntervalWasShort)) { currentBest = currentShortBest; currentBestIndex = currentShortBestIndex; lastIntervalWasShort = true; } else lastIntervalWasShort = false; if (lastCritPoint != currentBestIndex) foundCritPoint = true; lastCritPoint = currentBestIndex; size_t currentBestPos = (direction > 0 ? currentBestIndex : deriv->size() - currentBestIndex - 1); if (currentBest != 0) { //if (currentBestIndex > (*deriv).size() / 2) (*peaks)[currentBestPos] += (searchingForPeak ? peakSaveFactor : -peakSaveFactor); float unsup = 0; if (searchingForPeak) { if (lastPeak > 0) unsup = currentBestIndex - lastPeak; lastPeak = currentBestIndex; } else { if (lastValley > 0) unsup = currentBestIndex - lastValley; lastValley = currentBestIndex; } if (unsup > 0) { periodMean += unsup; periodStdev += unsup*unsup; countPeriods++; } } else searchingForPeak = !searchingForPeak; currentBest = 0; currentShortBest = 0; if (lastCritPoint + harmonicPeriod < deriv->size() && foundCritPoint) { curIndex = lastCritPoint + HARMONIC_PERIOD_SEARCH_START*harmonicPeriod; searchingForPeak = !searchingForPeak; foundCritPoint = false; } else if (!onLastInterval && foundCritPoint) { curIndex = lastCritPoint + HARMONIC_PERIOD_SEARCH_START*harmonicPeriod; searchingForPeak = !searchingForPeak; onLastInterval = true; foundCritPoint = false; } else break; } } periodMean /= countPeriods; periodStdev /= countPeriods; periodStdev -= periodMean*periodMean; periodStdev = sqrt(periodStdev); return periodMean; } void adjustValleys(std::vector<float>* deriv, std::vector<float>* peaks, size_t harmonicPeriod) { std::vector<size_t> peakLocs; std::vector<size_t> valleyLocs; size_t lastPeak = peaks->size(); size_t lastValley = peaks->size(); float peakToValleyDist = 0; size_t numPeakToValleys = 0; float valleyToPeakDist = 0; size_t numValleyToPeaks = 0; float averagePeakHeight = 0; float averageValleyHeight = 0; for (size_t i = 0; i < peaks->size(); i++) { if ((*peaks)[i] == 1) { peakLocs.push_back(i); if (lastValley < peaks->size()) { valleyToPeakDist += i - lastValley; numValleyToPeaks++; } averagePeakHeight += abs((*deriv)[i]); lastPeak = i; } else if ((*peaks)[i] == -1) { valleyLocs.push_back(i); if (lastPeak < peaks->size()) { peakToValleyDist += i - lastPeak; numPeakToValleys++; } averageValleyHeight += abs((*deriv)[i]); lastValley = i; } } peakToValleyDist /= numPeakToValleys; valleyToPeakDist /= numValleyToPeaks; averagePeakHeight /= peakLocs.size(); averageValleyHeight /= valleyLocs.size(); //bool waveInverted = (peakToValleyDist < valleyToPeakDist ? true : false); bool waveInverted = (averageValleyHeight > averagePeakHeight ? true : false); //can often be wrong if there is only one clear peak and valley per pulse, but that should be taken care of by the close/far adjustment (inversion by horizontal distance is better in those cases) std::vector<size_t>* primaryLocs = (!waveInverted ? 
&peakLocs : &valleyLocs); if (primaryLocs->size() == 0) return; std::vector<size_t> closeOptima; std::vector<size_t> farOptima; float averageCloseOptimum = 0; //actually sum of optima float averageFarOptimum = 0; for (size_t i = 0; i < primaryLocs->size() - 1; i++) { float farOptimum = (!waveInverted ? -9999 : 9999); size_t farOptimumLoc = (*primaryLocs)[i] + 1; float closeOptimum = (!waveInverted ? -9999 : 9999); size_t closeOptimumLoc = (*primaryLocs)[i] + 1; for (size_t j = (*primaryLocs)[i] + 1; j < (*primaryLocs)[i + 1]; j++) { if ((!waveInverted ? (*deriv)[j] > closeOptimum : (*deriv)[j] < closeOptimum) && j - (*primaryLocs)[i] < ((*primaryLocs)[i + 1] - (*primaryLocs)[i]) / 2) { closeOptimum = (*deriv)[j]; closeOptimumLoc = j; } if ((!waveInverted ? (*deriv)[j] > farOptimum : (*deriv)[j] < farOptimum) && j - (*primaryLocs)[i] >= ((*primaryLocs)[i + 1] - (*primaryLocs)[i]) / 2) { farOptimum = (*deriv)[j]; farOptimumLoc = j; } if ((*peaks)[j] == (!waveInverted ? -1 : 1)) { (*peaks)[j] = 0; } } closeOptima.push_back(closeOptimumLoc); farOptima.push_back(farOptimumLoc); averageCloseOptimum += abs(closeOptimum); averageFarOptimum += abs(farOptimum); } std::vector<size_t>* optima = (averageCloseOptimum > ADJUST_CLOSE_VALLEY_THRESHOLD*averageFarOptimum ? &closeOptima : &farOptima); for (size_t i = 0; i < optima->size(); i++) { (*peaks)[(*optima)[i]] = (!waveInverted ? -1 : 1); } } void findPeaksAndValleys(std::vector<float>* waveform, std::vector<float>* peaks) { peaks->clear(); peaks->resize(waveform->size()); computeDerivative(waveform, &peakFindDeriv1, PEAK_FIND_DERIV_SMOOTHING_RANGE); computeDerivative(&peakFindDeriv1, &peakFindDeriv2, PEAK_FIND_DERIV_SMOOTHING_RANGE); fourierRe.clear(); fourierRe.resize(FOURIER_PATCH_WIDTH); fourierIm.clear(); fourierIm.resize(FOURIER_PATCH_WIDTH); fourierAvg.clear(); fourierAvg.resize(FOURIER_PATCH_WIDTH); interpolate(&peakFindDeriv2, &fourierRe, FOURIER_PATCH_WIDTH); FFT(1, FOURIER_PATCH_WIDTH_POW, &fourierRe[0], &fourierIm[0]); for (size_t i = 0; i < fourierRe.size(); i++) { fourierRe[i] = sqrt(fourierRe[i] * fourierRe[i] + fourierIm[i] * fourierIm[i]); } interpolate(&fourierRe, &fourierAvg, FOURIER_PATCH_WIDTH*FOURIER_PATCH_WIDTH / peakFindDeriv2.size()); size_t deriv2FirstHarmonic = findFirstHarmonic(&fourierAvg); if (deriv2FirstHarmonic == 0) deriv2FirstHarmonic = DEFAULT_FIRST_HARMONIC; size_t harmonicPeriod = waveform->size() / deriv2FirstHarmonic; float forwardMean = searchForPeaksAndValleys(&peakFindDeriv2, peaks, harmonicPeriod, 1, 1.0f); float backwardMean = searchForPeaksAndValleys(&peakFindDeriv2, peaks, harmonicPeriod, -1, 0.1f); if (fabs(forwardMean - harmonicPeriod) < fabs(backwardMean - harmonicPeriod)) { for (size_t i = 0; i < peaks->size(); i++) { if ((*peaks)[i] < -0.5f) (*peaks)[i] = -1.0f; else if ((*peaks)[i] > 0.5f) (*peaks)[i] = 1.0f; else (*peaks)[i] = 0.0f; } } else { for (size_t i = 0; i < peaks->size(); i++) { if ((*peaks)[i] < -0.5f) (*peaks)[i] += 1.0f; else if ((*peaks)[i] > 0.5f) (*peaks)[i] -= 1.0f; (*peaks)[i] *= 10.0f; } } adjustValleys(&peakFindDeriv2, peaks, harmonicPeriod); }
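A minimal, self-contained C++ sketch (not part of the dataset) of the magnitude-spectrum step used by findPeaksAndValleys above: the code resamples the smoothed second derivative, transforms it, and reads sqrt(re^2 + im^2). Here a naive O(n^2) DFT stands in for the in-place FFT, the 64-sample test signal is made up, and the "largest non-DC bin" rule is a simplification of findFirstHarmonic, not its actual criterion.

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const int n = 64;
  const double pi = 3.14159265358979323846;
  // Made-up stand-in for the resampled second derivative: 5 oscillations
  // across the window, so the magnitude spectrum should peak at bin 5.
  std::vector<double> signal(n);
  for (int i = 0; i < n; ++i) signal[i] = std::sin(2.0 * pi * 5.0 * i / n);

  // Magnitude spectrum sqrt(re^2 + im^2), the same quantity findPeaksAndValleys
  // derives from fourierRe/fourierIm, computed here with a naive DFT.
  std::vector<double> mag(n / 2, 0.0);
  for (int k = 0; k < n / 2; ++k) {
    double re = 0.0, im = 0.0;
    for (int i = 0; i < n; ++i) {
      re += signal[i] * std::cos(2.0 * pi * k * i / n);
      im -= signal[i] * std::sin(2.0 * pi * k * i / n);
    }
    mag[k] = std::sqrt(re * re + im * im);
  }

  // Simplified stand-in for findFirstHarmonic: take the largest non-DC bin.
  int best = 1;
  for (int k = 2; k < n / 2; ++k)
    if (mag[k] > mag[best]) best = k;
  std::printf("dominant harmonic bin: %d\n", best); // expected: 5
  return 0;
}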
7e8e68e0cdd29ece6078543eaf37aca9f65b2da8.cu
#include "trivialbinpackerPeakFinder.cuh" #define MATH_PI 3.14159265358979323846 #define MIN_FIRST_HARMONIC 3 #define MAX_FIRST_HARMONIC 20 #define FIRST_HARMONIC_MIN_HEIGHT 0.25 #define HARMONIC_WIDTH 2 #define HARMONIC_FALLOFF 0.9 #define DEFAULT_FIRST_HARMONIC 5 #define PEAK_FIND_DERIV_SMOOTHING_RANGE 15 #define HARMONIC_PERIOD_SEARCH_START 0.05f #define HARMONIC_PERIOD_SHORT_SEARCH_END 0.5f #define HARMONIC_PERIOD_LONG_OVERRIDE_BEGIN 0.8f #define HARMONIC_PERIOD_LONG_SEARCH_END 1.3f #define HARMONIC_PERIOD_SHORT_LONG_RATIO_THRESH 0.80 #define PROHIBIT_TWO_SHORTS_IN_ROW 1 #define ADJUST_CLOSE_VALLEY_THRESHOLD 1.5f #define STARTING_PEAK_THRESHOLD 0.5 #define EDGE_EXCLUSION 0//(1.1*PEAK_FIND_DERIV_SMOOTHING_RANGE) std::vector<float> peakFindDeriv1; std::vector<float> peakFindDeriv2; #define FOURIER_PATCH_WIDTH_POW 9 #define FOURIER_PATCH_WIDTH (pow(2,FOURIER_PATCH_WIDTH_POW)) std::vector<float> fourierRe; std::vector<float> fourierIm; std::vector<float> fourierAvg; /** * C++ implementation of FFT * * Source: http://paulbourke.net/miscellaneous/dft/ */ /* This computes an in-place complex-to-complex FFT x and y are the real and imaginary arrays of 2^m points. dir = 1 gives forward transform dir = -1 gives reverse transform */ inline void FFT(short int dir, long m, float *x, float *y) { long n, i, i1, j, k, i2, l, l1, l2; float c1, c2, tx, ty, t1, t2, u1, u2, z; /* Calculate the number of points */ n = 1; for (i = 0; i<m; i++) n *= 2; /* Do the bit reversal */ i2 = n >> 1; j = 0; for (i = 0; i<n - 1; i++) { if (i < j) { tx = x[i]; ty = y[i]; x[i] = x[j]; y[i] = y[j]; x[j] = tx; y[j] = ty; } k = i2; while (k <= j) { j -= k; k >>= 1; } j += k; } /* Compute the FFT */ c1 = -1.0; c2 = 0.0; l2 = 1; for (l = 0; l<m; l++) { l1 = l2; l2 <<= 1; u1 = 1.0; u2 = 0.0; for (j = 0; j<l1; j++) { for (i = j; i<n; i += l2) { i1 = i + l1; t1 = u1 * x[i1] - u2 * y[i1]; t2 = u1 * y[i1] + u2 * x[i1]; x[i1] = x[i] - t1; y[i1] = y[i] - t2; x[i] += t1; y[i] += t2; } z = u1 * c1 - u2 * c2; u2 = u1 * c2 + u2 * c1; u1 = z; } c2 = sqrt((1.0 - c1) / 2.0); if (dir == 1) c2 = -c2; c1 = sqrt((1.0 + c1) / 2.0); } /* Scaling for forward transform */ if (dir == 1) { for (i = 0; i<n; i++) { x[i] /= n; y[i] /= n; } } } size_t findFirstHarmonic(std::vector<float>* freq) { float maxAmp = 0; for (size_t i = MIN_FIRST_HARMONIC; i < MAX_FIRST_HARMONIC && i < freq->size(); i++) { maxAmp = std::max(maxAmp, (*freq)[i]); } for (size_t i = MIN_FIRST_HARMONIC; i < MAX_FIRST_HARMONIC && i < freq->size(); i++) { if ((*freq)[i] >= FIRST_HARMONIC_MIN_HEIGHT*maxAmp) { float backMin = 99999; float forwardMin = 99999; float backMax = -99999; float forwardMax = -99999; for (size_t j = (i > HARMONIC_WIDTH ? 
i - HARMONIC_WIDTH : 0); j < i; j++) { backMin = std::min(backMin, (*freq)[j]); backMax = std::max(backMax, (*freq)[j]); } for (size_t j = i + 1; j < i + HARMONIC_WIDTH + 1 && j < freq->size(); j++) { forwardMin = std::min(forwardMin, (*freq)[j]); forwardMax = std::max(forwardMax, (*freq)[j]); } if (backMin <= HARMONIC_FALLOFF*(*freq)[i] && forwardMin <= HARMONIC_FALLOFF*(*freq)[i] && backMax < (*freq)[i] && forwardMax < (*freq)[i]) return i; } } return 0; } //peakSaveFactor is the number to store in the peaks array; we can set it to different values on the forward and backwards passes to tell them apart float searchForPeaksAndValleys(std::vector<float>* deriv, std::vector<float>* peaks, size_t harmonicPeriod, int direction, float peakSaveFactor) { bool searchingForPeak = false; float begAbsMax = 0; float begMin = 99999; float begMax = -99999; for (size_t i = EDGE_EXCLUSION; i < harmonicPeriod; i++) { size_t pos = (direction > 0 ? i : deriv->size() - i - 1); begAbsMax = std::max(abs((*deriv)[pos]), begAbsMax); begMax = std::max((*deriv)[pos], begMax); begMin = std::min((*deriv)[pos], begMin); } size_t curIndex = EDGE_EXCLUSION; size_t lastCritPoint = EDGE_EXCLUSION; for (size_t i = EDGE_EXCLUSION; i < harmonicPeriod; i++) { size_t pos = (direction > 0 ? i : deriv->size() - i - 1); if (abs((*deriv)[pos]) > STARTING_PEAK_THRESHOLD*begAbsMax) { float localMax = abs((*deriv)[pos]); float originalHit = (*deriv)[pos]; float maxLoc = i; pos += direction; i++; while ((*deriv)[pos] * originalHit > 0) { if (abs((*deriv)[pos]) > localMax) { localMax = abs((*deriv)[pos]); maxLoc = i; } pos += direction; i++; } curIndex = maxLoc + HARMONIC_PERIOD_SEARCH_START*harmonicPeriod; lastCritPoint = maxLoc; size_t maxPos = (direction > 0 ? maxLoc : deriv->size() - maxLoc - 1); (*peaks)[maxPos] += ((*deriv)[maxPos] > 0 ? -peakSaveFactor : peakSaveFactor); searchingForPeak = ((*deriv)[maxPos] > 0 ? true : false); break; } } float begFauxCritVal = (searchingForPeak ? begMax : begMin); size_t currentBestIndex = 0; float currentBest = 0; size_t currentShortBestIndex = 0; float currentShortBest = 0; float periodMean = 0; float periodStdev = 0; size_t lastPeak = 0; size_t lastValley = 0; size_t countPeriods = 0; bool onLastInterval = false; bool foundCritPoint = false; bool lastIntervalWasShort = false; while (curIndex < (*deriv).size()) { size_t curPos = (direction > 0 ? curIndex : deriv->size() - curIndex - 1); //peaks are valleys in second derivative if ((searchingForPeak && (*deriv)[curPos] < currentBest) || (!searchingForPeak && (*deriv)[curPos] > currentBest)) { currentBest = (*deriv)[curPos]; currentBestIndex = curIndex; } if (curIndex < lastCritPoint + HARMONIC_PERIOD_SHORT_SEARCH_END*harmonicPeriod && ((searchingForPeak && (*deriv)[curPos] < currentShortBest) || (!searchingForPeak && (*deriv)[curPos] > currentShortBest))) { currentShortBest = (*deriv)[curPos]; currentShortBestIndex = curIndex; } curIndex++; if (curIndex > std::min((size_t)(lastCritPoint + HARMONIC_PERIOD_LONG_SEARCH_END*harmonicPeriod), (size_t)((*deriv).size() - 1))) { size_t lastCritPos = (direction > 0 ? 
lastCritPoint : deriv->size() - lastCritPoint - 1); float lastCritVal; if (lastCritPoint == 0) lastCritVal = begFauxCritVal; else lastCritVal = (*deriv)[lastCritPos]; if (currentShortBest != 0 && abs(currentShortBest - lastCritVal) > HARMONIC_PERIOD_SHORT_LONG_RATIO_THRESH*abs(currentBest - lastCritVal) && currentBestIndex > lastCritPoint + HARMONIC_PERIOD_LONG_OVERRIDE_BEGIN*harmonicPeriod && !(PROHIBIT_TWO_SHORTS_IN_ROW && lastIntervalWasShort)) { currentBest = currentShortBest; currentBestIndex = currentShortBestIndex; lastIntervalWasShort = true; } else lastIntervalWasShort = false; if (lastCritPoint != currentBestIndex) foundCritPoint = true; lastCritPoint = currentBestIndex; size_t currentBestPos = (direction > 0 ? currentBestIndex : deriv->size() - currentBestIndex - 1); if (currentBest != 0) { //if (currentBestIndex > (*deriv).size() / 2) (*peaks)[currentBestPos] += (searchingForPeak ? peakSaveFactor : -peakSaveFactor); float unsup = 0; if (searchingForPeak) { if (lastPeak > 0) unsup = currentBestIndex - lastPeak; lastPeak = currentBestIndex; } else { if (lastValley > 0) unsup = currentBestIndex - lastValley; lastValley = currentBestIndex; } if (unsup > 0) { periodMean += unsup; periodStdev += unsup*unsup; countPeriods++; } } else searchingForPeak = !searchingForPeak; currentBest = 0; currentShortBest = 0; if (lastCritPoint + harmonicPeriod < deriv->size() && foundCritPoint) { curIndex = lastCritPoint + HARMONIC_PERIOD_SEARCH_START*harmonicPeriod; searchingForPeak = !searchingForPeak; foundCritPoint = false; } else if (!onLastInterval && foundCritPoint) { curIndex = lastCritPoint + HARMONIC_PERIOD_SEARCH_START*harmonicPeriod; searchingForPeak = !searchingForPeak; onLastInterval = true; foundCritPoint = false; } else break; } } periodMean /= countPeriods; periodStdev /= countPeriods; periodStdev -= periodMean*periodMean; periodStdev = sqrt(periodStdev); return periodMean; } void adjustValleys(std::vector<float>* deriv, std::vector<float>* peaks, size_t harmonicPeriod) { std::vector<size_t> peakLocs; std::vector<size_t> valleyLocs; size_t lastPeak = peaks->size(); size_t lastValley = peaks->size(); float peakToValleyDist = 0; size_t numPeakToValleys = 0; float valleyToPeakDist = 0; size_t numValleyToPeaks = 0; float averagePeakHeight = 0; float averageValleyHeight = 0; for (size_t i = 0; i < peaks->size(); i++) { if ((*peaks)[i] == 1) { peakLocs.push_back(i); if (lastValley < peaks->size()) { valleyToPeakDist += i - lastValley; numValleyToPeaks++; } averagePeakHeight += abs((*deriv)[i]); lastPeak = i; } else if ((*peaks)[i] == -1) { valleyLocs.push_back(i); if (lastPeak < peaks->size()) { peakToValleyDist += i - lastPeak; numPeakToValleys++; } averageValleyHeight += abs((*deriv)[i]); lastValley = i; } } peakToValleyDist /= numPeakToValleys; valleyToPeakDist /= numValleyToPeaks; averagePeakHeight /= peakLocs.size(); averageValleyHeight /= valleyLocs.size(); //bool waveInverted = (peakToValleyDist < valleyToPeakDist ? true : false); bool waveInverted = (averageValleyHeight > averagePeakHeight ? true : false); //can often be wrong if there is only one clear peak and valley per pulse, but that should be taken care of by the close/far adjustment (inversion by horizontal distance is better in those cases) std::vector<size_t>* primaryLocs = (!waveInverted ? 
&peakLocs : &valleyLocs); if (primaryLocs->size() == 0) return; std::vector<size_t> closeOptima; std::vector<size_t> farOptima; float averageCloseOptimum = 0; //actually sum of optima float averageFarOptimum = 0; for (size_t i = 0; i < primaryLocs->size() - 1; i++) { float farOptimum = (!waveInverted ? -9999 : 9999); size_t farOptimumLoc = (*primaryLocs)[i] + 1; float closeOptimum = (!waveInverted ? -9999 : 9999); size_t closeOptimumLoc = (*primaryLocs)[i] + 1; for (size_t j = (*primaryLocs)[i] + 1; j < (*primaryLocs)[i + 1]; j++) { if ((!waveInverted ? (*deriv)[j] > closeOptimum : (*deriv)[j] < closeOptimum) && j - (*primaryLocs)[i] < ((*primaryLocs)[i + 1] - (*primaryLocs)[i]) / 2) { closeOptimum = (*deriv)[j]; closeOptimumLoc = j; } if ((!waveInverted ? (*deriv)[j] > farOptimum : (*deriv)[j] < farOptimum) && j - (*primaryLocs)[i] >= ((*primaryLocs)[i + 1] - (*primaryLocs)[i]) / 2) { farOptimum = (*deriv)[j]; farOptimumLoc = j; } if ((*peaks)[j] == (!waveInverted ? -1 : 1)) { (*peaks)[j] = 0; } } closeOptima.push_back(closeOptimumLoc); farOptima.push_back(farOptimumLoc); averageCloseOptimum += abs(closeOptimum); averageFarOptimum += abs(farOptimum); } std::vector<size_t>* optima = (averageCloseOptimum > ADJUST_CLOSE_VALLEY_THRESHOLD*averageFarOptimum ? &closeOptima : &farOptima); for (size_t i = 0; i < optima->size(); i++) { (*peaks)[(*optima)[i]] = (!waveInverted ? -1 : 1); } } void findPeaksAndValleys(std::vector<float>* waveform, std::vector<float>* peaks) { peaks->clear(); peaks->resize(waveform->size()); computeDerivative(waveform, &peakFindDeriv1, PEAK_FIND_DERIV_SMOOTHING_RANGE); computeDerivative(&peakFindDeriv1, &peakFindDeriv2, PEAK_FIND_DERIV_SMOOTHING_RANGE); fourierRe.clear(); fourierRe.resize(FOURIER_PATCH_WIDTH); fourierIm.clear(); fourierIm.resize(FOURIER_PATCH_WIDTH); fourierAvg.clear(); fourierAvg.resize(FOURIER_PATCH_WIDTH); interpolate(&peakFindDeriv2, &fourierRe, FOURIER_PATCH_WIDTH); FFT(1, FOURIER_PATCH_WIDTH_POW, &fourierRe[0], &fourierIm[0]); for (size_t i = 0; i < fourierRe.size(); i++) { fourierRe[i] = sqrt(fourierRe[i] * fourierRe[i] + fourierIm[i] * fourierIm[i]); } interpolate(&fourierRe, &fourierAvg, FOURIER_PATCH_WIDTH*FOURIER_PATCH_WIDTH / peakFindDeriv2.size()); size_t deriv2FirstHarmonic = findFirstHarmonic(&fourierAvg); if (deriv2FirstHarmonic == 0) deriv2FirstHarmonic = DEFAULT_FIRST_HARMONIC; size_t harmonicPeriod = waveform->size() / deriv2FirstHarmonic; float forwardMean = searchForPeaksAndValleys(&peakFindDeriv2, peaks, harmonicPeriod, 1, 1.0f); float backwardMean = searchForPeaksAndValleys(&peakFindDeriv2, peaks, harmonicPeriod, -1, 0.1f); if (fabs(forwardMean - harmonicPeriod) < fabs(backwardMean - harmonicPeriod)) { for (size_t i = 0; i < peaks->size(); i++) { if ((*peaks)[i] < -0.5f) (*peaks)[i] = -1.0f; else if ((*peaks)[i] > 0.5f) (*peaks)[i] = 1.0f; else (*peaks)[i] = 0.0f; } } else { for (size_t i = 0; i < peaks->size(); i++) { if ((*peaks)[i] < -0.5f) (*peaks)[i] += 1.0f; else if ((*peaks)[i] > 0.5f) (*peaks)[i] -= 1.0f; (*peaks)[i] *= 10.0f; } } adjustValleys(&peakFindDeriv2, peaks, harmonicPeriod); }
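searchForPeaksAndValleys estimates the spacing between successive peaks (and valleys) by accumulating a sum and a sum of squares, then forming the variance as E[x^2] - E[x]^2. A standalone C++ sketch of that accumulation, assuming a handful of made-up peak-to-peak distances:

#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical peak-to-peak distances, standing in for the "unsup" values
  // accumulated inside searchForPeaksAndValleys.
  const float periods[] = {48.0f, 52.0f, 50.0f, 49.0f, 51.0f};
  const int count = 5;

  float mean = 0.0f, stdev = 0.0f;
  for (int i = 0; i < count; ++i) {
    mean += periods[i];
    stdev += periods[i] * periods[i];
  }
  mean /= count;        // E[x]
  stdev /= count;       // E[x^2]
  stdev -= mean * mean; // Var = E[x^2] - E[x]^2
  stdev = std::sqrt(stdev);

  std::printf("period mean = %f, stdev = %f\n", mean, stdev);
  return 0;
}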
f19cd7d184ee59e68dc640a8cb54d2d26cc68c5d.hip
// !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file check/core/solver.cu
/// @brief Test nbfmm::Solver
///
/// @author Mu Yang <emfomy@gmail.com>
///

#include "solver.hpp"
#include <algorithm>
#include <numeric>
#include <random>

using namespace std;

CPPUNIT_TEST_SUITE_NAMED_REGISTRATION(TestNbfmmSolver, "Solver");

const float4 TestNbfmmSolver::position_limits = make_float4(0, -1, 8, 2);

TestNbfmmSolver::TestNbfmmSolver() : solver(num_level, num_particle, position_limits) {
  // Allocate memory
  hipMalloc(&gpuptr_float2, num_particle * sizeof(float2));
  hipMalloc(&gpuptr_float, num_particle * sizeof(float));

  // Create random generator
  default_random_engine generator;
  uniform_real_distribution<float> rand_position_x(position_limits.x, position_limits.z);
  uniform_real_distribution<float> rand_position_y(position_limits.y, position_limits.w);
  exponential_distribution<float> rand_weight(1.0);
  uniform_int_distribution<int> rand_head(0, 255);

  // Generate position and weight
  for ( auto i = 0; i < num_particle; ++i ) {
    random_position[i].x = rand_position_x(generator);
    random_position[i].y = rand_position_y(generator);
    random_weight[i] = rand_weight(generator);
  }
  float2 (*cell_position)[base_dim][base_dim] = (float2(*)[base_dim][base_dim]) random_cell_position;
  float (*cell_weight)[base_dim][base_dim] = (float (*)[base_dim][base_dim]) random_cell_weight;
  for ( auto l = 0; l < num_level; ++l ) {
    int cell_size = 1 << l;
    for ( auto j = 0; j < base_dim; j += cell_size ) {
      for ( auto i = 0; i < base_dim; i += cell_size ) {
        cell_position[l][j][i].x = rand_position_x(generator);
        cell_position[l][j][i].y = rand_position_y(generator);
        cell_weight[l][j][i] = rand_weight(generator);
      }
    }
  }

  // Generate head
  random_head[0] = 0;
  int rand_sum = 0;
  for ( auto i = 1; i < num_cell_p1; ++i ) {
    rand_sum += rand_head(generator);
    random_head[i] = rand_sum * num_particle;
  }
  for ( auto i = 0; i < num_cell_p1; ++i ) {
    random_head[i] /= rand_sum;
  }

  // Create random permutation
  iota(random_perm, random_perm+num_particle, 0);
  random_shuffle(random_perm, random_perm+num_particle);

  // Generate index
  for ( auto i = 0; i < base_dim * base_dim; ++i ) {
    fill(random_index+random_head[i], random_index+random_head[i+1], make_int2(i % base_dim, i / base_dim));
  }
}

TestNbfmmSolver::~TestNbfmmSolver() {
  hipFree(gpuptr_float2);
  hipFree(gpuptr_float);
}
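The "Generate head" block above builds a monotone head array by accumulating random increments, scaling each partial sum by num_particle, and finally dividing every entry by the accumulated total so the last entry equals num_particle. A standalone C++ sketch of that construction, assuming illustrative values for num_cell_p1 and num_particle (the real ones come from the test fixture):

#include <cstdio>
#include <random>
#include <vector>

int main() {
  // Illustrative sizes; the real values come from the test fixture.
  const int num_cell_p1 = 9;
  const long long num_particle = 1000; // long long here just avoids overflow in the sketch

  std::default_random_engine generator;
  std::uniform_int_distribution<int> rand_head(0, 255);

  std::vector<long long> head(num_cell_p1, 0); // head[0] stays 0
  long long rand_sum = 0;
  for (int i = 1; i < num_cell_p1; ++i) {
    rand_sum += rand_head(generator);
    head[i] = rand_sum * num_particle; // rescaled below once the total is known
  }
  // Divide by the accumulated total so head[num_cell_p1 - 1] == num_particle.
  // (Assumes rand_sum > 0, as the original code does implicitly.)
  for (int i = 0; i < num_cell_p1; ++i) head[i] /= rand_sum;

  for (int i = 0; i < num_cell_p1; ++i)
    std::printf("head[%d] = %lld\n", i, head[i]);
  return 0;
}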
f19cd7d184ee59e68dc640a8cb54d2d26cc68c5d.cu
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file check/core/solver.cu
/// @brief Test nbfmm::Solver
///
/// @author Mu Yang <emfomy@gmail.com>
///

#include "solver.hpp"
#include <algorithm>
#include <numeric>
#include <random>

using namespace std;

CPPUNIT_TEST_SUITE_NAMED_REGISTRATION(TestNbfmmSolver, "Solver");

const float4 TestNbfmmSolver::position_limits = make_float4(0, -1, 8, 2);

TestNbfmmSolver::TestNbfmmSolver() : solver(num_level, num_particle, position_limits) {
  // Allocate memory
  cudaMalloc(&gpuptr_float2, num_particle * sizeof(float2));
  cudaMalloc(&gpuptr_float, num_particle * sizeof(float));

  // Create random generator
  default_random_engine generator;
  uniform_real_distribution<float> rand_position_x(position_limits.x, position_limits.z);
  uniform_real_distribution<float> rand_position_y(position_limits.y, position_limits.w);
  exponential_distribution<float> rand_weight(1.0);
  uniform_int_distribution<int> rand_head(0, 255);

  // Generate position and weight
  for ( auto i = 0; i < num_particle; ++i ) {
    random_position[i].x = rand_position_x(generator);
    random_position[i].y = rand_position_y(generator);
    random_weight[i] = rand_weight(generator);
  }
  float2 (*cell_position)[base_dim][base_dim] = (float2(*)[base_dim][base_dim]) random_cell_position;
  float (*cell_weight)[base_dim][base_dim] = (float (*)[base_dim][base_dim]) random_cell_weight;
  for ( auto l = 0; l < num_level; ++l ) {
    int cell_size = 1 << l;
    for ( auto j = 0; j < base_dim; j += cell_size ) {
      for ( auto i = 0; i < base_dim; i += cell_size ) {
        cell_position[l][j][i].x = rand_position_x(generator);
        cell_position[l][j][i].y = rand_position_y(generator);
        cell_weight[l][j][i] = rand_weight(generator);
      }
    }
  }

  // Generate head
  random_head[0] = 0;
  int rand_sum = 0;
  for ( auto i = 1; i < num_cell_p1; ++i ) {
    rand_sum += rand_head(generator);
    random_head[i] = rand_sum * num_particle;
  }
  for ( auto i = 0; i < num_cell_p1; ++i ) {
    random_head[i] /= rand_sum;
  }

  // Create random permutation
  iota(random_perm, random_perm+num_particle, 0);
  random_shuffle(random_perm, random_perm+num_particle);

  // Generate index
  for ( auto i = 0; i < base_dim * base_dim; ++i ) {
    fill(random_index+random_head[i], random_index+random_head[i+1], make_int2(i % base_dim, i / base_dim));
  }
}

TestNbfmmSolver::~TestNbfmmSolver() {
  cudaFree(gpuptr_float2);
  cudaFree(gpuptr_float);
}
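The fixture allocates and frees its device buffers without checking return codes. Purely as an illustration, and not a claim about the original test's requirements, a checked version of the same allocations might look like the following CUDA sketch (CHECK_CUDA is a hypothetical helper, not part of the project):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical error-checking helper, not part of the original test.
#define CHECK_CUDA(call)                                           \
  do {                                                             \
    cudaError_t err = (call);                                      \
    if (err != cudaSuccess) {                                      \
      std::fprintf(stderr, "CUDA error %s at %s:%d\n",             \
                   cudaGetErrorString(err), __FILE__, __LINE__);   \
      std::exit(EXIT_FAILURE);                                     \
    }                                                              \
  } while (0)

int main() {
  const int num_particle = 1024; // illustrative size
  float2* gpuptr_float2 = nullptr;
  float* gpuptr_float = nullptr;

  CHECK_CUDA(cudaMalloc(&gpuptr_float2, num_particle * sizeof(float2)));
  CHECK_CUDA(cudaMalloc(&gpuptr_float, num_particle * sizeof(float)));

  // ... use the buffers ...

  CHECK_CUDA(cudaFree(gpuptr_float2));
  CHECK_CUDA(cudaFree(gpuptr_float));
  return 0;
}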
4cf22fe3ad590925a6e93409ac982055421efbe8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/memory/allocation/allocator.h> #include <stdio.h> #include <string> #include <vector> #include "hipcub/hipcub.hpp" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; namespace { #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) int const kThreadsPerBlock = sizeof(uint64_t) * 8; static const double kBBoxClipDefault = ::log(1000.0 / 16.0); struct RangeInitFunctor { int start_; int delta_; int *out_; __device__ void operator()(size_t i) { out_[i] = start_ + i * delta_; } }; template <typename T> static void SortDescending(const platform::CUDADeviceContext &ctx, const Tensor &value, Tensor *value_out, Tensor *index_out) { int num = static_cast<int>(value.numel()); Tensor index_in_t; int *idx_in = index_in_t.mutable_data<int>({num}, ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range(ctx, num); for_range(RangeInitFunctor{0, 1, idx_in}); int *idx_out = index_out->mutable_data<int>({num}, ctx.GetPlace()); const T *keys_in = value.data<T>(); T *keys_out = value_out->mutable_data<T>({num}, ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairsDescending<T, int>( nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num); // Allocate temporary storage auto place = boost::get<platform::CUDAPlace>(ctx.GetPlace()); auto d_temp_storage = memory::Alloc(place, temp_storage_bytes, memory::Allocator::kScratchpad); // Run sorting operation hipcub::DeviceRadixSort::SortPairsDescending<T, int>( d_temp_storage->ptr(), temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num); } template <typename T> struct BoxDecodeAndClipFunctor { const T *anchor; const T *deltas; const T *var; const int *index; const T *im_info; T *proposals; BoxDecodeAndClipFunctor(const T *anchor, const T *deltas, const T *var, const int *index, const T *im_info, T *proposals) : anchor(anchor), deltas(deltas), var(var), index(index), im_info(im_info), proposals(proposals) {} T bbox_clip_default{static_cast<T>(kBBoxClipDefault)}; __device__ void operator()(size_t i) { int k = index[i] * 4; T axmin = anchor[k]; T aymin = anchor[k + 1]; T axmax = anchor[k + 2]; T aymax = anchor[k + 3]; T w = axmax - axmin + 1.0; T h = aymax - aymin + 1.0; T cx = axmin + 0.5 * w; T cy = aymin + 0.5 * h; T dxmin = deltas[k]; T dymin = deltas[k + 
1]; T dxmax = deltas[k + 2]; T dymax = deltas[k + 3]; T d_cx, d_cy, d_w, d_h; if (var) { d_cx = cx + dxmin * w * var[k]; d_cy = cy + dymin * h * var[k + 1]; d_w = exp(Min(dxmax * var[k + 2], bbox_clip_default)) * w; d_h = exp(Min(dymax * var[k + 3], bbox_clip_default)) * h; } else { d_cx = cx + dxmin * w; d_cy = cy + dymin * h; d_w = exp(Min(dxmax, bbox_clip_default)) * w; d_h = exp(Min(dymax, bbox_clip_default)) * h; } T oxmin = d_cx - d_w * 0.5; T oymin = d_cy - d_h * 0.5; T oxmax = d_cx + d_w * 0.5 - 1.; T oymax = d_cy + d_h * 0.5 - 1.; proposals[i * 4] = Max(Min(oxmin, im_info[1] - 1.), 0.); proposals[i * 4 + 1] = Max(Min(oymin, im_info[0] - 1.), 0.); proposals[i * 4 + 2] = Max(Min(oxmax, im_info[1] - 1.), 0.); proposals[i * 4 + 3] = Max(Min(oymax, im_info[0] - 1.), 0.); } __device__ __forceinline__ T Min(T a, T b) const { return a > b ? b : a; } __device__ __forceinline__ T Max(T a, T b) const { return a > b ? a : b; } }; template <typename T, int BlockSize> static __global__ void FilterBBoxes(const T *bboxes, const T *im_info, const T min_size, const int num, int *keep_num, int *keep) { T im_h = im_info[0]; T im_w = im_info[1]; T im_scale = im_info[2]; int cnt = 0; __shared__ int keep_index[BlockSize]; CUDA_1D_KERNEL_LOOP(i, num) { keep_index[threadIdx.x] = -1; __syncthreads(); int k = i * 4; T xmin = bboxes[k]; T ymin = bboxes[k + 1]; T xmax = bboxes[k + 2]; T ymax = bboxes[k + 3]; T w = xmax - xmin + 1.0; T h = ymax - ymin + 1.0; T cx = xmin + w / 2.; T cy = ymin + h / 2.; T w_s = (xmax - xmin) / im_scale + 1.; T h_s = (ymax - ymin) / im_scale + 1.; if (w_s >= min_size && h_s >= min_size && cx <= im_w && cy <= im_h) { keep_index[threadIdx.x] = i; } __syncthreads(); if (threadIdx.x == 0) { int size = (num - i) < BlockSize ? num - i : BlockSize; for (int j = 0; j < size; ++j) { if (keep_index[j] > -1) { keep[cnt++] = keep_index[j]; } } } __syncthreads(); } if (threadIdx.x == 0) { keep_num[0] = cnt; } } static __device__ inline float IoU(const float *a, const float *b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float inter_s = width * height; float s_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float s_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return inter_s / (s_a + s_b - inter_s); } static __global__ void NMSKernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, uint64_t *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; const int row_size = min(n_boxes - row_start * kThreadsPerBlock, kThreadsPerBlock); const int col_size = min(n_boxes - col_start * kThreadsPerBlock, kThreadsPerBlock); __shared__ float block_boxes[kThreadsPerBlock * 4]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 4 + 0] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 0]; block_boxes[threadIdx.x * 4 + 1] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 1]; block_boxes[threadIdx.x * 4 + 2] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 2]; block_boxes[threadIdx.x * 4 + 3] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 3]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = kThreadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 4; int i = 0; uint64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (IoU(cur_box, 
block_boxes + i * 4) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, kThreadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } template <typename T> static void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals, const Tensor &sorted_indices, const T nms_threshold, Tensor *keep_out) { int boxes_num = proposals.dims()[0]; PADDLE_ENFORCE_EQ(boxes_num, sorted_indices.dims()[0]); const int col_blocks = DIVUP(boxes_num, kThreadsPerBlock); dim3 blocks(DIVUP(boxes_num, kThreadsPerBlock), DIVUP(boxes_num, kThreadsPerBlock)); dim3 threads(kThreadsPerBlock); const T *boxes = proposals.data<T>(); auto place = boost::get<platform::CUDAPlace>(ctx.GetPlace()); framework::Vector<uint64_t> mask(boxes_num * col_blocks); hipLaunchKernelGGL(( NMSKernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_threshold, boxes, mask.CUDAMutableData(boost::get<platform::CUDAPlace>(ctx.GetPlace()))); std::vector<uint64_t> remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); std::vector<int> keep_vec; int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / kThreadsPerBlock; int inblock = i % kThreadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { ++num_to_keep; keep_vec.push_back(i); uint64_t *p = &mask[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } int *keep = keep_out->mutable_data<int>({num_to_keep}, ctx.GetPlace()); memory::Copy(place, keep, platform::CPUPlace(), keep_vec.data(), sizeof(int) * num_to_keep, ctx.stream()); ctx.Wait(); } template <typename T> static std::pair<Tensor, Tensor> ProposalForOneImage( const platform::CUDADeviceContext &ctx, const Tensor &im_info, const Tensor &anchors, const Tensor &variances, const Tensor &bbox_deltas, // [M, 4] const Tensor &scores, // [N, 1] int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta) { // 1. pre nms Tensor scores_sort, index_sort; SortDescending<T>(ctx, scores, &scores_sort, &index_sort); int num = scores.numel(); int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel() : pre_nms_top_n; scores_sort.Resize({pre_nms_num, 1}); index_sort.Resize({pre_nms_num, 1}); // 2. box decode and clipping Tensor proposals; proposals.mutable_data<T>({pre_nms_num, 4}, ctx.GetPlace()); { platform::ForRange<platform::CUDADeviceContext> for_range(ctx, pre_nms_num); for_range(BoxDecodeAndClipFunctor<T>{ anchors.data<T>(), bbox_deltas.data<T>(), variances.data<T>(), index_sort.data<int>(), im_info.data<T>(), proposals.data<T>()}); } // 3. 
filter Tensor keep_index, keep_num_t; keep_index.mutable_data<int>({pre_nms_num}, ctx.GetPlace()); keep_num_t.mutable_data<int>({1}, ctx.GetPlace()); min_size = ::max(min_size, 1.0f); auto stream = ctx.stream(); hipLaunchKernelGGL(( FilterBBoxes<T, 512>), dim3(1), dim3(512), 0, stream, proposals.data<T>(), im_info.data<T>(), min_size, pre_nms_num, keep_num_t.data<int>(), keep_index.data<int>()); int keep_num; const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace()); memory::Copy(platform::CPUPlace(), &keep_num, gpu_place, keep_num_t.data<int>(), sizeof(int), ctx.stream()); ctx.Wait(); keep_index.Resize({keep_num}); Tensor scores_filter, proposals_filter; proposals_filter.mutable_data<T>({keep_num, 4}, ctx.GetPlace()); scores_filter.mutable_data<T>({keep_num, 1}, ctx.GetPlace()); GPUGather<T>(ctx, proposals, keep_index, &proposals_filter); GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter); if (nms_thresh <= 0) { return std::make_pair(proposals_filter, scores_filter); } // 4. nms Tensor keep_nms; NMS<T>(ctx, proposals_filter, keep_index, nms_thresh, &keep_nms); if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) { keep_nms.Resize({post_nms_top_n}); } Tensor scores_nms, proposals_nms; proposals_nms.mutable_data<T>({keep_nms.numel(), 4}, ctx.GetPlace()); scores_nms.mutable_data<T>({keep_nms.numel(), 1}, ctx.GetPlace()); GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms); GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms); return std::make_pair(proposals_nms, scores_nms); } } // namespace template <typename DeviceContext, typename T> class CUDAGenerateProposalsKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *scores = context.Input<Tensor>("Scores"); auto *bbox_deltas = context.Input<Tensor>("BboxDeltas"); auto *im_info = context.Input<Tensor>("ImInfo"); auto anchors = detail::Ref(context.Input<Tensor>("Anchors"), "Cannot find input Anchors(%s) in scope", context.Inputs("Anchors")[0]); auto variances = detail::Ref(context.Input<Tensor>("Variances"), "Cannot find input Variances(%s) in scope", context.Inputs("Variances")[0]); auto *rpn_rois = context.Output<LoDTensor>("RpnRois"); auto *rpn_roi_probs = context.Output<LoDTensor>("RpnRoiProbs"); int pre_nms_top_n = context.Attr<int>("pre_nms_topN"); int post_nms_top_n = context.Attr<int>("post_nms_topN"); float nms_thresh = context.Attr<float>("nms_thresh"); float min_size = context.Attr<float>("min_size"); float eta = context.Attr<float>("eta"); PADDLE_ENFORCE_GE(eta, 1., "Not support adaptive NMS."); auto &dev_ctx = context.template device_context<DeviceContext>(); auto scores_dim = scores->dims(); int64_t num = scores_dim[0]; int64_t c_score = scores_dim[1]; int64_t h_score = scores_dim[2]; int64_t w_score = scores_dim[3]; auto bbox_dim = bbox_deltas->dims(); int64_t c_bbox = bbox_dim[1]; int64_t h_bbox = bbox_dim[2]; int64_t w_bbox = bbox_dim[3]; Tensor bbox_deltas_swap, scores_swap; bbox_deltas_swap.mutable_data<T>({num, h_bbox, w_bbox, c_bbox}, dev_ctx.GetPlace()); scores_swap.mutable_data<T>({num, h_score, w_score, c_score}, dev_ctx.GetPlace()); math::Transpose<DeviceContext, T, 4> trans; std::vector<int> axis = {0, 2, 3, 1}; trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis); trans(dev_ctx, *scores, &scores_swap, axis); anchors.Resize({anchors.numel() / 4, 4}); variances.Resize({variances.numel() / 4, 4}); rpn_rois->mutable_data<T>({bbox_deltas->numel() / 4, 4}, context.GetPlace()); 
rpn_roi_probs->mutable_data<T>({scores->numel(), 1}, context.GetPlace()); T *rpn_rois_data = rpn_rois->data<T>(); T *rpn_roi_probs_data = rpn_roi_probs->data<T>(); auto place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()); int64_t num_proposals = 0; std::vector<size_t> offset(1, 0); for (int64_t i = 0; i < num; ++i) { Tensor im_info_slice = im_info->Slice(i, i + 1); Tensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1); Tensor scores_slice = scores_swap.Slice(i, i + 1); bbox_deltas_slice.Resize({h_bbox * w_bbox * c_bbox / 4, 4}); scores_slice.Resize({h_score * w_score * c_score, 1}); std::pair<Tensor, Tensor> box_score_pair = ProposalForOneImage<T>(dev_ctx, im_info_slice, anchors, variances, bbox_deltas_slice, scores_slice, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta); Tensor &proposals = box_score_pair.first; Tensor &scores = box_score_pair.second; memory::Copy(place, rpn_rois_data + num_proposals * 4, place, proposals.data<T>(), sizeof(T) * proposals.numel(), dev_ctx.stream()); memory::Copy(place, rpn_roi_probs_data + num_proposals, place, scores.data<T>(), sizeof(T) * scores.numel(), dev_ctx.stream()); dev_ctx.Wait(); num_proposals += proposals.dims()[0]; offset.emplace_back(num_proposals); } framework::LoD lod; lod.emplace_back(offset); rpn_rois->set_lod(lod); rpn_roi_probs->set_lod(lod); rpn_rois->Resize({num_proposals, 4}); rpn_roi_probs->Resize({num_proposals, 1}); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(generate_proposals, ops::CUDAGenerateProposalsKernel< paddle::platform::CUDADeviceContext, float>);
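SortDescending above uses CUB's two-phase calling convention: the first SortPairsDescending call with a null temporary-storage pointer only reports temp_storage_bytes, and the second call performs the sort. A self-contained CUDA sketch of that pattern on made-up keys and indices (error checking omitted for brevity):

#include <cstdio>
#include <cuda_runtime.h>
#include <cub/cub.cuh>

int main() {
  const int num = 8;
  const float h_keys[num] = {0.1f, 0.9f, 0.3f, 0.7f, 0.5f, 0.2f, 0.8f, 0.4f};
  const int h_vals[num] = {0, 1, 2, 3, 4, 5, 6, 7};

  float *d_keys_in, *d_keys_out;
  int *d_vals_in, *d_vals_out;
  cudaMalloc(&d_keys_in, num * sizeof(float));
  cudaMalloc(&d_keys_out, num * sizeof(float));
  cudaMalloc(&d_vals_in, num * sizeof(int));
  cudaMalloc(&d_vals_out, num * sizeof(int));
  cudaMemcpy(d_keys_in, h_keys, num * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_vals_in, h_vals, num * sizeof(int), cudaMemcpyHostToDevice);

  // Phase 1: a call with a null temp-storage pointer only reports the size needed.
  void* d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
                                            d_keys_in, d_keys_out,
                                            d_vals_in, d_vals_out, num);

  // Phase 2: allocate that much scratch space and run the actual sort.
  cudaMalloc(&d_temp_storage, temp_storage_bytes);
  cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
                                            d_keys_in, d_keys_out,
                                            d_vals_in, d_vals_out, num);

  int h_sorted_idx[num];
  cudaMemcpy(h_sorted_idx, d_vals_out, num * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < num; ++i) std::printf("%d ", h_sorted_idx[i]); // 1 6 3 4 7 2 5 0
  std::printf("\n");

  cudaFree(d_temp_storage);
  cudaFree(d_keys_in);
  cudaFree(d_keys_out);
  cudaFree(d_vals_in);
  cudaFree(d_vals_out);
  return 0;
}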
4cf22fe3ad590925a6e93409ac982055421efbe8.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/memory/allocation/allocator.h> #include <stdio.h> #include <string> #include <vector> #include "cub/cub.cuh" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; namespace { #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) int const kThreadsPerBlock = sizeof(uint64_t) * 8; static const double kBBoxClipDefault = std::log(1000.0 / 16.0); struct RangeInitFunctor { int start_; int delta_; int *out_; __device__ void operator()(size_t i) { out_[i] = start_ + i * delta_; } }; template <typename T> static void SortDescending(const platform::CUDADeviceContext &ctx, const Tensor &value, Tensor *value_out, Tensor *index_out) { int num = static_cast<int>(value.numel()); Tensor index_in_t; int *idx_in = index_in_t.mutable_data<int>({num}, ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range(ctx, num); for_range(RangeInitFunctor{0, 1, idx_in}); int *idx_out = index_out->mutable_data<int>({num}, ctx.GetPlace()); const T *keys_in = value.data<T>(); T *keys_out = value_out->mutable_data<T>({num}, ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairsDescending<T, int>( nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num); // Allocate temporary storage auto place = boost::get<platform::CUDAPlace>(ctx.GetPlace()); auto d_temp_storage = memory::Alloc(place, temp_storage_bytes, memory::Allocator::kScratchpad); // Run sorting operation cub::DeviceRadixSort::SortPairsDescending<T, int>( d_temp_storage->ptr(), temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num); } template <typename T> struct BoxDecodeAndClipFunctor { const T *anchor; const T *deltas; const T *var; const int *index; const T *im_info; T *proposals; BoxDecodeAndClipFunctor(const T *anchor, const T *deltas, const T *var, const int *index, const T *im_info, T *proposals) : anchor(anchor), deltas(deltas), var(var), index(index), im_info(im_info), proposals(proposals) {} T bbox_clip_default{static_cast<T>(kBBoxClipDefault)}; __device__ void operator()(size_t i) { int k = index[i] * 4; T axmin = anchor[k]; T aymin = anchor[k + 1]; T axmax = anchor[k + 2]; T aymax = anchor[k + 3]; T w = axmax - axmin + 1.0; T h = aymax - aymin + 1.0; T cx = axmin + 0.5 * w; T cy = aymin + 0.5 * h; T dxmin = deltas[k]; T dymin = deltas[k + 1]; T dxmax = deltas[k + 2]; T dymax = deltas[k + 3]; T d_cx, d_cy, d_w, d_h; if (var) { d_cx = cx 
+ dxmin * w * var[k]; d_cy = cy + dymin * h * var[k + 1]; d_w = exp(Min(dxmax * var[k + 2], bbox_clip_default)) * w; d_h = exp(Min(dymax * var[k + 3], bbox_clip_default)) * h; } else { d_cx = cx + dxmin * w; d_cy = cy + dymin * h; d_w = exp(Min(dxmax, bbox_clip_default)) * w; d_h = exp(Min(dymax, bbox_clip_default)) * h; } T oxmin = d_cx - d_w * 0.5; T oymin = d_cy - d_h * 0.5; T oxmax = d_cx + d_w * 0.5 - 1.; T oymax = d_cy + d_h * 0.5 - 1.; proposals[i * 4] = Max(Min(oxmin, im_info[1] - 1.), 0.); proposals[i * 4 + 1] = Max(Min(oymin, im_info[0] - 1.), 0.); proposals[i * 4 + 2] = Max(Min(oxmax, im_info[1] - 1.), 0.); proposals[i * 4 + 3] = Max(Min(oymax, im_info[0] - 1.), 0.); } __device__ __forceinline__ T Min(T a, T b) const { return a > b ? b : a; } __device__ __forceinline__ T Max(T a, T b) const { return a > b ? a : b; } }; template <typename T, int BlockSize> static __global__ void FilterBBoxes(const T *bboxes, const T *im_info, const T min_size, const int num, int *keep_num, int *keep) { T im_h = im_info[0]; T im_w = im_info[1]; T im_scale = im_info[2]; int cnt = 0; __shared__ int keep_index[BlockSize]; CUDA_1D_KERNEL_LOOP(i, num) { keep_index[threadIdx.x] = -1; __syncthreads(); int k = i * 4; T xmin = bboxes[k]; T ymin = bboxes[k + 1]; T xmax = bboxes[k + 2]; T ymax = bboxes[k + 3]; T w = xmax - xmin + 1.0; T h = ymax - ymin + 1.0; T cx = xmin + w / 2.; T cy = ymin + h / 2.; T w_s = (xmax - xmin) / im_scale + 1.; T h_s = (ymax - ymin) / im_scale + 1.; if (w_s >= min_size && h_s >= min_size && cx <= im_w && cy <= im_h) { keep_index[threadIdx.x] = i; } __syncthreads(); if (threadIdx.x == 0) { int size = (num - i) < BlockSize ? num - i : BlockSize; for (int j = 0; j < size; ++j) { if (keep_index[j] > -1) { keep[cnt++] = keep_index[j]; } } } __syncthreads(); } if (threadIdx.x == 0) { keep_num[0] = cnt; } } static __device__ inline float IoU(const float *a, const float *b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float inter_s = width * height; float s_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float s_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return inter_s / (s_a + s_b - inter_s); } static __global__ void NMSKernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, uint64_t *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; const int row_size = min(n_boxes - row_start * kThreadsPerBlock, kThreadsPerBlock); const int col_size = min(n_boxes - col_start * kThreadsPerBlock, kThreadsPerBlock); __shared__ float block_boxes[kThreadsPerBlock * 4]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 4 + 0] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 0]; block_boxes[threadIdx.x * 4 + 1] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 1]; block_boxes[threadIdx.x * 4 + 2] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 2]; block_boxes[threadIdx.x * 4 + 3] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 3]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = kThreadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 4; int i = 0; uint64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (IoU(cur_box, block_boxes + i * 4) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, 
kThreadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } template <typename T> static void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals, const Tensor &sorted_indices, const T nms_threshold, Tensor *keep_out) { int boxes_num = proposals.dims()[0]; PADDLE_ENFORCE_EQ(boxes_num, sorted_indices.dims()[0]); const int col_blocks = DIVUP(boxes_num, kThreadsPerBlock); dim3 blocks(DIVUP(boxes_num, kThreadsPerBlock), DIVUP(boxes_num, kThreadsPerBlock)); dim3 threads(kThreadsPerBlock); const T *boxes = proposals.data<T>(); auto place = boost::get<platform::CUDAPlace>(ctx.GetPlace()); framework::Vector<uint64_t> mask(boxes_num * col_blocks); NMSKernel<<<blocks, threads>>>( boxes_num, nms_threshold, boxes, mask.CUDAMutableData(boost::get<platform::CUDAPlace>(ctx.GetPlace()))); std::vector<uint64_t> remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); std::vector<int> keep_vec; int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / kThreadsPerBlock; int inblock = i % kThreadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { ++num_to_keep; keep_vec.push_back(i); uint64_t *p = &mask[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } int *keep = keep_out->mutable_data<int>({num_to_keep}, ctx.GetPlace()); memory::Copy(place, keep, platform::CPUPlace(), keep_vec.data(), sizeof(int) * num_to_keep, ctx.stream()); ctx.Wait(); } template <typename T> static std::pair<Tensor, Tensor> ProposalForOneImage( const platform::CUDADeviceContext &ctx, const Tensor &im_info, const Tensor &anchors, const Tensor &variances, const Tensor &bbox_deltas, // [M, 4] const Tensor &scores, // [N, 1] int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta) { // 1. pre nms Tensor scores_sort, index_sort; SortDescending<T>(ctx, scores, &scores_sort, &index_sort); int num = scores.numel(); int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel() : pre_nms_top_n; scores_sort.Resize({pre_nms_num, 1}); index_sort.Resize({pre_nms_num, 1}); // 2. box decode and clipping Tensor proposals; proposals.mutable_data<T>({pre_nms_num, 4}, ctx.GetPlace()); { platform::ForRange<platform::CUDADeviceContext> for_range(ctx, pre_nms_num); for_range(BoxDecodeAndClipFunctor<T>{ anchors.data<T>(), bbox_deltas.data<T>(), variances.data<T>(), index_sort.data<int>(), im_info.data<T>(), proposals.data<T>()}); } // 3. filter Tensor keep_index, keep_num_t; keep_index.mutable_data<int>({pre_nms_num}, ctx.GetPlace()); keep_num_t.mutable_data<int>({1}, ctx.GetPlace()); min_size = std::max(min_size, 1.0f); auto stream = ctx.stream(); FilterBBoxes<T, 512><<<1, 512, 0, stream>>>( proposals.data<T>(), im_info.data<T>(), min_size, pre_nms_num, keep_num_t.data<int>(), keep_index.data<int>()); int keep_num; const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace()); memory::Copy(platform::CPUPlace(), &keep_num, gpu_place, keep_num_t.data<int>(), sizeof(int), ctx.stream()); ctx.Wait(); keep_index.Resize({keep_num}); Tensor scores_filter, proposals_filter; proposals_filter.mutable_data<T>({keep_num, 4}, ctx.GetPlace()); scores_filter.mutable_data<T>({keep_num, 1}, ctx.GetPlace()); GPUGather<T>(ctx, proposals, keep_index, &proposals_filter); GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter); if (nms_thresh <= 0) { return std::make_pair(proposals_filter, scores_filter); } // 4. 
nms Tensor keep_nms; NMS<T>(ctx, proposals_filter, keep_index, nms_thresh, &keep_nms); if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) { keep_nms.Resize({post_nms_top_n}); } Tensor scores_nms, proposals_nms; proposals_nms.mutable_data<T>({keep_nms.numel(), 4}, ctx.GetPlace()); scores_nms.mutable_data<T>({keep_nms.numel(), 1}, ctx.GetPlace()); GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms); GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms); return std::make_pair(proposals_nms, scores_nms); } } // namespace template <typename DeviceContext, typename T> class CUDAGenerateProposalsKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *scores = context.Input<Tensor>("Scores"); auto *bbox_deltas = context.Input<Tensor>("BboxDeltas"); auto *im_info = context.Input<Tensor>("ImInfo"); auto anchors = detail::Ref(context.Input<Tensor>("Anchors"), "Cannot find input Anchors(%s) in scope", context.Inputs("Anchors")[0]); auto variances = detail::Ref(context.Input<Tensor>("Variances"), "Cannot find input Variances(%s) in scope", context.Inputs("Variances")[0]); auto *rpn_rois = context.Output<LoDTensor>("RpnRois"); auto *rpn_roi_probs = context.Output<LoDTensor>("RpnRoiProbs"); int pre_nms_top_n = context.Attr<int>("pre_nms_topN"); int post_nms_top_n = context.Attr<int>("post_nms_topN"); float nms_thresh = context.Attr<float>("nms_thresh"); float min_size = context.Attr<float>("min_size"); float eta = context.Attr<float>("eta"); PADDLE_ENFORCE_GE(eta, 1., "Not support adaptive NMS."); auto &dev_ctx = context.template device_context<DeviceContext>(); auto scores_dim = scores->dims(); int64_t num = scores_dim[0]; int64_t c_score = scores_dim[1]; int64_t h_score = scores_dim[2]; int64_t w_score = scores_dim[3]; auto bbox_dim = bbox_deltas->dims(); int64_t c_bbox = bbox_dim[1]; int64_t h_bbox = bbox_dim[2]; int64_t w_bbox = bbox_dim[3]; Tensor bbox_deltas_swap, scores_swap; bbox_deltas_swap.mutable_data<T>({num, h_bbox, w_bbox, c_bbox}, dev_ctx.GetPlace()); scores_swap.mutable_data<T>({num, h_score, w_score, c_score}, dev_ctx.GetPlace()); math::Transpose<DeviceContext, T, 4> trans; std::vector<int> axis = {0, 2, 3, 1}; trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis); trans(dev_ctx, *scores, &scores_swap, axis); anchors.Resize({anchors.numel() / 4, 4}); variances.Resize({variances.numel() / 4, 4}); rpn_rois->mutable_data<T>({bbox_deltas->numel() / 4, 4}, context.GetPlace()); rpn_roi_probs->mutable_data<T>({scores->numel(), 1}, context.GetPlace()); T *rpn_rois_data = rpn_rois->data<T>(); T *rpn_roi_probs_data = rpn_roi_probs->data<T>(); auto place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()); int64_t num_proposals = 0; std::vector<size_t> offset(1, 0); for (int64_t i = 0; i < num; ++i) { Tensor im_info_slice = im_info->Slice(i, i + 1); Tensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1); Tensor scores_slice = scores_swap.Slice(i, i + 1); bbox_deltas_slice.Resize({h_bbox * w_bbox * c_bbox / 4, 4}); scores_slice.Resize({h_score * w_score * c_score, 1}); std::pair<Tensor, Tensor> box_score_pair = ProposalForOneImage<T>(dev_ctx, im_info_slice, anchors, variances, bbox_deltas_slice, scores_slice, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta); Tensor &proposals = box_score_pair.first; Tensor &scores = box_score_pair.second; memory::Copy(place, rpn_rois_data + num_proposals * 4, place, proposals.data<T>(), sizeof(T) * proposals.numel(), dev_ctx.stream()); memory::Copy(place, 
rpn_roi_probs_data + num_proposals, place, scores.data<T>(), sizeof(T) * scores.numel(), dev_ctx.stream()); dev_ctx.Wait(); num_proposals += proposals.dims()[0]; offset.emplace_back(num_proposals); } framework::LoD lod; lod.emplace_back(offset); rpn_rois->set_lod(lod); rpn_roi_probs->set_lod(lod); rpn_rois->Resize({num_proposals, 4}); rpn_roi_probs->Resize({num_proposals, 1}); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(generate_proposals, ops::CUDAGenerateProposalsKernel< paddle::platform::CUDADeviceContext, float>);
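After NMSKernel has filled the suppression bitmask, the host-side loop in NMS walks the boxes in score order, skips any box whose bit is already set in remv, and ORs the kept box's mask blocks into remv. A CPU-only C++ sketch of that decode step, assuming a hand-made 4-box mask (the mask values are invented for illustration):

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Four boxes, already in descending score order, with one 64-bit mask block
  // per box (col_blocks == 1). mask[i] has a bit set for each later box that
  // overlaps box i above the NMS threshold; these particular values are invented.
  const int boxes_num = 4;
  const int col_blocks = 1;
  const int threads_per_block = 64; // kThreadsPerBlock in the code above
  std::vector<uint64_t> mask = {
      0x6, // box 0 overlaps boxes 1 and 2
      0x0,
      0x8, // box 2 overlaps box 3 (but box 2 is itself suppressed by box 0)
      0x0,
  };

  std::vector<uint64_t> remv(col_blocks, 0);
  std::vector<int> keep;
  for (int i = 0; i < boxes_num; ++i) {
    int nblock = i / threads_per_block;
    int inblock = i % threads_per_block;
    if (!(remv[nblock] & (1ULL << inblock))) {
      keep.push_back(i);
      const uint64_t* p = &mask[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; ++j) remv[j] |= p[j];
    }
  }

  for (int idx : keep) std::printf("keep box %d\n", idx); // boxes 0 and 3
  return 0;
}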
a0f7441fb16a849a320e0db919619277447292dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_regression_cuda.h" #include "auxilliary_functions.hpp" #include "Rodeo_macros.hpp" #include "test_functions.hpp" #include<stdio.h> #include<iostream> #include<math.h> #include <armadillo> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #include <codi.hpp> using namespace arma; //This implementation using CAS incurs a non-trivial cost though. //Had to use this because compute < 600 doesn't support atomic add with float and > 600 throws up some MemCpy - invalid code error //__device__ float atomicDAdd(float* address, float val); // //__device__ float atomicDAdd(float* address, float val) //{ // unsigned long long int* address_as_ull = // (unsigned long long int*)address; // unsigned long long int old = *address_as_ull, assumed; // do { // assumed = old; // old = atomicCAS(address_as_ull, assumed, // __float_as_longlong(val + // __longlong_as_float(assumed))); // } while (assumed != old); // return __longlong_as_float(old); //} //__managed__ float MDevice[numVar*numVar+1]; __constant__ float MDevice[numVar*numVar+1]; float gaussianKernel(frowvec &xi, frowvec &xj, float sigma, fmat &M) { #if 0 printf("calling gaussianKernel...\n"); xi.print(); xj.print(); #endif /* calculate distance between xi and xj with the matrix M */ float metricVal = calcMetric(xi, xj, M); #if 0 printf("metricVal = %10.7f\n",metricVal); #endif float sqr_two_pi = sqrt(2.0 * datum::pi); float kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); kernelVal += 10E-14; #if 0 printf("kernelVal = %10.7f\n",kernelVal); #endif return kernelVal; } float SIGN(float a, float b) { if (b >= 0.0) { return fabs(a); } else { return -fabs(a); } } codi::RealReverse SIGN(codi::RealReverse a, codi::RealReverse b) { if (b >= 0.0) { return fabs(a); } else { return -fabs(a); } } codi::RealForward SIGN(codi::RealForward a, codi::RealForward b) { if (b >= 0.0) { return fabs(a); } else { return -fabs(a); } } float PYTHAG(float a, float b) { float at = fabs(a), bt = fabs(b), ct, result; if (at > bt) { ct = bt / at; result = at * sqrt(1.0 + ct * ct); } else if (bt > 0.0) { ct = at / bt; result = bt * sqrt(1.0 + ct * ct); } else result = 0.0; return (result); } codi::RealReverse PYTHAG(codi::RealReverse a, codi::RealReverse b) { codi::RealReverse at = fabs(a), bt = fabs(b), ct, result; if (at > bt) { ct = bt / at; result = at * sqrt(1.0 + ct * ct); } else if (bt > 0.0) { ct = at / bt; result = bt * sqrt(1.0 + ct * ct); } else result = 0.0; return (result); } codi::RealForward PYTHAG(codi::RealForward a, codi::RealForward b) { codi::RealForward at = fabs(a), bt = fabs(b), ct, result; if (at > bt) { ct = bt / at; result = at * sqrt(1.0 + ct * ct); } else if (bt > 0.0) { ct = at / bt; result = bt * sqrt(1.0 + ct * ct); } else result = 0.0; return (result); } /** calculate regularization terms for the given matrix L * * @param[in] L: lower diagonal matrix * @param[in] wSvd: weight for the svd regularization part * @param[in] w12: weight for the mixed 12 regularization part * @param[out] regTerm * */ int calcRegTerms(float *L, float *regTerm, float wSvd, float w12, int dim) { int flag, i, its, j, jj, k, l = 0, nm; float c, f, h, s, x, y, z; float anorm = 0.0, g = 0.0, scale = 0.0; int m = dim; int n = dim; float **a; a = new float*[dim]; for (i = 0; i < dim; i++) { a[i] = new float[dim]; } float **M; M= new float*[dim]; for (i = 0; i < dim; i++) { M[i] = new 
float[dim]; } float **LT; LT = new float*[dim]; for (int i = 0; i < dim; i++) { LT[i] = new float[dim]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { LT[i][j]=0.0; } for (int i = 0; i < dim; i++) { for (int j = 0; j <= i; j++){ LT[j][i] = L[i*dim+j]; } } #if 0 printf("L = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", L[i*dim+j]); } printf("\n"); } #endif #if 0 printf("LT = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", LT[i][j]); } printf("\n"); } #endif for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=0; M[i][j]=0; } /* Multiplying matrix L and LT and storing in M */ for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) for(int k = 0; k < dim; ++k) { M[i][j] += L[i*dim+k] * LT[k][j]; } for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=M[i][j]; } #if 0 printf("a = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", a[i][j]); } printf("\n"); } #endif #if 0 /* only for validation */ mat Lval(dim,dim); mat LTval(dim,dim); mat aval(dim,dim); for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { Lval(i,j) = Lin(i,j); } LTval = trans(Lval); aval = Lval*LTval; printf("aval = \n"); aval.print(); #endif /* SVD part */ float **v; v = new float*[n]; for (i = 0; i < n; i++) { v[i] = new float[n]; } float *w = new float[n]; float *rv1 = new float[n]; /* Householder reduction to bidiagonal form */ for (i = 0; i < n; i++) { /* left-hand reduction */ l = i + 1; rv1[i] = scale * g; g = s = scale = 0.0; if (i < m) { for (k = i; k < m; k++) scale += fabs(a[k][i]); if (scale) { for (k = i; k < m; k++) { a[k][i] = (a[k][i] / scale); s += (a[k][i] * a[k][i]); } f = a[i][i]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][i] = (f - g); if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = i; k < m; k++) s += (a[k][i] * a[k][j]); f = s / h; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (k = i; k < m; k++) a[k][i] = (a[k][i] * scale); } } w[i] = (scale * g); /* right-hand reduction */ g = s = scale = 0.0; if (i < m && i != n - 1) { for (k = l; k < n; k++) scale += fabs(a[i][k]); if (scale) { for (k = l; k < n; k++) { a[i][k] = (a[i][k] / scale); s += (a[i][k] * a[i][k]); } f = a[i][l]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][l] = (f - g); for (k = l; k < n; k++) rv1[k] = a[i][k] / h; if (i != m - 1) { for (j = l; j < m; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[j][k] * a[i][k]); for (k = l; k < n; k++) a[j][k] += (s * rv1[k]); } } for (k = l; k < n; k++) a[i][k] = (a[i][k] * scale); } } anorm = MAX(anorm, (fabs(w[i]) + fabs(rv1[i]))); } /* accumulate the right-hand transformation */ for (i = n - 1; i >= 0; i--) { if (i < n - 1) { if (g) { for (j = l; j < n; j++) v[j][i] = ((a[i][j] / a[i][l]) / g); /* float division to avoid underflow */ for (j = l; j < n; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[i][k] * v[k][j]); for (k = l; k < n; k++) v[k][j] += (s * v[k][i]); } } for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0; } v[i][i] = 1.0; g = rv1[i]; l = i; } /* accumulate the left-hand transformation */ for (i = n - 1; i >= 0; i--) { l = i + 1; g = w[i]; if (i < n - 1) for (j = l; j < n; j++) a[i][j] = 0.0; if (g) { g = 1.0 / g; if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = l; k < m; k++) s += (a[k][i] * a[k][j]); f = (s / a[i][i]) * g; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (j = i; j < m; j++) a[j][i] = (a[j][i] * g); } else { for (j = i; j < m; j++) a[j][i] = 0.0; } 
++a[i][i]; } /* diagonalize the bidiagonal form */ for (k = n - 1; k >= 0; k--) { /* loop over singular values */ for (its = 0; its < 30000; its++) { /* loop over allowed iterations */ flag = 1; for (l = k; l >= 0; l--) { /* test for splitting */ nm = l - 1; if (fabs(rv1[l]) + anorm == anorm) { flag = 0; break; } if (fabs(w[nm]) + anorm == anorm) break; } if (flag) { c = 0.0; s = 1.0; for (i = l; i <= k; i++) { f = s * rv1[i]; if (fabs(f) + anorm != anorm) { g = w[i]; h = PYTHAG(f, g); w[i] = h; h = 1.0 / h; c = g * h; s = (-f * h); for (j = 0; j < m; j++) { y = a[j][nm]; z = a[j][i]; a[j][nm] = (y * c + z * s); a[j][i] = (z * c - y * s); } } } } z = w[k]; if (l == k) { /* convergence */ if (z < 0.0) { /* make singular value nonnegative */ w[k] = (-z); for (j = 0; j < n; j++) v[j][k] = (-v[j][k]); } break; } if (its >= 30000) { delete[] rv1; fprintf(stderr, "No convergence after 30,000! iterations \n"); return 1; } /* shift from bottom 2 x 2 minor */ x = w[l]; nm = k - 1; y = w[nm]; g = rv1[nm]; h = rv1[k]; f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y); g = PYTHAG(f, 1.0); f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x; /* next QR transformation */ c = s = 1.0; for (j = l; j <= nm; j++) { i = j + 1; g = rv1[i]; y = w[i]; h = s * g; g = c * g; z = PYTHAG(f, h); rv1[j] = z; c = f / z; s = h / z; f = x * c + g * s; g = g * c - x * s; h = y * s; y = y * c; for (jj = 0; jj < n; jj++) { x = v[jj][j]; z = v[jj][i]; v[jj][j] = (x * c + z * s); v[jj][i] = (z * c - x * s); } z = PYTHAG(f, h); w[j] = z; if (z) { z = 1.0 / z; c = f * z; s = h * z; } f = (c * g) + (s * y); x = (c * y) - (s * g); for (jj = 0; jj < m; jj++) { y = a[jj][j]; z = a[jj][i]; a[jj][j] = (y * c + z * s); a[jj][i] = (z * c - y * s); } } rv1[l] = 0.0; rv1[k] = f; w[k] = x; } } delete[] rv1; #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i]); } #endif /* sort the singular values */ float temp; for (i = 0; i < n; ++i) { for (j = i + 1; j < n; ++j) { if (w[i] < w[j]) { temp = w[i]; w[i] = w[j]; w[j] = temp; } } } #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i]); } #endif /* normalization */ float wsum = 0.0; for (i = 0; i < n; i++) { wsum += w[i]; } for (i = 0; i < n; i++) { w[i] = w[i]/wsum; } #if 0 printf("singular values of a (normalized) with wsum =%10.7f\n",wsum); for (i = 0; i < n; i++) { printf("%15.10f\n",w[i]); } #endif float svd_multiplier = (1.0*n*(1.0*n+1))/2.0; svd_multiplier = 1.0/svd_multiplier; #if 0 printf("svd_multiplier = %10.7f\n",svd_multiplier); #endif float reg_term_svd = 0.0; for (i = 0; i < n; i++) { #if 0 printf("%d * %10.7f = %10.7f\n",i+1,w[i],(i+1)*w[i]); #endif reg_term_svd = reg_term_svd + (i + 1) * w[i]; } #if 0 printf("reg_term_svd = %10.7f\n",reg_term_svd); #endif float reg_term_L1 = 0.0; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { reg_term_L1 = reg_term_L1 + M[i][j]* M[i][j]; } #if 0 printf("reg_term_L1 = %10.7f\n",reg_term_L1); #endif for (i = 0; i < n; i++) { delete[] v[i]; delete[] a[i]; delete[] M[i]; delete[] LT[i]; } delete[] LT; delete[] M; delete[] a; delete[] v; delete[] w; *regTerm = wSvd * svd_multiplier *reg_term_svd + w12 * reg_term_L1; #if 0 printf("result = %10.7f\n",*regTerm); #endif return 0; } /* forward mode */ int calcRegTerms(float *L, float *regTerm,float *regTermd, float wSvd, float w12, int dim, int derIndx) { int flag, i, its, j, jj, k, l = 0, nm; codi::RealForward c, f, h, s, x, y, z; codi::RealForward anorm = 0.0, g = 0.0, scale = 0.0; int m = dim; int n = 
dim; codi::RealForward *Lcodi = new codi::RealForward[dim*dim]; for (int i = 0; i < dim*dim; i++) { Lcodi[i] = L[i]; } Lcodi[derIndx].setGradient(1.0); codi::RealForward **a; a = new codi::RealForward*[dim]; for (i = 0; i < dim; i++) { a[i] = new codi::RealForward[dim]; } codi::RealForward **M; M= new codi::RealForward*[dim]; for (i = 0; i < dim; i++) { M[i] = new codi::RealForward[dim]; } codi::RealForward **LT; LT = new codi::RealForward*[dim]; for (int i = 0; i < dim; i++) { LT[i] = new codi::RealForward[dim]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { LT[i][j]=0.0; } for (int i = 0; i < dim; i++) { for (int j = 0; j <= i; j++){ LT[j][i] = Lcodi[i*dim+j]; } } #if 0 printf("LT = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", LT[i][j]); } printf("\n"); } #endif for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=0; M[i][j]=0; } /* Multiplying matrix L and LT and storing in M */ for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) for(int k = 0; k < dim; ++k) { M[i][j] += Lcodi[i*dim+k] * LT[k][j]; } for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=M[i][j]; } #if 0 printf("a = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", a[i][j]); } printf("\n"); } #endif /* SVD part */ codi::RealForward **v; v = new codi::RealForward*[n]; for (i = 0; i < n; i++) { v[i] = new codi::RealForward[n]; } codi::RealForward *w = new codi::RealForward[n]; codi::RealForward *rv1 = new codi::RealForward[n]; /* Householder reduction to bidiagonal form */ for (i = 0; i < n; i++) { /* left-hand reduction */ l = i + 1; rv1[i] = scale * g; g = s = scale = 0.0; if (i < m) { for (k = i; k < m; k++) scale += fabs(a[k][i]); if (scale!= 0) { for (k = i; k < m; k++) { a[k][i] = (a[k][i] / scale); s += (a[k][i] * a[k][i]); } f = a[i][i]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][i] = (f - g); if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = i; k < m; k++) s += (a[k][i] * a[k][j]); f = s / h; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (k = i; k < m; k++) a[k][i] = (a[k][i] * scale); } } w[i] = (scale * g); /* right-hand reduction */ g = s = scale = 0.0; if (i < m && i != n - 1) { for (k = l; k < n; k++) scale += fabs(a[i][k]); if (scale!=0) { for (k = l; k < n; k++) { a[i][k] = (a[i][k] / scale); s += (a[i][k] * a[i][k]); } f = a[i][l]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][l] = (f - g); for (k = l; k < n; k++) rv1[k] = a[i][k] / h; if (i != m - 1) { for (j = l; j < m; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[j][k] * a[i][k]); for (k = l; k < n; k++) a[j][k] += (s * rv1[k]); } } for (k = l; k < n; k++) a[i][k] = (a[i][k] * scale); } } anorm = MAX(anorm, (fabs(w[i]) + fabs(rv1[i]))); } /* accumulate the right-hand transformation */ for (i = n - 1; i >= 0; i--) { if (i < n - 1) { if (g!=0) { for (j = l; j < n; j++) v[j][i] = ((a[i][j] / a[i][l]) / g); /* float division to avoid underflow */ for (j = l; j < n; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[i][k] * v[k][j]); for (k = l; k < n; k++) v[k][j] += (s * v[k][i]); } } for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0; } v[i][i] = 1.0; g = rv1[i]; l = i; } /* accumulate the left-hand transformation */ for (i = n - 1; i >= 0; i--) { l = i + 1; g = w[i]; if (i < n - 1) for (j = l; j < n; j++) a[i][j] = 0.0; if (g!=0) { g = 1.0 / g; if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = l; k < m; k++) s += (a[k][i] * a[k][j]); f = (s / a[i][i]) * g; for (k = i; k < m; k++) a[k][j] += (f * 
a[k][i]); } } for (j = i; j < m; j++) a[j][i] = (a[j][i] * g); } else { for (j = i; j < m; j++) a[j][i] = 0.0; } ++a[i][i]; } /* diagonalize the bidiagonal form */ for (k = n - 1; k >= 0; k--) { /* loop over singular values */ for (its = 0; its < 30000; its++) { /* loop over allowed iterations */ flag = 1; for (l = k; l >= 0; l--) { /* test for splitting */ nm = l - 1; if (fabs(rv1[l]) + anorm == anorm) { flag = 0; break; } if (fabs(w[nm]) + anorm == anorm) break; } if (flag) { c = 0.0; s = 1.0; for (i = l; i <= k; i++) { f = s * rv1[i]; if (fabs(f) + anorm != anorm) { g = w[i]; h = PYTHAG(f, g); w[i] = h; h = 1.0 / h; c = g * h; s = (-f * h); for (j = 0; j < m; j++) { y = a[j][nm]; z = a[j][i]; a[j][nm] = (y * c + z * s); a[j][i] = (z * c - y * s); } } } } z = w[k]; if (l == k) { /* convergence */ if (z < 0.0) { /* make singular value nonnegative */ w[k] = (-z); for (j = 0; j < n; j++) v[j][k] = (-v[j][k]); } break; } if (its >= 30000) { delete[] rv1; fprintf(stderr, "No convergence after 30,000! iterations \n"); return 1; } /* shift from bottom 2 x 2 minor */ x = w[l]; nm = k - 1; y = w[nm]; g = rv1[nm]; h = rv1[k]; f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y); g = PYTHAG(f, 1.0); f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x; /* next QR transformation */ c = s = 1.0; for (j = l; j <= nm; j++) { i = j + 1; g = rv1[i]; y = w[i]; h = s * g; g = c * g; z = PYTHAG(f, h); rv1[j] = z; c = f / z; s = h / z; f = x * c + g * s; g = g * c - x * s; h = y * s; y = y * c; for (jj = 0; jj < n; jj++) { x = v[jj][j]; z = v[jj][i]; v[jj][j] = (x * c + z * s); v[jj][i] = (z * c - x * s); } z = PYTHAG(f, h); w[j] = z; if (z!=0) { z = 1.0 / z; c = f * z; s = h * z; } f = (c * g) + (s * y); x = (c * y) - (s * g); for (jj = 0; jj < m; jj++) { y = a[jj][j]; z = a[jj][i]; a[jj][j] = (y * c + z * s); a[jj][i] = (z * c - y * s); } } rv1[l] = 0.0; rv1[k] = f; w[k] = x; } } delete[] rv1; #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i]); } #endif /* sort the singular values */ codi::RealForward temp; for (i = 0; i < n; ++i) { for (j = i + 1; j < n; ++j) { if (w[i] < w[j]) { temp = w[i]; w[i] = w[j]; w[j] = temp; } } } #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i].getValue()); } #endif /* normalization */ codi::RealForward wsum = 0.0; for (i = 0; i < n; i++) { wsum += w[i]; } for (i = 0; i < n; i++) { w[i] = w[i]/wsum; } #if 0 printf("singular values of a (normalized) with wsum =%10.7f\n",wsum.getValue()); for (i = 0; i < n; i++) { printf("%15.10f\n",w[i].getValue()); } #endif float svd_multiplier = (1.0*n*(1.0*n+1))/2.0; svd_multiplier = 1.0/svd_multiplier; #if 0 printf("svd_multiplier = %10.7f\n",svd_multiplier); #endif codi::RealForward reg_term_svd = 0.0; for (i = 0; i < n; i++) { #if 0 printf("%d * %10.7f = %10.7f\n",i+1,w[i].getValue(),(i+1)*w[i].getValue()); #endif reg_term_svd = reg_term_svd + (i + 1) * w[i]; } #if 0 printf("reg_term_svd = %10.7f\n",reg_term_svd.getValue()); #endif codi::RealForward reg_term_L1 = 0.0; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { reg_term_L1 = reg_term_L1 + M[i][j]* M[i][j]; } #if 0 printf("reg_term_L1 = %10.7f\n",reg_term_L1.getValue()); #endif for (i = 0; i < n; i++) { delete[] v[i]; delete[] a[i]; delete[] M[i]; delete[] LT[i]; } delete[] LT; delete[] M; delete[] a; delete[] v; delete[] w; codi::RealForward result = wSvd * svd_multiplier *reg_term_svd + w12 * reg_term_L1; *regTerm = result.getValue(); *regTermd = result.getGradient(); return 0; } int 
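/* Reverse-mode (adjoint) variant of calcRegTerms: the SVD-based regularization
 * evaluation below is recorded on a CoDiPack tape (codi::RealReverse), so a
 * single primal sweep plus one tape evaluation returns the gradient of the
 * regularization term with respect to every entry of L at once in Lb. The
 * forward-mode overload above, by contrast, propagates only one direction
 * (derIndx) per call. */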
calcRegTerms(float *L, float *Lb,float *result , float wSvd, float w12, int dim) { int flag, i, its, j, jj, k, l = 0, nm; codi::RealReverse *Lcodi = new codi::RealReverse[dim*dim]; for (int i = 0; i < dim*dim; i++) { Lcodi[i] = L[i]; } /* activate tape and register input */ codi::RealReverse::TapeType& tape = codi::RealReverse::getGlobalTape(); tape.setActive(); codi::RealReverse regTerm=0.0; for (int i = 0; i < dim*dim; i++) { tape.registerInput(Lcodi[i]); } codi::RealReverse c, f, h, s, x, y, z; codi::RealReverse anorm = 0.0, g = 0.0, scale = 0.0; int m = dim; int n = dim; codi::RealReverse **a; a = new codi::RealReverse*[dim]; for (i = 0; i < dim; i++) { a[i] = new codi::RealReverse[dim]; } codi::RealReverse **M; M = new codi::RealReverse*[dim]; for (i = 0; i < dim; i++) { M[i] = new codi::RealReverse[dim]; } codi::RealReverse **LT; LT = new codi::RealReverse*[dim]; for (int i = 0; i < dim; i++) { LT[i] = new codi::RealReverse[dim]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { LT[i][j]=0.0; } for (int i = 0; i < dim; i++) { for (int j = 0; j <= i; j++){ LT[j][i] = Lcodi[i*dim+j]; } } #if 0 printf("LT = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", LT[i][j].getValue()); } printf("\n"); } #endif for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=0; M[i][j]=0; } /* Multiplying matrix L and LT and storing in M */ for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) for(int k = 0; k < dim; ++k) { M[i][j] += Lcodi[i*dim+k] * LT[k][j]; } for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=M[i][j]; } #if 0 printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i][j].getValue()); } printf("\n"); } #endif #if 0 printf("a = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", a[i][j].getValue()); } printf("\n"); } #endif /* SVD part */ codi::RealReverse **v; v = new codi::RealReverse*[n]; for (i = 0; i < n; i++) { v[i] = new codi::RealReverse[n]; } codi::RealReverse *w = new codi::RealReverse[n]; codi::RealReverse *rv1 = new codi::RealReverse[n]; /* Householder reduction to bidiagonal form */ for (i = 0; i < n; i++) { /* left-hand reduction */ l = i + 1; rv1[i] = scale * g; g = s = scale = 0.0; if (i < m) { for (k = i; k < m; k++) scale += fabs(a[k][i]); if (scale != 0) { for (k = i; k < m; k++) { a[k][i] = (a[k][i] / scale); s += (a[k][i] * a[k][i]); } f = a[i][i]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][i] = (f - g); if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = i; k < m; k++) s += (a[k][i] * a[k][j]); f = s / h; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (k = i; k < m; k++) a[k][i] = (a[k][i] * scale); } } w[i] = (scale * g); /* right-hand reduction */ g = s = scale = 0.0; if (i < m && i != n - 1) { for (k = l; k < n; k++) scale += fabs(a[i][k]); if (scale !=0) { for (k = l; k < n; k++) { a[i][k] = (a[i][k] / scale); s += (a[i][k] * a[i][k]); } f = a[i][l]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][l] = (f - g); for (k = l; k < n; k++) rv1[k] = a[i][k] / h; if (i != m - 1) { for (j = l; j < m; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[j][k] * a[i][k]); for (k = l; k < n; k++) a[j][k] += (s * rv1[k]); } } for (k = l; k < n; k++) a[i][k] = (a[i][k] * scale); } } anorm = MAX(anorm, (fabs(w[i]) + fabs(rv1[i]))); } /* accumulate the right-hand transformation */ for (i = n - 1; i >= 0; i--) { if (i < n - 1) { if (g !=0) { for (j = l; j < n; j++) v[j][i] = ((a[i][j] / a[i][l]) / g); 
/* float division to avoid underflow */ for (j = l; j < n; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[i][k] * v[k][j]); for (k = l; k < n; k++) v[k][j] += (s * v[k][i]); } } for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0; } v[i][i] = 1.0; g = rv1[i]; l = i; } /* accumulate the left-hand transformation */ for (i = n - 1; i >= 0; i--) { l = i + 1; g = w[i]; if (i < n - 1) for (j = l; j < n; j++) a[i][j] = 0.0; if (g != 0) { g = 1.0 / g; if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = l; k < m; k++) s += (a[k][i] * a[k][j]); f = (s / a[i][i]) * g; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (j = i; j < m; j++) a[j][i] = (a[j][i] * g); } else { for (j = i; j < m; j++) a[j][i] = 0.0; } ++a[i][i]; } /* diagonalize the bidiagonal form */ for (k = n - 1; k >= 0; k--) { /* loop over singular values */ for (its = 0; its < 30000; its++) { /* loop over allowed iterations */ flag = 1; for (l = k; l >= 0; l--) { /* test for splitting */ nm = l - 1; if (fabs(rv1[l]) + anorm == anorm) { flag = 0; break; } if (fabs(w[nm]) + anorm == anorm) break; } if (flag) { c = 0.0; s = 1.0; for (i = l; i <= k; i++) { f = s * rv1[i]; if (fabs(f) + anorm != anorm) { g = w[i]; h = PYTHAG(f, g); w[i] = h; h = 1.0 / h; c = g * h; s = (-f * h); for (j = 0; j < m; j++) { y = a[j][nm]; z = a[j][i]; a[j][nm] = (y * c + z * s); a[j][i] = (z * c - y * s); } } } } z = w[k]; if (l == k) { /* convergence */ if (z < 0.0) { /* make singular value nonnegative */ w[k] = (-z); for (j = 0; j < n; j++) v[j][k] = (-v[j][k]); } break; } if (its >= 30000) { delete[] rv1; fprintf(stderr, "No convergence after 30,000! iterations \n"); return 1; } /* shift from bottom 2 x 2 minor */ x = w[l]; nm = k - 1; y = w[nm]; g = rv1[nm]; h = rv1[k]; f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y); g = PYTHAG(f, 1.0); f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x; /* next QR transformation */ c = s = 1.0; for (j = l; j <= nm; j++) { i = j + 1; g = rv1[i]; y = w[i]; h = s * g; g = c * g; z = PYTHAG(f, h); rv1[j] = z; c = f / z; s = h / z; f = x * c + g * s; g = g * c - x * s; h = y * s; y = y * c; for (jj = 0; jj < n; jj++) { x = v[jj][j]; z = v[jj][i]; v[jj][j] = (x * c + z * s); v[jj][i] = (z * c - x * s); } z = PYTHAG(f, h); w[j] = z; if (z != 0) { z = 1.0 / z; c = f * z; s = h * z; } f = (c * g) + (s * y); x = (c * y) - (s * g); for (jj = 0; jj < m; jj++) { y = a[jj][j]; z = a[jj][i]; a[jj][j] = (y * c + z * s); a[jj][i] = (z * c - y * s); } } rv1[l] = 0.0; rv1[k] = f; w[k] = x; } } delete[] rv1; #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i]); } #endif codi::RealReverse temp; for (i = 0; i < n; ++i) { for (j = i + 1; j < n; ++j) { if (w[i] < w[j]) { temp = w[i]; w[i] = w[j]; w[j] = temp; } } } #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i].getValue()); } #endif codi::RealReverse wsum = 0.0; for (i = 0; i < n; i++) { wsum += w[i]; } for (i = 0; i < n; i++) { w[i] = w[i]/wsum; } #if 0 printf("singular values of a (normalized) with wsum =%10.7f\n",wsum.getValue()); for (i = 0; i < n; i++) { printf("%15.10f\n",w[i].getValue()); } #endif codi::RealReverse svd_multiplier = (1.0*n*(1.0*n+1))/2.0; svd_multiplier = 1.0/svd_multiplier; #if 0 printf("svd_multiplier = %10.7f\n",svd_multiplier); #endif codi::RealReverse reg_term_svd = 0.0; for (i = 0; i < n; i++) { #if 0 printf("%d * %10.7f = %10.7f\n",i+1,w[i].getValue(),(i+1)*w[i].getValue()); #endif reg_term_svd = reg_term_svd + (i + 1) * w[i]; } #if 0 
printf("reg_term_svd = %10.7f\n",reg_term_svd.getValue()); #endif codi::RealReverse reg_term_L1 = 0.0; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { reg_term_L1 = reg_term_L1 + M[i][j]* M[i][j]; } #if 0 printf("reg_term_L1 = %10.7f\n",reg_term_L1.getValue()); #endif regTerm = wSvd * svd_multiplier *reg_term_svd + w12 * reg_term_L1; #if 0 printf("w12 * reg_term_L1 = %10.7f\n",w12 * reg_term_L1.getValue()); #endif tape.registerOutput(regTerm); tape.setPassive(); regTerm.setGradient(1.0); tape.evaluate(); for (int i = 0; i < numVar*numVar; i++) { Lb[i] = Lcodi[i].getGradient(); } tape.reset(); *result = regTerm.getValue(); for (i = 0; i < n; i++) { delete[] v[i]; delete[] a[i]; delete[] M[i]; delete[] LT[i]; } delete[] LT; delete[] M; delete[] a; delete[] v; delete[] w; delete[] Lcodi; return 0; } float calcKernelValCPU(rowvec &xi, rowvec &xj, mat &M, float sigma){ rowvec diff = xi - xj; colvec diffT = trans(diff); vec matVecProd = M * diffT; // printf("M * xdiff = \n"); // matVecProd.print(); float metricVal = dot(diff, M * diffT); float sqr_two_pi = sqrt(2.0 * 3.14159265359); float kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); return (kernelVal); } /* * calculates the generalized Mahalanobis distance between two points * * @param[in] x_i : first vector * @param[in] X_j : second vector * @param[in] M : dim x dim matrix * @param[in] dim * @return distance * * */ float calcMetric(float *xi, float *xj, float *M, int dim) { #if 0 printf("calling calcMetric (primal)...\n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j]); } printf("\n"); } #endif float *diff = new float[dim]; for (int i = 0; i < dim; i++) { diff[i] = xi[i] - xj[i]; } #if 0 rowvec xi_val(dim); rowvec xj_val(dim); rowvec diff_val(dim); mat M_val(dim, dim); for (int i = 0; i < dim; i++) { xi_val(i) = xi[i]; xj_val(i) = xj[i]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) M_val(i, j) = M[i][j]; diff_val = xi_val - xj_val; printf("diff_val=\n"); diff_val.print(); colvec diffT = trans(diff_val); vec matVecProd = M_val * diffT; printf("M * xdiff = \n"); matVecProd.print(); float metric_val = dot(diff_val, M_val * diffT); printf("metric_val = %10.7f\n", metric_val); #endif float *tempVec = new float[dim]; float sum = 0.0; for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { sum = sum + M[i*dim+j] * diff[j]; } tempVec[i] = sum; sum = 0.0; } #if 0 printf("tempVec = \n"); for(int i=0; i<dim; i++) { printf("%10.7f \n",tempVec[i] ); } #endif sum = 0.0; for (int i = 0; i < dim; i++) { sum = sum + tempVec[i] * diff[i]; } #if 0 printf("sum = %10.7f\n",sum); #endif delete[] diff; delete[] tempVec; if (sum < 0.0) { fprintf(stderr, "Error: metric is negative! 
at FILE = %s, LINE = %d.\n",__FILE__, __LINE__); exit(-1); } return sum; } /* * calculates the generalized Mahalanobis distance between two points, codiPack reverse mode * (differentiated in reverse mode ) * @param[in] x_i : first vector * @param[in] X_j : second vector * @param[in] M : dim x dim matrix * @param[in] dim * @return distance * * */ codi::RealReverse calcMetric(float *xi, float *xj, codi::RealReverse *M, int dim) { #if 0 printf("calling calcMetric (adjoint)...\n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } #endif codi::RealReverse *diff = new codi::RealReverse[dim]; for (int i = 0; i < dim; i++) { diff[i] = xi[i] - xj[i]; } #if 0 rowvec xi_val(dim); rowvec xj_val(dim); rowvec diff_val(dim); mat M_val(dim, dim); for (int i = 0; i < dim; i++) { xi_val(i) = xi[i]; xj_val(i) = xj[i]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { M_val(i, j) = M[i*dim+j].getValue(); } diff_val = xi_val - xj_val; printf("diff_val=\n"); diff_val.print(); colvec diffT = trans(diff_val); vec matVecProd = M_val * diffT; printf("M * xdiff = \n"); matVecProd.print(); float metric_val = dot(diff_val, M_val * diffT); printf("metric_val = %10.7f\n", metric_val); #endif codi::RealReverse *tempVec = new codi::RealReverse[dim]; codi::RealReverse sum = 0.0; for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { sum = sum + M[i*dim+j] * diff[j]; } tempVec[i] = sum; sum = 0.0; } #if 0 printf("tempVec = \n"); for (int i = 0; i < dim; i++) { printf("%10.7f \n", tempVec[i].getValue()); } #endif sum = 0.0; for (int i = 0; i < dim; i++) { sum = sum + tempVec[i] * diff[i]; } #if 0 printf("sum = %10.7f\n", sum.getValue()); #endif delete[] diff; delete[] tempVec; if (sum < 0.0) { fprintf(stderr, "Error: metric is negative! 
at %s, line %d.\n",__FILE__, __LINE__); fprintf(stderr, "metric val = %10.7f\n",sum.getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } return sum; } codi::RealForward calcMetric(float *xi, float *xj, codi::RealForward *M, int dim) { #if 0 printf("calling calcMetric (adjoint)...\n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } #endif codi::RealForward *diff = new codi::RealForward[dim]; for (int i = 0; i < dim; i++) { diff[i] = xi[i] - xj[i]; } #if 0 rowvec xi_val(dim); rowvec xj_val(dim); rowvec diff_val(dim); mat M_val(dim, dim); for (int i = 0; i < dim; i++) { xi_val(i) = xi[i]; xj_val(i) = xj[i]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { M_val(i, j) = M[i*dim+j].getValue(); } diff_val = xi_val - xj_val; printf("diff_val=\n"); diff_val.print(); colvec diffT = trans(diff_val); vec matVecProd = M_val * diffT; printf("M * xdiff = \n"); matVecProd.print(); float metric_val = dot(diff_val, M_val * diffT); printf("metric_val = %10.7f\n", metric_val); #endif codi::RealForward *tempVec = new codi::RealForward[dim]; codi::RealForward sum = 0.0; for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { sum = sum + M[i*dim+j] * diff[j]; } tempVec[i] = sum; sum = 0.0; } #if 0 printf("tempVec = \n"); for (int i = 0; i < dim; i++) { printf("%10.7f \n", tempVec[i].getValue()); } #endif sum = 0.0; for (int i = 0; i < dim; i++) { sum = sum + tempVec[i] * diff[i]; } #if 0 printf("sum = %10.7f\n", sum.getValue()); #endif delete[] diff; delete[] tempVec; if (sum < 0.0) { fprintf(stderr, "Error: metric is negative! at %s, line %d.\n",__FILE__, __LINE__); fprintf(stderr, "metric val = %10.7f\n",sum.getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } return sum; } float gaussianKernel(float *xi, float *xj, float sigma, float *M, int dim) { #if 0 printf("calling gaussianKernel...\n"); #endif /* calculate distance between xi and xj with the matrix M */ float metricVal = calcMetric(xi, xj, M, dim); #if 0 printf("metricVal = %10.7f\n",metricVal); #endif float sqr_two_pi = sqrt(2.0 * datum::pi); float kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); #if 0 printf("kernelVal = %10.7f\n",kernelVal); #endif if(isnan(kernelVal)){ fprintf(stderr, "Error: kernel value is NaN! at %s, line %d.\n",__FILE__, __LINE__); exit(-1); } if(kernelVal < 0.0){ fprintf(stderr, "Error: kernel value is negative! at %s, line %d.\n",__FILE__, __LINE__); exit(-1); } kernelVal += 10E-14; return kernelVal; } codi::RealReverse gaussianKernel(float *xi, float *xj, codi::RealReverse sigma, codi::RealReverse *M, int dim) { #if 0 printf("calling gaussianKernel...\n"); #endif /* calculate distance between xi and xj with the matrix M */ codi::RealReverse metricVal = calcMetric(xi, xj, M, dim); #if 0 printf("metricVal = %10.7f\n",metricVal.getValue()); #endif float sqr_two_pi = sqrt(2.0 * datum::pi); codi::RealReverse kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); if(isnan(kernelVal.getValue())){ fprintf(stderr, "Error: kernel value is NaN! 
at %s, line %d.\n",__FILE__, __LINE__); printf("sigma = %10.7f\n",sigma.getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } kernelVal += 10E-14; // printf("EPSILON = %10.7f ", EPSILON); if(kernelVal.getValue() < 0.0){ fprintf(stderr, "Error: kernel value is negative or zero! at %s, line %d.\n",__FILE__, __LINE__); fprintf(stderr, "kernelVal = %20.15f\n",kernelVal.getValue() ); fprintf(stderr, "metric val = %20.15f\n",metricVal.getValue()); fprintf(stderr, "sigma = %20.15f\n",sigma.getValue()); fprintf(stderr, "exp(-metricVal / (2 * sigma * sigma)) = %20.15f\n",exp(-metricVal / (2 * sigma * sigma)).getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } #if 0 printf("kernelVal = %10.7f\n",kernelVal.getValue()); #endif return kernelVal; } codi::RealForward gaussianKernel(float *xi, float *xj, codi::RealForward sigma, codi::RealForward *M, int dim) { /* calculate distance between xi and xj with the matrix M */ codi::RealForward metricVal = calcMetric(xi, xj, M, dim); float sqr_two_pi = sqrt(2.0 * datum::pi); codi::RealForward kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); if(isnan(kernelVal.getValue())){ fprintf(stderr, "Error: kernel value is NaN! at %s, line %d.\n",__FILE__, __LINE__); printf("sigma = %10.7f\n",sigma.getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } kernelVal += 10E-14; // printf("EPSILON = %10.7f ", EPSILON); if(kernelVal.getValue() < 0.0){ fprintf(stderr, "Error: kernel value is negative or zero! 
at %s, line %d.\n",__FILE__, __LINE__); fprintf(stderr, "kernelVal = %20.15f\n",kernelVal.getValue() ); fprintf(stderr, "metric val = %20.15f\n",metricVal.getValue()); fprintf(stderr, "sigma = %20.15f\n",sigma.getValue()); fprintf(stderr, "exp(-metricVal / (2 * sigma * sigma)) = %20.15f\n",exp(-metricVal / (2 * sigma * sigma)).getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } #if 0 printf("kernelVal = %10.7f\n",kernelVal.getValue()); #endif return kernelVal; } void calcLossFunCPU(float *result, float *input, float *data,int N){ float LT[numVar][numVar]; float L[numVar][numVar]; float M[numVar*numVar+1]; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) L[i][j] = input[i*numVar + j]; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) LT[i][j] = 0.0; for (int i = 0; i < numVar; ++i) { for (int j = 0; j < i+1; ++j) LT[j][i] = L[i][j]; } for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) M[i*numVar + j] = 0; /* Multiplying matrix L and LT and storing in M */ for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) for (int k = 0; k < numVar; ++k) M[i*numVar + j] = M[i*numVar + j] + L[i][k]*LT[k][j]; M[numVar*numVar] = input[numVar*numVar]; float sigma = M[numVar*numVar]; float *xp = new float[numVar]; float *xi = new float[numVar]; float *kernelVal = new float[N]; float lossFunc = 0.0; for (int i = 0; i < N; i++) { #if 0 printf("kernel regression for the sample number %d\n",i); #endif for (int k = 0; k < numVar; k++) { xp[k] = data[i*(numVar+1)+k]; } float kernelSum = 0.0; for (int j = 0; j < N; j++) { if (i != j) { for (int k = 0; k < numVar; k++) { xi[k] = data[j*(numVar+1)+k]; } kernelVal[j] = gaussianKernel(xi, xp, sigma, M, numVar); kernelSum += kernelVal[j]; #if 0 printf("kernelVal[%d]=%10.7f\n",j,kernelVal[j]); #endif } } float fApprox = 0.0; for (int j = 0; j < N; j++) { if (i != j) { fApprox += kernelVal[j] * data[j*(numVar+1)+numVar]; } } fApprox = fApprox / kernelSum; #if 0 printf("fApprox = %10.7f\n",fApprox); printf("fExact = %10.7f\n",data[i*(numVar+1)+numVar]); #endif lossFunc += (fApprox - data[i*(numVar+1)+numVar]) * (fApprox - data[i*(numVar+1)+numVar]); } // end of i loop lossFunc = lossFunc / N; *result = lossFunc; delete[] xp; delete[] xi; delete[] kernelVal; } void calcLossFunCPU(codi::RealReverse *result, codi::RealReverse *input, float *inputb,float *data,int N){ /* activate tape and register input */ codi::RealReverse::TapeType& tape = codi::RealReverse::getGlobalTape(); tape.setActive(); for (int i = 0; i < numVar*numVar+1; i++) { tape.registerInput(input[i]); } codi::RealReverse LT[numVar][numVar]; codi::RealReverse L[numVar][numVar]; codi::RealReverse M[numVar*numVar+1]; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) M[i*numVar + j] = 0; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) L[i][j] = input[i*numVar + j]; #if 0 printf("L = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",L[i][j].getValue()); } printf("\n"); } #endif for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) { LT[i][j] = 0.0; } for (int i = 0; i < numVar; ++i) { for (int j = 0; j < i+1; ++j){ LT[j][i] = L[i][j]; } } #if 0 printf("LT = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",LT[i][j].getValue()); } printf("\n"); } #endif /* Multiplying matrix L and LT and storing in M */ for (int i = 0; i 
< numVar; ++i) for (int j = 0; j < numVar; ++j) for (int k = 0; k < numVar; ++k) { M[i*numVar + j] = M[i*numVar + j] + L[i][k]*LT[k][j]; } #if 0 printf("M = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",M[i*numVar + j].getValue()); } printf("\n"); } #endif M[numVar*numVar] = input[numVar*numVar]; codi::RealReverse sigma = M[numVar*numVar]; float *xi = new float[numVar]; float *xj = new float[numVar]; codi::RealReverse **kernelValTable = new codi::RealReverse*[N]; for(int i=0; i<N;i++) { kernelValTable[i] = new codi::RealReverse[N]; } for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) kernelValTable[i][j] = 0.0; for (int i = 0; i < N; i++) { for (int j = i+1; j < N; j++) { for (int k = 0; k < numVar; k++) { xi[k] = data[i*(numVar+1)+k]; xj[k] = data[j*(numVar+1)+k]; } kernelValTable[i][j] = gaussianKernel(xi, xj, sigma, M, numVar); kernelValTable[j][i] = kernelValTable[i][j]; // printf("%d kernelValTable[%d][%d] = %10.7f\n",i*N+j,i,j,kernelValTable[i][j].getValue()); } } codi::RealReverse lossFunc = 0.0; for (int i = 0; i < N; i++) { #if 0 printf("kernel regression for the sample number %d\n",i); #endif codi::RealReverse kernelSum = 0.0; for (int j = 0; j < N; j++) { if (i != j) { kernelSum += kernelValTable[i][j]; } } codi::RealReverse fApprox = 0.0; for (int j = 0; j < N; j++) { if (i != j) { fApprox += kernelValTable[i][j] * data[j*(numVar+1)+numVar]; } } fApprox = fApprox / kernelSum; #if 0 printf("fApprox = %10.7f\n",fApprox.getValue()); printf("fExact = %10.7f\n",data[i*(numVar+1)+numVar]); #endif lossFunc += (fApprox - data[i*(numVar+1)+numVar]) * (fApprox - data[i*(numVar+1)+numVar]); } // end of i loop lossFunc = lossFunc / N; #if 1 printf("lossFunc (reverse mode CodiPack) = %10.7f\n",lossFunc.getValue()); #endif *result = lossFunc; tape.registerOutput(*result); tape.setPassive(); result->setGradient(1.0); tape.evaluate(); #if 0 printf("Mb = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",M[i*numVar + j].getGradient()); } printf("\n"); } #endif for (int i = 0; i < numVar*numVar+1; i++) { inputb[i] = input[i].getGradient(); } tape.reset(); delete[] xi; delete[] xj; for(int i=0; i<N;i++) { delete[] kernelValTable[i]; } delete[] kernelValTable; } void calcLossFunCPU(codi::RealForward *result, codi::RealForward *input,int tldIndx, float *data,int N){ input[tldIndx].setGradient(1.0); codi::RealForward LT[numVar][numVar]; codi::RealForward L[numVar][numVar]; codi::RealForward M[numVar*numVar+1]; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) L[i][j] = input[i*numVar + j]; #if 0 printf("L = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",L[i][j].getValue()); } printf("\n"); } #endif for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) { LT[i][j] = 0.0; } for (int i = 0; i < numVar; ++i) { for (int j = 0; j < i+1; ++j){ LT[j][i] = L[i][j]; } } #if 0 printf("LT = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",LT[i][j].getValue()); } printf("\n"); } #endif for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) M[i*numVar + j] = 0; /* Multiplying matrix L and LT and storing in M */ for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) for (int k = 0; k < numVar; ++k) { M[i*numVar + j] = M[i*numVar + j] + L[i][k]*LT[k][j]; } #if 0 printf("M = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",M[i*numVar + j].getValue()); } 
printf("\n"); } #endif M[numVar*numVar] = input[numVar*numVar]; codi::RealForward sigma = M[numVar*numVar]; float *xi = new float[numVar]; float *xj = new float[numVar]; codi::RealForward **kernelValTable = new codi::RealForward*[N]; for(int i=0; i<N;i++) { kernelValTable[i] = new codi::RealForward[N]; } for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) kernelValTable[i][j] = 0.0; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { if(j>i){ for (int k = 0; k < numVar; k++) { xi[k] = data[i*(numVar+1)+k]; xj[k] = data[j*(numVar+1)+k]; } kernelValTable[i][j] = gaussianKernel(xi, xj, sigma, M, numVar); } } } codi::RealForward lossFunc = 0.0; for (int i = 0; i < N; i++) { #if 0 printf("kernel regression for the sample number %d\n",i); #endif codi::RealForward kernelSum = 0.0; for (int j = 0; j < N; j++) { if (i != j) { kernelSum += kernelValTable[i][j]; } } codi::RealForward fApprox = 0.0; for (int j = 0; j < N; j++) { if (i != j) { fApprox += kernelValTable[i][j] * data[j*(numVar+1)+numVar]; } } fApprox = fApprox / kernelSum; #if 0 printf("fApprox = %10.7f\n",fApprox.getValue()); printf("fExact = %10.7f\n",data[i*(numVar+1)+numVar]); #endif lossFunc += (fApprox - data[i*(numVar+1)+numVar]) * (fApprox - data[i*(numVar+1)+numVar]); } // end of i loop lossFunc = lossFunc / N; #if 0 printf("lossFunc = %10.7f\n",lossFunc.getValue()); #endif *result = lossFunc; delete[] xi; delete[] xj; for(int i=0; i<N;i++) { delete[] kernelValTable[i]; } delete[] kernelValTable; } __global__ void calculateKernelValues_b(float *ab, float *X, float *kernelValTable, float *kernelValTableb, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; float sigma = MDevice[numVar*numVar]; float sigmab = 0.0; /* calculate column index */ int indx2 = tid%N; /* calculate row index */ int indx1 = tid/N; if (indx2 > indx1) { int off1 = indx1*(numVar+1); int off2 = indx2*(numVar+1); float diff[numVar]; float tempVec[numVar]; float tempVecb[numVar]; float sumb = 0.0; float kernelValb = 0.0; float temp; float temp0; float tempb; float tempb0; for (int k = 0; k < numVar; ++k) diff[k] = X[off1 + k] - X[off2 + k]; float sum = 0.0; for (int i = 0; i < numVar; ++i) { for (int j = 0; j < numVar; ++j) sum = sum + MDevice[i*numVar+j]*diff[j]; tempVec[i] = sum; sum = 0.0; } sum = 0.0; for (int i = 0; i < numVar; ++i) sum = sum + tempVec[i]*diff[i]; float sqr_two_pi; sqr_two_pi = sqrt(2.0*3.14159265359); float kernelVal = 1.0/(sigma*sqr_two_pi)*exp(-sum/(2*sigma*sigma))+10E-12; kernelValb = kernelValTableb[indx1*N + indx2]; kernelValTableb[indx1*N + indx2] = 0.0; tempb = kernelValb/(sqr_two_pi*sigma); temp = 2*(sigma*sigma); temp0 = sum/temp; // temp0 = sum/2*(sigma*sigma) tempb0 = -(exp(-temp0)*tempb/temp); // -(exp(-sum/2*(sigma*sigma))*kernelValb/(sqr_two_pi*sigma)/temp) sumb = tempb0; sigmab = -(exp(-temp0)*tempb/sigma) - 2*2*temp0*sigma*tempb0; for (int i = 0; i < numVar; ++i){ tempVecb[i] = 0.0; } for (int i = numVar-1; i > -1; --i){ tempVecb[i] = tempVecb[i] + diff[i]*sumb; } for (int i = numVar-1; i > -1; --i) { sumb = tempVecb[i]; tempVecb[i] = 0.0; for (int j = numVar-1; j > -1; --j){ float addTerm = diff[j]*sumb; atomicAdd( &ab[i*numVar + j],addTerm ); } // ab[i*numVar + j] = ab[i*numVar + j] + diff[j]*sumb; } } atomicAdd( &ab[numVar*numVar],sigmab ); // ab[numVar*numVar] = ab[numVar*numVar] + sigmab; } __global__ void calculateKernelValues(float *X, float *kernelValTable, int N){ int tid = threadIdx.x + blockIdx.x * blockDim.x; float sigma = MDevice[numVar*numVar]; /* calculate column index */ int indx2 = tid%N; 
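	/* Each thread owns one (row, column) entry of the N x N kernel table:
	   indx1 = tid / N is the row and indx2 = tid % N the column. Because the
	   Gaussian kernel is symmetric, only the upper triangle (indx2 > indx1) is
	   filled here; the loss kernels below mirror the index when reading it. */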
	/* calculate row index */
	int indx1 = tid/N;

	if(indx2 > indx1){

		int off1 = indx1*(numVar+1);
		int off2 = indx2*(numVar+1);

		float diff[numVar];

		for (int k = 0; k < numVar; k++) {
			diff[k] = X[off1+k] - X[off2+k];
		}

		float tempVec[numVar];
		float sum = 0.0;

		for (int i = 0; i < numVar; i++) {
			for (int j = 0; j < numVar; j++) {
				sum = sum + MDevice[i*numVar+j] * diff[j];
			}
			tempVec[i] = sum;
			sum = 0.0;
		}

		sum = 0.0;
		for (int i = 0; i < numVar; i++) {
			sum = sum + tempVec[i] * diff[i];
		}

		float sqr_two_pi = sqrt(2.0 * 3.14159265359);
		float kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-sum / (2 * sigma * sigma)) + 10E-12;

		kernelValTable[indx1*N+indx2]= kernelVal;
	}
}

__global__ void calculateLossKernel(float *X,float *kernelValTable, float *sum, int N){

	int tid = threadIdx.x + blockIdx.x * blockDim.x;

	if(tid < N){

		float lossFunc = 0.0;
		float kernelSum = 0.0;

		for(int i=0; i<N; i++){
			if(tid != i){
				int indxKernelValTable;
				if(i<tid) {
					indxKernelValTable = i*N+tid;
				}
				else{
					indxKernelValTable = tid*N+i;
				}
				kernelSum += kernelValTable[indxKernelValTable];
			}
		}

		float fapprox=0.0;

		for(int i=0; i<N; i++){
			if(tid != i){
				int indxKernelValTable;
				if(i<tid) {
					indxKernelValTable = i*N+tid;
				}
				else{
					indxKernelValTable = tid*N+i;
				}
				fapprox += (kernelValTable[indxKernelValTable]/kernelSum)* X[i*(numVar+1)+numVar];
			}
		}

		// lossFunc = (fapprox - X[tid*(numVar+1)+numVar]) * (fapprox - X[tid*(numVar+1)+numVar]);
		lossFunc = fabs(fapprox - X[tid*(numVar+1)+numVar]);

		sum[tid] = lossFunc;
	}
}

__global__ void calculateLossKernel_b(float *X, float *kernelValTable, float *kernelValTableb, float *sum, float *sumb, int N) {

	int tid = threadIdx.x + blockIdx.x * blockDim.x;

	if (tid < N) {

		float lossFunc;
		float lossFuncb;
		float kernelSum=0.0;
		float kernelSumb;
		float fapproxb;

		for (int i = 0; i < N; ++i){
			if (tid != i) {
				int indxKernelValTable;
				if (i < tid)
					indxKernelValTable = i*N + tid;
				else
					indxKernelValTable = tid*N + i;
				kernelSum = kernelSum + kernelValTable[indxKernelValTable];
			}
		}

		float fapprox = 0.0;

		for (int i = 0; i < N; ++i){
			if (tid != i) {
				int indxKernelValTable;
				if (i < tid)
					indxKernelValTable = i*N + tid;
				else
					indxKernelValTable = tid*N + i;
				fapprox = fapprox + kernelValTable[indxKernelValTable]/ kernelSum*X[i*(numVar+1)+numVar];
#if 0
				if (isnan (fapprox ) || isinf (fapprox) ){
					printf("fapprox is NaN or inf %10.7f\n",kernelSum);
					assert(0);
				}
#endif
			}
		}

		// lossFunc = (fapprox - X[tid*(numVar+1)+numVar]) * (fapprox - X[tid*(numVar+1)+numVar]);
		lossFunc = fabs ( (fapprox - X[tid*(numVar+1)+numVar]) );

		sum[tid] = lossFunc;
		lossFuncb = sumb[tid];
#if 0
		if (isnan (sumb[tid] ) || isinf (sumb[tid]) ){
			printf("sumb[tid] is NaN or inf!\n");
		}
#endif
		sumb[tid] = 0.0;

		// fapproxb = 2*(fapprox-X[tid*(numVar+1)+numVar])*lossFuncb;
		if((fapprox - X[tid*(numVar+1)+numVar]) >= 0){
			fapproxb = lossFuncb;
		}
		else{
			fapproxb = -lossFuncb;
		}

		kernelSumb = 0.0;

		for (int i = N-1; i > -1; --i) {
			if (tid != i) {
				float tempb;
				int indxKernelValTable;
				if (i < tid)
					indxKernelValTable = i*N + tid;
				else
					indxKernelValTable = tid*N + i;
				tempb = X[i*(numVar+1)+numVar]*fapproxb/kernelSum;
				kernelValTableb[indxKernelValTable] = kernelValTableb[indxKernelValTable] + tempb;
				kernelSumb = kernelSumb - kernelValTable[indxKernelValTable]*tempb/kernelSum;
			}
		}

		for (int i = N-1; i > -1; --i) {
			if (tid != i) {
				int indxKernelValTable;
				if (i < tid)
					indxKernelValTable = i*N + tid;
				else
					indxKernelValTable = tid*N + i;
				kernelValTableb[indxKernelValTable] = kernelValTableb[indxKernelValTable] + kernelSumb;
			}
		}
	}
}

void calcLossFunGPU(float *result, float *input, float *data,int N){
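	/* Host driver for the GPU loss evaluation:
	   1) build M = L * L^T on the host and copy it, together with sigma, into
	      the constant-memory array MDevice,
	   2) copy the training data to the device,
	   3) launch calculateKernelValues to fill the upper triangle of the N x N
	      Gaussian kernel table,
	   4) launch calculateLossKernel to obtain one leave-one-out loss entry per
	      sample, and
	   5) copy the per-sample losses back to the host and average them into
	      *result. */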
hipEvent_t start, stop; hipEventCreate( &start ) ; hipEventCreate( &stop ) ; hipEventRecord( start, 0 ) ; // Error code to check return values for CUDA calls hipError_t err = hipSuccess; float LT[numVar][numVar]; float L[numVar][numVar]; float M[numVar*numVar+1]; for (int i = 0; i < numVar; i++) for (int j = 0; j < numVar; j++) { L[i][j]=input[i*numVar+j]; } #if 1 printf("Data (host) = \n"); for (int i = 0; i < N; i++) { for (int j = 0; j < numVar+1; j++) { printf("%10.7f ", data[i*(numVar+1)+j]); } printf("\n"); } #endif #if 1 printf("L = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", L[i][j]); } printf("\n"); } #endif for (int i = 0; i < numVar; i++) for (int j = 0; j < numVar; j++) { LT[i][j]=0.0; } for (int i = 0; i < numVar; i++) { for (int j = 0; j <= i; j++){ LT[j][i] = L[i][j]; } } #if 1 printf("LT = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", LT[i][j]); } printf("\n"); } #endif for(int i = 0; i < numVar; ++i) for(int j = 0; j < numVar; ++j) { M[i*numVar+j]=0; } /* Multiplying matrix L and LT and storing in M */ for(int i = 0; i < numVar; ++i) for(int j = 0; j < numVar; ++j) for(int k = 0; k < numVar; ++k) { M[i*numVar+j] += L[i][k] * LT[k][j]; } #if 0 printf("M = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", M[i*numVar+j]); } printf("\n"); } #endif M[numVar*numVar] = input[numVar*numVar]; /* copy the values of M to the constant memory */ err= hipMemcpyToSymbol(MDevice,M, (numVar*numVar+1)*sizeof(float)); //for(int i=0; i<numVar*numVar+1; i++)MDevice[i] = M[i]; if (err != hipSuccess) { fprintf(stderr, "Failed to copy matrix M from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *dataDevice; // allocate the memory on the GPU for the data matrix err = hipMalloc(&dataDevice, N *(numVar+1) * sizeof(float) ) ; if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector data (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(dataDevice, data, N *(numVar+1) *sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector data from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *kernelValuesDevice; // allocate the memory on the GPU for kernel Values err = hipMalloc(&kernelValuesDevice, N*N* sizeof(float) ) ; if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector kernel values (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } int number_of_blocks = (N*N+number_of_threads_per_block-1)/number_of_threads_per_block; printf("Launching the first kernel with %d blocks...\n",number_of_blocks); hipLaunchKernelGGL(( calculateKernelValues), dim3(number_of_blocks),dim3(number_of_threads_per_block), 0, 0, dataDevice, kernelValuesDevice, N); hipDeviceSynchronize(); printf("Kernel: calculateKernelValues is done ...\n"); #if 1 /* this part is for validation */ mat Mval(numVar,numVar); mat Xval(N,numVar); vec ys(N); float *kernelValuesHost = new float[N*N]; err = hipMemcpy(kernelValuesHost, kernelValuesDevice, N*N*sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector kernelValues from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } for(int i=0; i<numVar; i++){ for(int j=0; j<numVar; j++){ Mval(i,j) = M[i*numVar+j]; } } printf("Mval = \n"); Mval.print(); for(int i=0; i<N; i++){ 
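			/* each row of "data" stores one sample: the first numVar entries are
			   the (normalized) inputs, the last entry is the function value */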
for(int j=0; j<numVar; j++){ Xval(i,j) = data[i*(numVar+1)+j]; } ys(i) = data[i*(numVar+1)+(numVar)]; } printf("Xval = \n"); Xval.print(); printf("ys = \n"); ys.print(); float sigma = input[numVar*numVar]; rowvec xi,xj; for(int i=0; i<N; i++){ for(int j=i+1; j<N; j++){ xi = Xval.row(i); xj = Xval.row(j); float kernelValCPU = calcKernelValCPU(xi, xj, Mval, sigma); float kernelValGPU = kernelValuesHost[i*N+j]; printf("kernelValCPU = %19.7f, kernelValGPU = %19.7f, error = %15.12f\n",kernelValCPU,kernelValGPU,kernelValCPU-kernelValGPU); } } delete[] kernelValuesHost; #endif /* allocate the memory on the GPU for the kernelsum */ float *lossSumDevice; err = hipMalloc(&lossSumDevice, N * sizeof(float) ) ; if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector lossSumDevice (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *lossSumHost = new float[N]; number_of_blocks = (N+number_of_threads_per_block-1)/number_of_threads_per_block; printf("Launching the second kernel with %d blocks...\n",number_of_blocks); hipLaunchKernelGGL(( calculateLossKernel), dim3(number_of_blocks),dim3(number_of_threads_per_block), 0, 0, dataDevice,kernelValuesDevice, lossSumDevice, N); hipDeviceSynchronize(); printf("Kernel: calculateLossKernel is done ...\n"); err = hipMemcpy(lossSumHost, lossSumDevice, N*sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector lossSum from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } #if 0 vec lossValCPU(N); /* this part is for validation */ for (int i=0; i<N; i++) { rowvec xi = Xval.row(i); float kernelSum=0.0; for(int j=0; j<N; j++){ rowvec xj = Xval.row(j); if(i !=j){ float kernelVal = calcKernelValCPU(xi, xj, Mval, sigma); kernelSum += kernelVal; } } float sum = 0.0; for(int j=0; j<N; j++){ rowvec xj = Xval.row(j); float kernelVal = calcKernelValCPU(xi, xj, Mval, sigma); if(i !=j){ sum+=ys(j)*kernelVal; } } sum = sum/kernelSum; lossValCPU(i) = (ys(i)-sum)*(ys(i)-sum); } for (int i=0; i<N; i++) { printf( "lossGPU[%d] = %10.7f, lossGPU[%d] = %10.7f\n", i,lossSumHost[i],i, lossValCPU(i)); } #endif float totalLoss=0.0; for (int i=0; i<N; i++) { totalLoss+=lossSumHost[i]; } *result = totalLoss/N; hipEventRecord( stop, 0 ) ; hipEventSynchronize( stop ) ; float elapsedTime; hipEventElapsedTime( &elapsedTime,start, stop ) ; printf( "Time to generate:%3.1f ms\n", elapsedTime ); hipEventDestroy( start ) ; hipEventDestroy( stop ) ; delete[] lossSumHost; hipFree(lossSumDevice); hipFree(kernelValuesDevice); hipFree(dataDevice); } void calcLossFunGPU_b(float *result, float *resultb, float *input, float *inputb, float *data, int N) { #if 0 printf("calling calcLossFunGPU_b...\n"); printf("resultb = %10.7f\n",*resultb); printf("Data has %d points\n",N); #endif #if 0 hipEvent_t start, stop; hipEventCreate( &start ) ; hipEventCreate( &stop ) ; hipEventRecord( start, 0 ) ; #endif // Error code to check return values for CUDA calls hipError_t err = hipSuccess; float LT[numVar][numVar]; float LTb[numVar][numVar]; float L[numVar][numVar]; float Lb[numVar][numVar]; float M[numVar*numVar + 1]; float Mb[numVar*numVar + 1]; for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ L[i][j] = input[i*numVar + j]; Lb[i][j] = 0.0; LT[i][j] = 0.0; LTb[i][j] = 0.0; } } for (int i = 0; i < numVar; ++i) { for (int j = 0; j < i+1; ++j) LT[j][i] = L[i][j]; } #if 0 printf("L = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",L[i][j]); } 
printf("\n"); } printf("LT = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",LT[i][j]); } printf("\n"); } #endif for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) { M[i*numVar + j] = 0; Mb[i*numVar + j] = 0; } /* Multiplying matrix L and LT and storing in M */ for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) for (int k = 0; k < numVar; ++k) M[i*numVar + j] = M[i*numVar + j] + L[i][k]*LT[k][j]; #if 0 printf("M = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",M[i*numVar + j]); } printf("\n"); } #endif M[numVar*numVar] = input[numVar*numVar]; #if 0 printf("sigma = %10.7f\n", M[numVar*numVar]); #endif /* copy the values of M to the constant memory "MDevice"*/ err= hipMemcpyToSymbol(MDevice,M, (numVar*numVar+1)*sizeof(float)); //for(int i=0; i<numVar*numVar+1; i++)MDevice[i] = M[i]; if (err != hipSuccess) { fprintf(stderr, "Failed to copy matrix M from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *dataDevice; /* allocate the memory on the GPU for the data matrix */ err = hipMalloc(&dataDevice, N *(numVar+1) * sizeof(float) ) ; if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector data (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(dataDevice, data, N *(numVar+1) *sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector data from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *kernelValuesDevice; // allocate the memory on the GPU for kernel Values err = hipMalloc(&kernelValuesDevice, N*N* sizeof(float) ) ; if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector --kernelValuesDevice-- (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipMemset(kernelValuesDevice, 0, N*N* sizeof(float)); float *kernelValuesDeviceb; // allocate the memory on the GPU for kernel Values err = hipMalloc(&kernelValuesDeviceb, N*N* sizeof(float) ) ; if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector --kernelValuesDeviceb-- (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // hipMemset(kernelValuesDeviceb, 0, N*N* sizeof(float)); float *kernelValuesHostb = new float[N*N]; for(int i=0; i<N*N; i++) { kernelValuesHostb[i] = 0.0; } err = hipMemcpy(kernelValuesDeviceb, kernelValuesHostb, (N*N) *sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector kernelValuesDeviceb from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *MDeviceb; // allocate the memory on the GPU for kernel Values err = hipMalloc(&MDeviceb, (numVar*numVar + 1)* sizeof(float) ) ; if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector MDeviceb (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *MHostb = new float[numVar*numVar + 1]; for(int i=0; i<numVar*numVar + 1; i++) { MHostb[i] = 0.0; } err = hipMemcpy(MDeviceb, MHostb, (numVar*numVar + 1) *sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector MHostb from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } delete[] MHostb; /* init adjoint of M to zero */ // hipMemset(MDeviceb, 0, (numVar*numVar + 1)* sizeof(float)); int number_of_blocks = (N*N+number_of_threads_per_block-1)/number_of_threads_per_block; #if 0 
printf("Launching the first primal kernel with %d blocks...\n",number_of_blocks); #endif hipLaunchKernelGGL(( calculateKernelValues), dim3(number_of_blocks),dim3(number_of_threads_per_block), 0, 0, dataDevice, kernelValuesDevice, N); hipDeviceSynchronize(); #if 0 printf("The primal kernel : calculateKernelValues is done...\n"); #endif float *lossSumDevice; // allocate the memory on the GPU for kernel Values err = hipMalloc(&lossSumDevice, N*sizeof(float) ) ; if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector --lossSumDevice-- (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipMemset(lossSumDevice,0,N*sizeof(float)); number_of_blocks = (N+number_of_threads_per_block-1)/number_of_threads_per_block; #if 0 printf("Launching the second primal kernel + adjoint with %d blocks...\n",number_of_blocks); #endif float totalLossb = 0.0; totalLossb = *resultb/N; float *lossSumHostb = new float[N]; for(int i=0; i<N;i++) lossSumHostb[i] = 0;; float *lossSumDeviceb; // allocate the memory on the GPU for kernel Values err = hipMalloc(&lossSumDeviceb, N*sizeof(float) ) ; if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector --lossSumDeviceb-- (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipMemset(lossSumDeviceb,0,N*sizeof(float)); for (int i = N-1; i > -1; --i) lossSumHostb[i] = lossSumHostb[i] + totalLossb; #if 0 for (int i = N-1; i > -1; --i) printf("lossSumHostb[i] = %10.7f\n",i,lossSumHostb[i]); #endif err = hipMemcpy(lossSumDeviceb, lossSumHostb, N *sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector --lossSumDevice-- from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // hipMemset(kernelValuesDeviceb, 0, (N*N)* sizeof(float)); /* this subroutine evaluates the lossSumDevice and kernelValuesDeviceb */ hipLaunchKernelGGL(( calculateLossKernel_b), dim3(number_of_blocks),dim3(number_of_threads_per_block), 0, 0, dataDevice,kernelValuesDevice,kernelValuesDeviceb, lossSumDevice,lossSumDeviceb, N); hipDeviceSynchronize(); //hipDeviceSynchronize(); #if 0 printf("Kernel: calculateLossKernel_b is done ...\n"); #endif err = hipMemcpy(kernelValuesHostb, kernelValuesDeviceb, N*N *sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector --kernelValues-- from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *lossSumHost = new float[N](); err = hipMemcpy(lossSumHost, lossSumDevice, N*sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector lossSum from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float totalLoss=0.0; for (int i=0; i<N; i++) { totalLoss+=lossSumHost[i]; } *result = totalLoss/N; #if 0 printf("result = %10.7f\n",*result); #endif /* reverse sweep starts from here */ hipMemset(MDeviceb, 0, (numVar*numVar + 1)* sizeof(float)); number_of_blocks = (N*N+number_of_threads_per_block-1)/number_of_threads_per_block; #if 0 printf("Launching the second adjoint kernel with %d blocks...\n",number_of_blocks); #endif /* this subroutine evaluates MDeviceb */ hipLaunchKernelGGL(( calculateKernelValues_b), dim3(number_of_blocks),dim3(number_of_threads_per_block), 0, 0, MDeviceb, dataDevice, kernelValuesDevice, kernelValuesDeviceb, N); hipDeviceSynchronize(); #if 0 printf("Kernel: calculateKernelValues_b is done ...\n"); #endif for (int ii1 = 0; ii1 < numVar*numVar+1; ++ii1) { Mb[ii1] = 
0.0; } err = hipMemcpy(Mb, MDeviceb, (numVar*numVar+1)*sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector Mb from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } #if 0 printf("Mb = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", Mb[i*numVar+j]); } printf("\n"); } #endif for (int i = numVar-1; i > -1; --i) for (int j = numVar-1; j > -1; --j) for (int k = numVar-1; k > -1; --k) { Lb[i][k] = Lb[i][k] + LT[k][j]*Mb[i*numVar+j]; LTb[k][j] = LTb[k][j] + L[i][k]*Mb[i*numVar+j]; } for (int i = numVar-1; i > -1; --i) { for (int j = i; j > -1; --j) { Lb[i][j] = Lb[i][j] + LTb[j][i]; LTb[j][i] = 0.0; } } for (int i = numVar-1; i > -1; --i) for (int j = numVar-1; j > -1; --j) { inputb[i*numVar + j] = inputb[i*numVar + j] + Lb[i][j]; Lb[i][j] = 0.0; } inputb[numVar*numVar] = Mb[numVar*numVar]; #if 0 printf("inputb = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", inputb[i*numVar+j]); } printf("\n"); } printf("sigmab = %10.7f\n", inputb[numVar*numVar]); #endif hipFree(dataDevice); hipFree(kernelValuesDeviceb); hipFree(kernelValuesDevice); hipFree(lossSumDeviceb); hipFree(lossSumDevice); hipFree(MDeviceb); delete[] lossSumHost; delete[] lossSumHostb; delete[] kernelValuesHostb; } float kernelRegressor(fmat &X, fvec &y, frowvec &xp, fmat &M, float sigma) { int d = y.size(); fvec kernelVal(d); fvec weight(d); float kernelSum = 0.0; float yhat = 0.0; for (int i = 0; i < d; i++) { frowvec xi = X.row(i); kernelVal(i) = gaussianKernel(xi, xp, sigma, M); kernelSum += kernelVal(i); } for (int i = 0; i < d; i++) { weight(i) = kernelVal(i) / kernelSum; yhat += y(i) * weight(i); #if 0 printf("y(%d) * weight(%d) = %10.7f * %10.7f\n",i,i,y(i),weight(i) ); #endif } return yhat; } /* * train the Mahalanobis matrix M and bandwidth parameter sigma * @param[in] data: sample data matrix (normalized values) * @param[in] max_cv_iter: number of iterations for cross validation loop * @param[out] wSvd: weight for svd regularization * @param[out] w12: weight for mixed 12norm regularization * @param[out] M: Mahalanobis matrix * @param[out] sigma: bandwidth parameter for the Gaussian kernel * * */ int trainMahalanobisDistance(fmat &L, fmat &data, float &sigma, float &wSvd, float &w12,int max_cv_iter) { int max_opt_iter = 40000; unsigned int n = L.n_cols; unsigned int m = L.n_cols; float alpha = 0.9; if(m != n || m!=numVar || n!=numVar){ fprintf(stderr,"Cols: %d and Rows: %d\n",n, m); fprintf(stderr,"Error: The Mahalanobis matrix is not square!\n"); exit(-1); } int Ldim = numVar*numVar; /* lower diagonal matrix Lbest to keep the best L*/ fmat bestL(numVar,numVar); bestL.fill(0.0); float bestsigma = 0.0; /* divide the data set into training and validation sets */ unsigned int N = data.n_rows; /* size of the validation set, default to one fifth */ unsigned int NvalidationSet = N/5; unsigned int Ntraining = N - NvalidationSet; #if 1 printf("number of training samples (core) = %d\n",Ntraining); printf("number of validation samples = %d\n",NvalidationSet); #endif fmat dataTraining = data.submat( 0, 0, Ntraining-1, numVar ); fmat dataValidation = data.submat( Ntraining, 0, N-1, numVar ); fmat XValidation = dataValidation.submat(0,0,NvalidationSet-1,numVar-1); fvec yValidation = dataValidation.col(numVar); fmat XTraining = dataTraining.submat(0,0,Ntraining-1,numVar-1); fvec yTraining = dataTraining.col(numVar); #if 0 printf("Training data set = \n"); 
dataTraining.print(); printf("Validation data set = \n"); dataValidation.print(); #endif #if 0 printf("XTraining = \n"); XTraining.print(); printf("yTraining = \n"); yTraining.print(); #endif #if 0 printf("XValidation = \n"); XValidation.print(); printf("yValidation = \n"); yValidation.print(); #endif fvec wSvdtrial(max_cv_iter); fvec w12trial(max_cv_iter); if(max_cv_iter !=1){ for(int i=0; i<max_cv_iter; i++){ wSvdtrial(i) = pow(10.0,RandomFloat(-2,0.0)); w12trial(i) = pow(10.0,RandomFloat(-2,0.0)); } #if 1 printf("wSvdtrial = \n"); wSvdtrial.print(); printf("w12trial = \n"); w12trial.print(); #endif } float *inputVec = new float[Ldim+1](); float *inputVecVel = new float[Ldim+1](); float *inputVecLocalBest = new float[Ldim+1](); float *inputVecb = new float[Ldim+1](); float *inputVecRegb = new float[Ldim](); float *gradientVec = new float[Ldim+1](); float *dataVecTraining = new float[Ntraining*(n+1)](); #if 0 printf("L = \n"); for (int i = 0; i < numVar; i++){ for (int j = 0; j < numVar; j++) { printf("%10.7f ",inputVec[i*numVar+j]); } printf("\n"); } printf("sigma = %10.7f\n",inputVec[Ldim]); #endif #if 1 printf("copying training data...\n"); #endif for (int i = 0; i < Ntraining; i++) { for (int j = 0; j < numVar+1; j++) { dataVecTraining[i*(n+1)+j ] = dataTraining(i, j); } } #if 1 printf("data copied = \n"); for (int i = 0; i < Ntraining; i++) { for (int j = 0; j < numVar+1; j++) { printf("%10.7f ",dataVecTraining[i*(n+1)+j ]); } printf("\n"); } #endif float optGenError = 10E14; /* cross validation loop to tune the weights for the regularization parameters */ for(int iter_cv=0; iter_cv< max_cv_iter; iter_cv++){ float learning_rateM = 0.0001; float learning_rateSigma = learning_rateM * 0.01; if(max_cv_iter !=1){ wSvd = wSvdtrial(iter_cv); w12 = w12trial(iter_cv); } #if 1 printf("Outer iteration = %d\n",iter_cv); printf("wSvd = %10.7f, w12 = %10.7f\n",wSvd,w12); #endif /* initialize the L matrix and sigma => everything is saved in the vector "inputVec" */ for (int i = 0; i < numVar; i++) for (int j = 0; j < numVar; j++) { inputVec[i*numVar+j] = 0.0; } for (int i = 0; i < numVar; i++) { for (int j = 0; j <= i; j++) { if(i ==j) { /* main diagonal */ inputVec[i*numVar+j] = 1.0+ RandomFloat(-0.1,0.1); } else { inputVec[i*numVar+j] = RandomFloat(0.0,0.1); } } } /* assign sigma */ inputVec[Ldim] = RandomFloat(0.0,0.1); float lossVal,lossValb, regTerm; float objFunVal; lossVal = 0.0; lossValb = 1.0; for(int i=0;i<Ldim+1;i++) { inputVecb[i] = 0.0; } /* calculate the first gradient vector */ printf("Evaluating the first gradient...\n"); calcLossFunGPU_b(&lossVal, &lossValb, inputVec,inputVecb, dataVecTraining,Ntraining); printf("initial Loss (GPU Version)= %10.7f\n", lossVal); #if 1 printf("gradient of the loss term = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", inputVecb[i*numVar+j]); } printf("\n"); } printf("sigma sensitivity = %10.7f\n", inputVecb[Ldim]); #endif for(int i=0;i<Ldim+1;i++) { gradientVec[i]=inputVecb[i]; } #if 0 /* call the CodiPack version for validation */ codi::RealReverse *inputVecCodi = new codi::RealReverse[n*n+1]; for(int i=0; i<n*n+1; i++){ inputVecCodi[i] = inputVec[i]; } codi::RealReverse lossValCodi = 0.0; float *inputVecbCodi = new float[n*n+1](); /* call the CodiPack version of "calcLossFunCPU" */ printf("calling calcLossFunCPU (reverse AD)...\n"); calcLossFunCPU(&lossValCodi,inputVecCodi, inputVecbCodi, dataVecTraining, Ntraining); printf("Lb (codipack result)= \n"); for (int i = 0; i < n; i++) { for (int j = 0; j < 
n; j++) { printf("%10.7f ", inputVecbCodi[i*n+j]); } printf("\n"); } printf("sigmab = %10.7f\n", inputVecbCodi[n*n]); printf("lossValCodi = %10.7f\n", lossValCodi.getValue()); #endif #if 0 printf("calculating regularization term...\n"); #endif for(int i=0;i<Ldim;i++) { inputVecRegb[i] = 0.0; } /* call the adjoint mode of the function to compute the regularization term */ calcRegTerms(inputVec, inputVecRegb, &regTerm, wSvd, w12, n); #if 0 printf("gradient of the regularization term = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", inputVecRegb[i*numVar+j]); } printf("\n"); } #endif objFunVal = lossVal + regTerm; printf("initial value of the objective function = %10.7f\n",objFunVal); /* add the regularization sensitivities to the gradient vector */ for(int i=0;i<Ldim;i++) { gradientVec[i]+=inputVecRegb[i]; } #if 0 /* validation loop for the regularization term */ float f0 = 0.0; float tempSave; calcRegTerms(inputVec, &f0, wSvd, w12, n); printf("f0 = %10.7f\n",f0); float epsValReg= 0.001; for (int i = 0; i < n; i++) { for (int j = 0; j <= i; j++) { printf("validating the (%d,%d) th element of M\n",i,j); tempSave = inputVec[i*n+j]; inputVec[i*n+j]+=epsValReg; float f1 = 0.0; calcRegTerms(inputVec, &f1, wSvd, w12, n); printf("f1 = %10.7f, f0 = %10.7f\n",f1,f0); inputVec[i*n+j]= tempSave; float fdVal = (f1-f0)/epsValReg; printf("fd value = %10.7f, ad value = %10.7f\n",fdVal,inputVecRegb[i*n+j]); float f2,f2d; /* call forward mode */ calcRegTerms(inputVec, &f2,&f2d, wSvd, w12, n, i*n+j); printf("primal value = %10.7f, forward ad value = %10.7f, ad value = %10.7f\n",f2,f2d,inputVecRegb[i*n+j]); } } #endif /* optimization loop */ /* check gradient */ for(int i=0;i<Ldim;i++) { if( gradientVec[i] != gradientVec[i]){ printf("gradientVec[%d] is NaN!\n",i); exit(1); } } float objectiveFunLocalBest = 10E14; for(int opt_iter=0 ; opt_iter < max_opt_iter; opt_iter++){ /* update M */ for (int i = 0; i < numVar; i++){ for (int j = 0; j <= i; j++) { inputVec[i*numVar+j]= inputVec[i*numVar+j] + inputVecVel[i*numVar+j]; } } for (int i = 0; i < numVar; i++){ for (int j = 0; j <= i; j++) { if ( inputVec[i*numVar+j] < 0) { inputVec[i*numVar+j] = 10E-6; } } } /* update sigma */ inputVec[Ldim]= inputVec[Ldim] + inputVecVel[Ldim]; if(inputVec[Ldim] <= 0) { inputVec[Ldim] = 10E-06; } for(int i=0;i<Ldim+1;i++) { inputVecb[i] = 0.0; } /* calculate the gradient vector */ #if 0 printf("evaluating gradient vector...\n"); #endif calcLossFunGPU_b(&lossVal, &lossValb, inputVec,inputVecb, dataVecTraining,Ntraining); #if 0 printf("Loss (GPU Version)= %10.7f\n", lossVal); #endif for(int i=0;i<Ldim+1;i++) { gradientVec[i]=inputVecb[i]; } #if 0 printf("calculating the regularization term...\n"); #endif for(int i=0;i<Ldim;i++) { inputVecRegb[i] = 0.0; } /* call the adjoint mode of the function to compute the regularization term */ calcRegTerms(inputVec, inputVecRegb, &regTerm, wSvd, w12, n); #if 0 printf("gradient of the regularization term = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", inputVecRegb[i*numVar+j]); } printf("\n"); } #endif /* add the regularization sensitivities to the gradient vector */ for(int i=0;i<Ldim;i++) { gradientVec[i]+=inputVecRegb[i]; } objFunVal = lossVal + regTerm; if(objFunVal < objectiveFunLocalBest){ objectiveFunLocalBest = objFunVal; for(int i=0;i<Ldim+1;i++) { inputVecLocalBest[i]=inputVec[i]; } } if(opt_iter % 100 == 0){ printf("iter = %d, objective function = %10.7f, Leave One Out Error = %10.7f, 
Regularization term = %10.7f\n",opt_iter,objFunVal,lossVal, regTerm);
#if 0
                printf("L = \n");
                for (int i = 0; i < numVar; i++) {
                    for (int j = 0; j < numVar; j++) {
                        printf("%10.7f ", inputVec[i*numVar+j]);
                    }
                    printf("\n");
                }
                printf("sigma = %10.7f\n",inputVec[Ldim]);
#endif
            }

            /* update velocity vector */
            for(int i=0;i<Ldim;i++) {
                inputVecVel[i]=alpha* inputVecVel[i] - learning_rateM*gradientVec[i];
            }
            inputVecVel[Ldim]=alpha* inputVecVel[Ldim] - learning_rateSigma*gradientVec[Ldim];

        } /* end of local optimization loop */

        for (int i = 0; i < numVar; i++)
            for (int j = 0; j < numVar; j++) {
                L(i,j)= inputVecLocalBest[i*numVar+j];
            }

#if 1
        printf("local optimization result:\n");
        printf("L = \n");
        L.print();
        printf("sigma = %10.7f\n", inputVecLocalBest[Ldim]);
#endif

        sigma = inputVecLocalBest[Ldim];
        fmat M = L*trans(L);

#if 1
        printf("M = \n");
        M.print();
#endif

        float genError = 0.0;
        for(int i=0; i<NvalidationSet; i++){
            frowvec xp = XValidation.row(i);
            float ytilde = kernelRegressor(XTraining, yTraining, xp, M, sigma);
            float yexact = yValidation(i);
#if 0
            printf("x:\n");
            xp.print();
            printf("ytilde = %10.7f, yexact = %10.7f\n",ytilde,yexact);
#endif
            // genError += (yexact-ytilde)*(yexact-ytilde);
            genError += fabs(yexact-ytilde);
        }
        genError = genError/NvalidationSet;

#if 1
        printf("Generalization error = %10.7f\n",genError);
#endif

        if(genError < optGenError) {
#if 1
            printf("Better L has been found, updating L...\n");
#endif
            bestL = L;
            bestsigma = sigma;
            optGenError = genError;
        }

    } /* end of cv loop */

    L = bestL;
    sigma = bestsigma;

    delete[] inputVec;
    delete[] inputVecVel;
    delete[] inputVecLocalBest;
    delete[] inputVecb;
    delete[] inputVecRegb;
    delete[] dataVecTraining;
    delete[] gradientVec;

    return 0;
}
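
/*
 * Minimal usage sketch (illustrative only, not part of the original interface):
 * shows how trainMahalanobisDistance and kernelRegressor are meant to be
 * combined. The function name exampleTrainAndPredict, the literal weights and
 * the choice max_cv_iter = 1 (which skips the random search over wSvd/w12)
 * are assumptions for demonstration; data must hold normalized samples with
 * numVar feature columns followed by one target column.
 */
static float exampleTrainAndPredict(fmat &data, frowvec &xp)
{
    fmat L(numVar, numVar, fill::zeros);   /* lower-triangular factor, M = L*L^T    */
    float sigma = 0.0f;                    /* Gaussian kernel bandwidth             */
    float wSvd = 0.01f, w12 = 0.01f;       /* regularization weights (hypothetical) */

    /* learn L and sigma on the (internally split) data set */
    trainMahalanobisDistance(L, data, sigma, wSvd, w12, 1);

    fmat M = L*trans(L);                   /* Mahalanobis matrix used by the kernel */

    /* predict at xp, here simply using all samples as the regression support */
    fmat X = data.submat(0, 0, data.n_rows-1, numVar-1);
    fvec y = data.col(numVar);
    return kernelRegressor(X, y, xp, M, sigma);
}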
a0f7441fb16a849a320e0db919619277447292dc.cu
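
/*
 * Overview: kernel regression with a trainable Mahalanobis metric M = L*L^T.
 * The file provides Gaussian kernel / metric evaluation on the CPU (plain
 * float plus CoDiPack forward- and reverse-mode variants used to validate
 * derivatives), a singular-value based penalty and an element-wise squared
 * penalty on M weighted by wSvd and w12 (calcRegTerms), and CUDA kernels
 * (calculateKernelValues, calculateLossKernel together with their
 * hand-differentiated adjoints *_b) that evaluate the leave-one-out loss and
 * accumulate its sensitivities with respect to the metric entries and the
 * bandwidth sigma.
 */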
#include "kernel_regression_cuda.h" #include "auxilliary_functions.hpp" #include "Rodeo_macros.hpp" #include "test_functions.hpp" #include<stdio.h> #include<iostream> #include<math.h> #include <armadillo> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #include <codi.hpp> using namespace arma; //This implementation using CAS incurs a non-trivial cost though. //Had to use this because compute < 600 doesn't support atomic add with float and > 600 throws up some MemCpy - invalid code error //__device__ float atomicDAdd(float* address, float val); // //__device__ float atomicDAdd(float* address, float val) //{ // unsigned long long int* address_as_ull = // (unsigned long long int*)address; // unsigned long long int old = *address_as_ull, assumed; // do { // assumed = old; // old = atomicCAS(address_as_ull, assumed, // __float_as_longlong(val + // __longlong_as_float(assumed))); // } while (assumed != old); // return __longlong_as_float(old); //} //__managed__ float MDevice[numVar*numVar+1]; __constant__ float MDevice[numVar*numVar+1]; float gaussianKernel(frowvec &xi, frowvec &xj, float sigma, fmat &M) { #if 0 printf("calling gaussianKernel...\n"); xi.print(); xj.print(); #endif /* calculate distance between xi and xj with the matrix M */ float metricVal = calcMetric(xi, xj, M); #if 0 printf("metricVal = %10.7f\n",metricVal); #endif float sqr_two_pi = sqrt(2.0 * datum::pi); float kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); kernelVal += 10E-14; #if 0 printf("kernelVal = %10.7f\n",kernelVal); #endif return kernelVal; } float SIGN(float a, float b) { if (b >= 0.0) { return fabs(a); } else { return -fabs(a); } } codi::RealReverse SIGN(codi::RealReverse a, codi::RealReverse b) { if (b >= 0.0) { return fabs(a); } else { return -fabs(a); } } codi::RealForward SIGN(codi::RealForward a, codi::RealForward b) { if (b >= 0.0) { return fabs(a); } else { return -fabs(a); } } float PYTHAG(float a, float b) { float at = fabs(a), bt = fabs(b), ct, result; if (at > bt) { ct = bt / at; result = at * sqrt(1.0 + ct * ct); } else if (bt > 0.0) { ct = at / bt; result = bt * sqrt(1.0 + ct * ct); } else result = 0.0; return (result); } codi::RealReverse PYTHAG(codi::RealReverse a, codi::RealReverse b) { codi::RealReverse at = fabs(a), bt = fabs(b), ct, result; if (at > bt) { ct = bt / at; result = at * sqrt(1.0 + ct * ct); } else if (bt > 0.0) { ct = at / bt; result = bt * sqrt(1.0 + ct * ct); } else result = 0.0; return (result); } codi::RealForward PYTHAG(codi::RealForward a, codi::RealForward b) { codi::RealForward at = fabs(a), bt = fabs(b), ct, result; if (at > bt) { ct = bt / at; result = at * sqrt(1.0 + ct * ct); } else if (bt > 0.0) { ct = at / bt; result = bt * sqrt(1.0 + ct * ct); } else result = 0.0; return (result); } /** calculate regularization terms for the given matrix L * * @param[in] L: lower diagonal matrix * @param[in] wSvd: weight for the svd regularization part * @param[in] w12: weight for the mixed 12 regularization part * @param[out] regTerm * */ int calcRegTerms(float *L, float *regTerm, float wSvd, float w12, int dim) { int flag, i, its, j, jj, k, l = 0, nm; float c, f, h, s, x, y, z; float anorm = 0.0, g = 0.0, scale = 0.0; int m = dim; int n = dim; float **a; a = new float*[dim]; for (i = 0; i < dim; i++) { a[i] = new float[dim]; } float **M; M= new float*[dim]; for (i = 0; i < dim; i++) { M[i] = new float[dim]; } float **LT; LT = new float*[dim]; for (int i = 0; i < dim; i++) { LT[i] = new 
float[dim]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { LT[i][j]=0.0; } for (int i = 0; i < dim; i++) { for (int j = 0; j <= i; j++){ LT[j][i] = L[i*dim+j]; } } #if 0 printf("L = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", L[i*dim+j]); } printf("\n"); } #endif #if 0 printf("LT = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", LT[i][j]); } printf("\n"); } #endif for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=0; M[i][j]=0; } /* Multiplying matrix L and LT and storing in M */ for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) for(int k = 0; k < dim; ++k) { M[i][j] += L[i*dim+k] * LT[k][j]; } for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=M[i][j]; } #if 0 printf("a = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", a[i][j]); } printf("\n"); } #endif #if 0 /* only for validation */ mat Lval(dim,dim); mat LTval(dim,dim); mat aval(dim,dim); for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { Lval(i,j) = Lin(i,j); } LTval = trans(Lval); aval = Lval*LTval; printf("aval = \n"); aval.print(); #endif /* SVD part */ float **v; v = new float*[n]; for (i = 0; i < n; i++) { v[i] = new float[n]; } float *w = new float[n]; float *rv1 = new float[n]; /* Householder reduction to bidiagonal form */ for (i = 0; i < n; i++) { /* left-hand reduction */ l = i + 1; rv1[i] = scale * g; g = s = scale = 0.0; if (i < m) { for (k = i; k < m; k++) scale += fabs(a[k][i]); if (scale) { for (k = i; k < m; k++) { a[k][i] = (a[k][i] / scale); s += (a[k][i] * a[k][i]); } f = a[i][i]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][i] = (f - g); if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = i; k < m; k++) s += (a[k][i] * a[k][j]); f = s / h; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (k = i; k < m; k++) a[k][i] = (a[k][i] * scale); } } w[i] = (scale * g); /* right-hand reduction */ g = s = scale = 0.0; if (i < m && i != n - 1) { for (k = l; k < n; k++) scale += fabs(a[i][k]); if (scale) { for (k = l; k < n; k++) { a[i][k] = (a[i][k] / scale); s += (a[i][k] * a[i][k]); } f = a[i][l]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][l] = (f - g); for (k = l; k < n; k++) rv1[k] = a[i][k] / h; if (i != m - 1) { for (j = l; j < m; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[j][k] * a[i][k]); for (k = l; k < n; k++) a[j][k] += (s * rv1[k]); } } for (k = l; k < n; k++) a[i][k] = (a[i][k] * scale); } } anorm = MAX(anorm, (fabs(w[i]) + fabs(rv1[i]))); } /* accumulate the right-hand transformation */ for (i = n - 1; i >= 0; i--) { if (i < n - 1) { if (g) { for (j = l; j < n; j++) v[j][i] = ((a[i][j] / a[i][l]) / g); /* float division to avoid underflow */ for (j = l; j < n; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[i][k] * v[k][j]); for (k = l; k < n; k++) v[k][j] += (s * v[k][i]); } } for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0; } v[i][i] = 1.0; g = rv1[i]; l = i; } /* accumulate the left-hand transformation */ for (i = n - 1; i >= 0; i--) { l = i + 1; g = w[i]; if (i < n - 1) for (j = l; j < n; j++) a[i][j] = 0.0; if (g) { g = 1.0 / g; if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = l; k < m; k++) s += (a[k][i] * a[k][j]); f = (s / a[i][i]) * g; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (j = i; j < m; j++) a[j][i] = (a[j][i] * g); } else { for (j = i; j < m; j++) a[j][i] = 0.0; } ++a[i][i]; } /* diagonalize the bidiagonal form */ for (k = n - 1; k >= 0; k--) { /* loop over 
singular values */ for (its = 0; its < 30000; its++) { /* loop over allowed iterations */ flag = 1; for (l = k; l >= 0; l--) { /* test for splitting */ nm = l - 1; if (fabs(rv1[l]) + anorm == anorm) { flag = 0; break; } if (fabs(w[nm]) + anorm == anorm) break; } if (flag) { c = 0.0; s = 1.0; for (i = l; i <= k; i++) { f = s * rv1[i]; if (fabs(f) + anorm != anorm) { g = w[i]; h = PYTHAG(f, g); w[i] = h; h = 1.0 / h; c = g * h; s = (-f * h); for (j = 0; j < m; j++) { y = a[j][nm]; z = a[j][i]; a[j][nm] = (y * c + z * s); a[j][i] = (z * c - y * s); } } } } z = w[k]; if (l == k) { /* convergence */ if (z < 0.0) { /* make singular value nonnegative */ w[k] = (-z); for (j = 0; j < n; j++) v[j][k] = (-v[j][k]); } break; } if (its >= 30000) { delete[] rv1; fprintf(stderr, "No convergence after 30,000! iterations \n"); return 1; } /* shift from bottom 2 x 2 minor */ x = w[l]; nm = k - 1; y = w[nm]; g = rv1[nm]; h = rv1[k]; f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y); g = PYTHAG(f, 1.0); f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x; /* next QR transformation */ c = s = 1.0; for (j = l; j <= nm; j++) { i = j + 1; g = rv1[i]; y = w[i]; h = s * g; g = c * g; z = PYTHAG(f, h); rv1[j] = z; c = f / z; s = h / z; f = x * c + g * s; g = g * c - x * s; h = y * s; y = y * c; for (jj = 0; jj < n; jj++) { x = v[jj][j]; z = v[jj][i]; v[jj][j] = (x * c + z * s); v[jj][i] = (z * c - x * s); } z = PYTHAG(f, h); w[j] = z; if (z) { z = 1.0 / z; c = f * z; s = h * z; } f = (c * g) + (s * y); x = (c * y) - (s * g); for (jj = 0; jj < m; jj++) { y = a[jj][j]; z = a[jj][i]; a[jj][j] = (y * c + z * s); a[jj][i] = (z * c - y * s); } } rv1[l] = 0.0; rv1[k] = f; w[k] = x; } } delete[] rv1; #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i]); } #endif /* sort the singular values */ float temp; for (i = 0; i < n; ++i) { for (j = i + 1; j < n; ++j) { if (w[i] < w[j]) { temp = w[i]; w[i] = w[j]; w[j] = temp; } } } #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i]); } #endif /* normalization */ float wsum = 0.0; for (i = 0; i < n; i++) { wsum += w[i]; } for (i = 0; i < n; i++) { w[i] = w[i]/wsum; } #if 0 printf("singular values of a (normalized) with wsum =%10.7f\n",wsum); for (i = 0; i < n; i++) { printf("%15.10f\n",w[i]); } #endif float svd_multiplier = (1.0*n*(1.0*n+1))/2.0; svd_multiplier = 1.0/svd_multiplier; #if 0 printf("svd_multiplier = %10.7f\n",svd_multiplier); #endif float reg_term_svd = 0.0; for (i = 0; i < n; i++) { #if 0 printf("%d * %10.7f = %10.7f\n",i+1,w[i],(i+1)*w[i]); #endif reg_term_svd = reg_term_svd + (i + 1) * w[i]; } #if 0 printf("reg_term_svd = %10.7f\n",reg_term_svd); #endif float reg_term_L1 = 0.0; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { reg_term_L1 = reg_term_L1 + M[i][j]* M[i][j]; } #if 0 printf("reg_term_L1 = %10.7f\n",reg_term_L1); #endif for (i = 0; i < n; i++) { delete[] v[i]; delete[] a[i]; delete[] M[i]; delete[] LT[i]; } delete[] LT; delete[] M; delete[] a; delete[] v; delete[] w; *regTerm = wSvd * svd_multiplier *reg_term_svd + w12 * reg_term_L1; #if 0 printf("result = %10.7f\n",*regTerm); #endif return 0; } /* forward mode */ int calcRegTerms(float *L, float *regTerm,float *regTermd, float wSvd, float w12, int dim, int derIndx) { int flag, i, its, j, jj, k, l = 0, nm; codi::RealForward c, f, h, s, x, y, z; codi::RealForward anorm = 0.0, g = 0.0, scale = 0.0; int m = dim; int n = dim; codi::RealForward *Lcodi = new codi::RealForward[dim*dim]; for (int i = 0; i < dim*dim; 
i++) { Lcodi[i] = L[i]; } Lcodi[derIndx].setGradient(1.0); codi::RealForward **a; a = new codi::RealForward*[dim]; for (i = 0; i < dim; i++) { a[i] = new codi::RealForward[dim]; } codi::RealForward **M; M= new codi::RealForward*[dim]; for (i = 0; i < dim; i++) { M[i] = new codi::RealForward[dim]; } codi::RealForward **LT; LT = new codi::RealForward*[dim]; for (int i = 0; i < dim; i++) { LT[i] = new codi::RealForward[dim]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { LT[i][j]=0.0; } for (int i = 0; i < dim; i++) { for (int j = 0; j <= i; j++){ LT[j][i] = Lcodi[i*dim+j]; } } #if 0 printf("LT = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", LT[i][j]); } printf("\n"); } #endif for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=0; M[i][j]=0; } /* Multiplying matrix L and LT and storing in M */ for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) for(int k = 0; k < dim; ++k) { M[i][j] += Lcodi[i*dim+k] * LT[k][j]; } for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=M[i][j]; } #if 0 printf("a = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", a[i][j]); } printf("\n"); } #endif /* SVD part */ codi::RealForward **v; v = new codi::RealForward*[n]; for (i = 0; i < n; i++) { v[i] = new codi::RealForward[n]; } codi::RealForward *w = new codi::RealForward[n]; codi::RealForward *rv1 = new codi::RealForward[n]; /* Householder reduction to bidiagonal form */ for (i = 0; i < n; i++) { /* left-hand reduction */ l = i + 1; rv1[i] = scale * g; g = s = scale = 0.0; if (i < m) { for (k = i; k < m; k++) scale += fabs(a[k][i]); if (scale!= 0) { for (k = i; k < m; k++) { a[k][i] = (a[k][i] / scale); s += (a[k][i] * a[k][i]); } f = a[i][i]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][i] = (f - g); if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = i; k < m; k++) s += (a[k][i] * a[k][j]); f = s / h; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (k = i; k < m; k++) a[k][i] = (a[k][i] * scale); } } w[i] = (scale * g); /* right-hand reduction */ g = s = scale = 0.0; if (i < m && i != n - 1) { for (k = l; k < n; k++) scale += fabs(a[i][k]); if (scale!=0) { for (k = l; k < n; k++) { a[i][k] = (a[i][k] / scale); s += (a[i][k] * a[i][k]); } f = a[i][l]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][l] = (f - g); for (k = l; k < n; k++) rv1[k] = a[i][k] / h; if (i != m - 1) { for (j = l; j < m; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[j][k] * a[i][k]); for (k = l; k < n; k++) a[j][k] += (s * rv1[k]); } } for (k = l; k < n; k++) a[i][k] = (a[i][k] * scale); } } anorm = MAX(anorm, (fabs(w[i]) + fabs(rv1[i]))); } /* accumulate the right-hand transformation */ for (i = n - 1; i >= 0; i--) { if (i < n - 1) { if (g!=0) { for (j = l; j < n; j++) v[j][i] = ((a[i][j] / a[i][l]) / g); /* float division to avoid underflow */ for (j = l; j < n; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[i][k] * v[k][j]); for (k = l; k < n; k++) v[k][j] += (s * v[k][i]); } } for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0; } v[i][i] = 1.0; g = rv1[i]; l = i; } /* accumulate the left-hand transformation */ for (i = n - 1; i >= 0; i--) { l = i + 1; g = w[i]; if (i < n - 1) for (j = l; j < n; j++) a[i][j] = 0.0; if (g!=0) { g = 1.0 / g; if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = l; k < m; k++) s += (a[k][i] * a[k][j]); f = (s / a[i][i]) * g; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (j = i; j < m; j++) a[j][i] = (a[j][i] * g); } else { for (j = i; j < m; 
j++) a[j][i] = 0.0; } ++a[i][i]; } /* diagonalize the bidiagonal form */ for (k = n - 1; k >= 0; k--) { /* loop over singular values */ for (its = 0; its < 30000; its++) { /* loop over allowed iterations */ flag = 1; for (l = k; l >= 0; l--) { /* test for splitting */ nm = l - 1; if (fabs(rv1[l]) + anorm == anorm) { flag = 0; break; } if (fabs(w[nm]) + anorm == anorm) break; } if (flag) { c = 0.0; s = 1.0; for (i = l; i <= k; i++) { f = s * rv1[i]; if (fabs(f) + anorm != anorm) { g = w[i]; h = PYTHAG(f, g); w[i] = h; h = 1.0 / h; c = g * h; s = (-f * h); for (j = 0; j < m; j++) { y = a[j][nm]; z = a[j][i]; a[j][nm] = (y * c + z * s); a[j][i] = (z * c - y * s); } } } } z = w[k]; if (l == k) { /* convergence */ if (z < 0.0) { /* make singular value nonnegative */ w[k] = (-z); for (j = 0; j < n; j++) v[j][k] = (-v[j][k]); } break; } if (its >= 30000) { delete[] rv1; fprintf(stderr, "No convergence after 30,000! iterations \n"); return 1; } /* shift from bottom 2 x 2 minor */ x = w[l]; nm = k - 1; y = w[nm]; g = rv1[nm]; h = rv1[k]; f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y); g = PYTHAG(f, 1.0); f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x; /* next QR transformation */ c = s = 1.0; for (j = l; j <= nm; j++) { i = j + 1; g = rv1[i]; y = w[i]; h = s * g; g = c * g; z = PYTHAG(f, h); rv1[j] = z; c = f / z; s = h / z; f = x * c + g * s; g = g * c - x * s; h = y * s; y = y * c; for (jj = 0; jj < n; jj++) { x = v[jj][j]; z = v[jj][i]; v[jj][j] = (x * c + z * s); v[jj][i] = (z * c - x * s); } z = PYTHAG(f, h); w[j] = z; if (z!=0) { z = 1.0 / z; c = f * z; s = h * z; } f = (c * g) + (s * y); x = (c * y) - (s * g); for (jj = 0; jj < m; jj++) { y = a[jj][j]; z = a[jj][i]; a[jj][j] = (y * c + z * s); a[jj][i] = (z * c - y * s); } } rv1[l] = 0.0; rv1[k] = f; w[k] = x; } } delete[] rv1; #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i]); } #endif /* sort the singular values */ codi::RealForward temp; for (i = 0; i < n; ++i) { for (j = i + 1; j < n; ++j) { if (w[i] < w[j]) { temp = w[i]; w[i] = w[j]; w[j] = temp; } } } #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i].getValue()); } #endif /* normalization */ codi::RealForward wsum = 0.0; for (i = 0; i < n; i++) { wsum += w[i]; } for (i = 0; i < n; i++) { w[i] = w[i]/wsum; } #if 0 printf("singular values of a (normalized) with wsum =%10.7f\n",wsum.getValue()); for (i = 0; i < n; i++) { printf("%15.10f\n",w[i].getValue()); } #endif float svd_multiplier = (1.0*n*(1.0*n+1))/2.0; svd_multiplier = 1.0/svd_multiplier; #if 0 printf("svd_multiplier = %10.7f\n",svd_multiplier); #endif codi::RealForward reg_term_svd = 0.0; for (i = 0; i < n; i++) { #if 0 printf("%d * %10.7f = %10.7f\n",i+1,w[i].getValue(),(i+1)*w[i].getValue()); #endif reg_term_svd = reg_term_svd + (i + 1) * w[i]; } #if 0 printf("reg_term_svd = %10.7f\n",reg_term_svd.getValue()); #endif codi::RealForward reg_term_L1 = 0.0; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { reg_term_L1 = reg_term_L1 + M[i][j]* M[i][j]; } #if 0 printf("reg_term_L1 = %10.7f\n",reg_term_L1.getValue()); #endif for (i = 0; i < n; i++) { delete[] v[i]; delete[] a[i]; delete[] M[i]; delete[] LT[i]; } delete[] LT; delete[] M; delete[] a; delete[] v; delete[] w; codi::RealForward result = wSvd * svd_multiplier *reg_term_svd + w12 * reg_term_L1; *regTerm = result.getValue(); *regTermd = result.getGradient(); return 0; } int calcRegTerms(float *L, float *Lb,float *result , float wSvd, float w12, int dim) { int flag, 
i, its, j, jj, k, l = 0, nm; codi::RealReverse *Lcodi = new codi::RealReverse[dim*dim]; for (int i = 0; i < dim*dim; i++) { Lcodi[i] = L[i]; } /* activate tape and register input */ codi::RealReverse::TapeType& tape = codi::RealReverse::getGlobalTape(); tape.setActive(); codi::RealReverse regTerm=0.0; for (int i = 0; i < dim*dim; i++) { tape.registerInput(Lcodi[i]); } codi::RealReverse c, f, h, s, x, y, z; codi::RealReverse anorm = 0.0, g = 0.0, scale = 0.0; int m = dim; int n = dim; codi::RealReverse **a; a = new codi::RealReverse*[dim]; for (i = 0; i < dim; i++) { a[i] = new codi::RealReverse[dim]; } codi::RealReverse **M; M = new codi::RealReverse*[dim]; for (i = 0; i < dim; i++) { M[i] = new codi::RealReverse[dim]; } codi::RealReverse **LT; LT = new codi::RealReverse*[dim]; for (int i = 0; i < dim; i++) { LT[i] = new codi::RealReverse[dim]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { LT[i][j]=0.0; } for (int i = 0; i < dim; i++) { for (int j = 0; j <= i; j++){ LT[j][i] = Lcodi[i*dim+j]; } } #if 0 printf("LT = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", LT[i][j].getValue()); } printf("\n"); } #endif for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=0; M[i][j]=0; } /* Multiplying matrix L and LT and storing in M */ for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) for(int k = 0; k < dim; ++k) { M[i][j] += Lcodi[i*dim+k] * LT[k][j]; } for(int i = 0; i < dim; ++i) for(int j = 0; j < dim; ++j) { a[i][j]=M[i][j]; } #if 0 printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i][j].getValue()); } printf("\n"); } #endif #if 0 printf("a = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", a[i][j].getValue()); } printf("\n"); } #endif /* SVD part */ codi::RealReverse **v; v = new codi::RealReverse*[n]; for (i = 0; i < n; i++) { v[i] = new codi::RealReverse[n]; } codi::RealReverse *w = new codi::RealReverse[n]; codi::RealReverse *rv1 = new codi::RealReverse[n]; /* Householder reduction to bidiagonal form */ for (i = 0; i < n; i++) { /* left-hand reduction */ l = i + 1; rv1[i] = scale * g; g = s = scale = 0.0; if (i < m) { for (k = i; k < m; k++) scale += fabs(a[k][i]); if (scale != 0) { for (k = i; k < m; k++) { a[k][i] = (a[k][i] / scale); s += (a[k][i] * a[k][i]); } f = a[i][i]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][i] = (f - g); if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = i; k < m; k++) s += (a[k][i] * a[k][j]); f = s / h; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (k = i; k < m; k++) a[k][i] = (a[k][i] * scale); } } w[i] = (scale * g); /* right-hand reduction */ g = s = scale = 0.0; if (i < m && i != n - 1) { for (k = l; k < n; k++) scale += fabs(a[i][k]); if (scale !=0) { for (k = l; k < n; k++) { a[i][k] = (a[i][k] / scale); s += (a[i][k] * a[i][k]); } f = a[i][l]; g = -SIGN(sqrt(s), f); h = f * g - s; a[i][l] = (f - g); for (k = l; k < n; k++) rv1[k] = a[i][k] / h; if (i != m - 1) { for (j = l; j < m; j++) { for (s = 0.0, k = l; k < n; k++) s += (a[j][k] * a[i][k]); for (k = l; k < n; k++) a[j][k] += (s * rv1[k]); } } for (k = l; k < n; k++) a[i][k] = (a[i][k] * scale); } } anorm = MAX(anorm, (fabs(w[i]) + fabs(rv1[i]))); } /* accumulate the right-hand transformation */ for (i = n - 1; i >= 0; i--) { if (i < n - 1) { if (g !=0) { for (j = l; j < n; j++) v[j][i] = ((a[i][j] / a[i][l]) / g); /* float division to avoid underflow */ for (j = l; j < n; j++) { for (s = 0.0, k = l; k < n; 
k++) s += (a[i][k] * v[k][j]); for (k = l; k < n; k++) v[k][j] += (s * v[k][i]); } } for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0; } v[i][i] = 1.0; g = rv1[i]; l = i; } /* accumulate the left-hand transformation */ for (i = n - 1; i >= 0; i--) { l = i + 1; g = w[i]; if (i < n - 1) for (j = l; j < n; j++) a[i][j] = 0.0; if (g != 0) { g = 1.0 / g; if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = l; k < m; k++) s += (a[k][i] * a[k][j]); f = (s / a[i][i]) * g; for (k = i; k < m; k++) a[k][j] += (f * a[k][i]); } } for (j = i; j < m; j++) a[j][i] = (a[j][i] * g); } else { for (j = i; j < m; j++) a[j][i] = 0.0; } ++a[i][i]; } /* diagonalize the bidiagonal form */ for (k = n - 1; k >= 0; k--) { /* loop over singular values */ for (its = 0; its < 30000; its++) { /* loop over allowed iterations */ flag = 1; for (l = k; l >= 0; l--) { /* test for splitting */ nm = l - 1; if (fabs(rv1[l]) + anorm == anorm) { flag = 0; break; } if (fabs(w[nm]) + anorm == anorm) break; } if (flag) { c = 0.0; s = 1.0; for (i = l; i <= k; i++) { f = s * rv1[i]; if (fabs(f) + anorm != anorm) { g = w[i]; h = PYTHAG(f, g); w[i] = h; h = 1.0 / h; c = g * h; s = (-f * h); for (j = 0; j < m; j++) { y = a[j][nm]; z = a[j][i]; a[j][nm] = (y * c + z * s); a[j][i] = (z * c - y * s); } } } } z = w[k]; if (l == k) { /* convergence */ if (z < 0.0) { /* make singular value nonnegative */ w[k] = (-z); for (j = 0; j < n; j++) v[j][k] = (-v[j][k]); } break; } if (its >= 30000) { delete[] rv1; fprintf(stderr, "No convergence after 30,000! iterations \n"); return 1; } /* shift from bottom 2 x 2 minor */ x = w[l]; nm = k - 1; y = w[nm]; g = rv1[nm]; h = rv1[k]; f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y); g = PYTHAG(f, 1.0); f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x; /* next QR transformation */ c = s = 1.0; for (j = l; j <= nm; j++) { i = j + 1; g = rv1[i]; y = w[i]; h = s * g; g = c * g; z = PYTHAG(f, h); rv1[j] = z; c = f / z; s = h / z; f = x * c + g * s; g = g * c - x * s; h = y * s; y = y * c; for (jj = 0; jj < n; jj++) { x = v[jj][j]; z = v[jj][i]; v[jj][j] = (x * c + z * s); v[jj][i] = (z * c - x * s); } z = PYTHAG(f, h); w[j] = z; if (z != 0) { z = 1.0 / z; c = f * z; s = h * z; } f = (c * g) + (s * y); x = (c * y) - (s * g); for (jj = 0; jj < m; jj++) { y = a[jj][j]; z = a[jj][i]; a[jj][j] = (y * c + z * s); a[jj][i] = (z * c - y * s); } } rv1[l] = 0.0; rv1[k] = f; w[k] = x; } } delete[] rv1; #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i]); } #endif codi::RealReverse temp; for (i = 0; i < n; ++i) { for (j = i + 1; j < n; ++j) { if (w[i] < w[j]) { temp = w[i]; w[i] = w[j]; w[j] = temp; } } } #if 0 printf("singular values of a=\n"); for (i = 0; i < n; i++) { printf("%10.7f\n",w[i].getValue()); } #endif codi::RealReverse wsum = 0.0; for (i = 0; i < n; i++) { wsum += w[i]; } for (i = 0; i < n; i++) { w[i] = w[i]/wsum; } #if 0 printf("singular values of a (normalized) with wsum =%10.7f\n",wsum.getValue()); for (i = 0; i < n; i++) { printf("%15.10f\n",w[i].getValue()); } #endif codi::RealReverse svd_multiplier = (1.0*n*(1.0*n+1))/2.0; svd_multiplier = 1.0/svd_multiplier; #if 0 printf("svd_multiplier = %10.7f\n",svd_multiplier); #endif codi::RealReverse reg_term_svd = 0.0; for (i = 0; i < n; i++) { #if 0 printf("%d * %10.7f = %10.7f\n",i+1,w[i].getValue(),(i+1)*w[i].getValue()); #endif reg_term_svd = reg_term_svd + (i + 1) * w[i]; } #if 0 printf("reg_term_svd = %10.7f\n",reg_term_svd.getValue()); #endif codi::RealReverse reg_term_L1 = 0.0; for 
(i = 0; i < n; i++) for (j = 0; j < n; j++) { reg_term_L1 = reg_term_L1 + M[i][j]* M[i][j]; } #if 0 printf("reg_term_L1 = %10.7f\n",reg_term_L1.getValue()); #endif regTerm = wSvd * svd_multiplier *reg_term_svd + w12 * reg_term_L1; #if 0 printf("w12 * reg_term_L1 = %10.7f\n",w12 * reg_term_L1.getValue()); #endif tape.registerOutput(regTerm); tape.setPassive(); regTerm.setGradient(1.0); tape.evaluate(); for (int i = 0; i < numVar*numVar; i++) { Lb[i] = Lcodi[i].getGradient(); } tape.reset(); *result = regTerm.getValue(); for (i = 0; i < n; i++) { delete[] v[i]; delete[] a[i]; delete[] M[i]; delete[] LT[i]; } delete[] LT; delete[] M; delete[] a; delete[] v; delete[] w; delete[] Lcodi; return 0; } float calcKernelValCPU(rowvec &xi, rowvec &xj, mat &M, float sigma){ rowvec diff = xi - xj; colvec diffT = trans(diff); vec matVecProd = M * diffT; // printf("M * xdiff = \n"); // matVecProd.print(); float metricVal = dot(diff, M * diffT); float sqr_two_pi = sqrt(2.0 * 3.14159265359); float kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); return (kernelVal); } /* * calculates the generalized Mahalanobis distance between two points * * @param[in] x_i : first vector * @param[in] X_j : second vector * @param[in] M : dim x dim matrix * @param[in] dim * @return distance * * */ float calcMetric(float *xi, float *xj, float *M, int dim) { #if 0 printf("calling calcMetric (primal)...\n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j]); } printf("\n"); } #endif float *diff = new float[dim]; for (int i = 0; i < dim; i++) { diff[i] = xi[i] - xj[i]; } #if 0 rowvec xi_val(dim); rowvec xj_val(dim); rowvec diff_val(dim); mat M_val(dim, dim); for (int i = 0; i < dim; i++) { xi_val(i) = xi[i]; xj_val(i) = xj[i]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) M_val(i, j) = M[i][j]; diff_val = xi_val - xj_val; printf("diff_val=\n"); diff_val.print(); colvec diffT = trans(diff_val); vec matVecProd = M_val * diffT; printf("M * xdiff = \n"); matVecProd.print(); float metric_val = dot(diff_val, M_val * diffT); printf("metric_val = %10.7f\n", metric_val); #endif float *tempVec = new float[dim]; float sum = 0.0; for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { sum = sum + M[i*dim+j] * diff[j]; } tempVec[i] = sum; sum = 0.0; } #if 0 printf("tempVec = \n"); for(int i=0; i<dim; i++) { printf("%10.7f \n",tempVec[i] ); } #endif sum = 0.0; for (int i = 0; i < dim; i++) { sum = sum + tempVec[i] * diff[i]; } #if 0 printf("sum = %10.7f\n",sum); #endif delete[] diff; delete[] tempVec; if (sum < 0.0) { fprintf(stderr, "Error: metric is negative! 
at FILE = %s, LINE = %d.\n",__FILE__, __LINE__); exit(-1); } return sum; } /* * calculates the generalized Mahalanobis distance between two points, codiPack reverse mode * (differentiated in reverse mode ) * @param[in] x_i : first vector * @param[in] X_j : second vector * @param[in] M : dim x dim matrix * @param[in] dim * @return distance * * */ codi::RealReverse calcMetric(float *xi, float *xj, codi::RealReverse *M, int dim) { #if 0 printf("calling calcMetric (adjoint)...\n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } #endif codi::RealReverse *diff = new codi::RealReverse[dim]; for (int i = 0; i < dim; i++) { diff[i] = xi[i] - xj[i]; } #if 0 rowvec xi_val(dim); rowvec xj_val(dim); rowvec diff_val(dim); mat M_val(dim, dim); for (int i = 0; i < dim; i++) { xi_val(i) = xi[i]; xj_val(i) = xj[i]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { M_val(i, j) = M[i*dim+j].getValue(); } diff_val = xi_val - xj_val; printf("diff_val=\n"); diff_val.print(); colvec diffT = trans(diff_val); vec matVecProd = M_val * diffT; printf("M * xdiff = \n"); matVecProd.print(); float metric_val = dot(diff_val, M_val * diffT); printf("metric_val = %10.7f\n", metric_val); #endif codi::RealReverse *tempVec = new codi::RealReverse[dim]; codi::RealReverse sum = 0.0; for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { sum = sum + M[i*dim+j] * diff[j]; } tempVec[i] = sum; sum = 0.0; } #if 0 printf("tempVec = \n"); for (int i = 0; i < dim; i++) { printf("%10.7f \n", tempVec[i].getValue()); } #endif sum = 0.0; for (int i = 0; i < dim; i++) { sum = sum + tempVec[i] * diff[i]; } #if 0 printf("sum = %10.7f\n", sum.getValue()); #endif delete[] diff; delete[] tempVec; if (sum < 0.0) { fprintf(stderr, "Error: metric is negative! 
at %s, line %d.\n",__FILE__, __LINE__); fprintf(stderr, "metric val = %10.7f\n",sum.getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } return sum; } codi::RealForward calcMetric(float *xi, float *xj, codi::RealForward *M, int dim) { #if 0 printf("calling calcMetric (adjoint)...\n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } #endif codi::RealForward *diff = new codi::RealForward[dim]; for (int i = 0; i < dim; i++) { diff[i] = xi[i] - xj[i]; } #if 0 rowvec xi_val(dim); rowvec xj_val(dim); rowvec diff_val(dim); mat M_val(dim, dim); for (int i = 0; i < dim; i++) { xi_val(i) = xi[i]; xj_val(i) = xj[i]; } for (int i = 0; i < dim; i++) for (int j = 0; j < dim; j++) { M_val(i, j) = M[i*dim+j].getValue(); } diff_val = xi_val - xj_val; printf("diff_val=\n"); diff_val.print(); colvec diffT = trans(diff_val); vec matVecProd = M_val * diffT; printf("M * xdiff = \n"); matVecProd.print(); float metric_val = dot(diff_val, M_val * diffT); printf("metric_val = %10.7f\n", metric_val); #endif codi::RealForward *tempVec = new codi::RealForward[dim]; codi::RealForward sum = 0.0; for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { sum = sum + M[i*dim+j] * diff[j]; } tempVec[i] = sum; sum = 0.0; } #if 0 printf("tempVec = \n"); for (int i = 0; i < dim; i++) { printf("%10.7f \n", tempVec[i].getValue()); } #endif sum = 0.0; for (int i = 0; i < dim; i++) { sum = sum + tempVec[i] * diff[i]; } #if 0 printf("sum = %10.7f\n", sum.getValue()); #endif delete[] diff; delete[] tempVec; if (sum < 0.0) { fprintf(stderr, "Error: metric is negative! at %s, line %d.\n",__FILE__, __LINE__); fprintf(stderr, "metric val = %10.7f\n",sum.getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } return sum; } float gaussianKernel(float *xi, float *xj, float sigma, float *M, int dim) { #if 0 printf("calling gaussianKernel...\n"); #endif /* calculate distance between xi and xj with the matrix M */ float metricVal = calcMetric(xi, xj, M, dim); #if 0 printf("metricVal = %10.7f\n",metricVal); #endif float sqr_two_pi = sqrt(2.0 * datum::pi); float kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); #if 0 printf("kernelVal = %10.7f\n",kernelVal); #endif if(isnan(kernelVal)){ fprintf(stderr, "Error: kernel value is NaN! at %s, line %d.\n",__FILE__, __LINE__); exit(-1); } if(kernelVal < 0.0){ fprintf(stderr, "Error: kernel value is negative! at %s, line %d.\n",__FILE__, __LINE__); exit(-1); } kernelVal += 10E-14; return kernelVal; } codi::RealReverse gaussianKernel(float *xi, float *xj, codi::RealReverse sigma, codi::RealReverse *M, int dim) { #if 0 printf("calling gaussianKernel...\n"); #endif /* calculate distance between xi and xj with the matrix M */ codi::RealReverse metricVal = calcMetric(xi, xj, M, dim); #if 0 printf("metricVal = %10.7f\n",metricVal.getValue()); #endif float sqr_two_pi = sqrt(2.0 * datum::pi); codi::RealReverse kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); if(isnan(kernelVal.getValue())){ fprintf(stderr, "Error: kernel value is NaN! 
at %s, line %d.\n",__FILE__, __LINE__); printf("sigma = %10.7f\n",sigma.getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } kernelVal += 10E-14; // printf("EPSILON = %10.7f ", EPSILON); if(kernelVal.getValue() < 0.0){ fprintf(stderr, "Error: kernel value is negative or zero! at %s, line %d.\n",__FILE__, __LINE__); fprintf(stderr, "kernelVal = %20.15f\n",kernelVal.getValue() ); fprintf(stderr, "metric val = %20.15f\n",metricVal.getValue()); fprintf(stderr, "sigma = %20.15f\n",sigma.getValue()); fprintf(stderr, "exp(-metricVal / (2 * sigma * sigma)) = %20.15f\n",exp(-metricVal / (2 * sigma * sigma)).getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } #if 0 printf("kernelVal = %10.7f\n",kernelVal.getValue()); #endif return kernelVal; } codi::RealForward gaussianKernel(float *xi, float *xj, codi::RealForward sigma, codi::RealForward *M, int dim) { /* calculate distance between xi and xj with the matrix M */ codi::RealForward metricVal = calcMetric(xi, xj, M, dim); float sqr_two_pi = sqrt(2.0 * datum::pi); codi::RealForward kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-metricVal / (2 * sigma * sigma)); if(isnan(kernelVal.getValue())){ fprintf(stderr, "Error: kernel value is NaN! at %s, line %d.\n",__FILE__, __LINE__); printf("sigma = %10.7f\n",sigma.getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } kernelVal += 10E-14; // printf("EPSILON = %10.7f ", EPSILON); if(kernelVal.getValue() < 0.0){ fprintf(stderr, "Error: kernel value is negative or zero! 
at %s, line %d.\n",__FILE__, __LINE__); fprintf(stderr, "kernelVal = %20.15f\n",kernelVal.getValue() ); fprintf(stderr, "metric val = %20.15f\n",metricVal.getValue()); fprintf(stderr, "sigma = %20.15f\n",sigma.getValue()); fprintf(stderr, "exp(-metricVal / (2 * sigma * sigma)) = %20.15f\n",exp(-metricVal / (2 * sigma * sigma)).getValue()); printf("M = \n"); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { printf("%10.7f ", M[i*dim+j].getValue()); } printf("\n"); } exit(-1); } #if 0 printf("kernelVal = %10.7f\n",kernelVal.getValue()); #endif return kernelVal; } void calcLossFunCPU(float *result, float *input, float *data,int N){ float LT[numVar][numVar]; float L[numVar][numVar]; float M[numVar*numVar+1]; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) L[i][j] = input[i*numVar + j]; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) LT[i][j] = 0.0; for (int i = 0; i < numVar; ++i) { for (int j = 0; j < i+1; ++j) LT[j][i] = L[i][j]; } for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) M[i*numVar + j] = 0; /* Multiplying matrix L and LT and storing in M */ for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) for (int k = 0; k < numVar; ++k) M[i*numVar + j] = M[i*numVar + j] + L[i][k]*LT[k][j]; M[numVar*numVar] = input[numVar*numVar]; float sigma = M[numVar*numVar]; float *xp = new float[numVar]; float *xi = new float[numVar]; float *kernelVal = new float[N]; float lossFunc = 0.0; for (int i = 0; i < N; i++) { #if 0 printf("kernel regression for the sample number %d\n",i); #endif for (int k = 0; k < numVar; k++) { xp[k] = data[i*(numVar+1)+k]; } float kernelSum = 0.0; for (int j = 0; j < N; j++) { if (i != j) { for (int k = 0; k < numVar; k++) { xi[k] = data[j*(numVar+1)+k]; } kernelVal[j] = gaussianKernel(xi, xp, sigma, M, numVar); kernelSum += kernelVal[j]; #if 0 printf("kernelVal[%d]=%10.7f\n",j,kernelVal[j]); #endif } } float fApprox = 0.0; for (int j = 0; j < N; j++) { if (i != j) { fApprox += kernelVal[j] * data[j*(numVar+1)+numVar]; } } fApprox = fApprox / kernelSum; #if 0 printf("fApprox = %10.7f\n",fApprox); printf("fExact = %10.7f\n",data[i*(numVar+1)+numVar]); #endif lossFunc += (fApprox - data[i*(numVar+1)+numVar]) * (fApprox - data[i*(numVar+1)+numVar]); } // end of i loop lossFunc = lossFunc / N; *result = lossFunc; delete[] xp; delete[] xi; delete[] kernelVal; } void calcLossFunCPU(codi::RealReverse *result, codi::RealReverse *input, float *inputb,float *data,int N){ /* activate tape and register input */ codi::RealReverse::TapeType& tape = codi::RealReverse::getGlobalTape(); tape.setActive(); for (int i = 0; i < numVar*numVar+1; i++) { tape.registerInput(input[i]); } codi::RealReverse LT[numVar][numVar]; codi::RealReverse L[numVar][numVar]; codi::RealReverse M[numVar*numVar+1]; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) M[i*numVar + j] = 0; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) L[i][j] = input[i*numVar + j]; #if 0 printf("L = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",L[i][j].getValue()); } printf("\n"); } #endif for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) { LT[i][j] = 0.0; } for (int i = 0; i < numVar; ++i) { for (int j = 0; j < i+1; ++j){ LT[j][i] = L[i][j]; } } #if 0 printf("LT = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",LT[i][j].getValue()); } printf("\n"); } #endif /* Multiplying matrix L and LT and storing in M */ for (int i = 0; i 
< numVar; ++i) for (int j = 0; j < numVar; ++j) for (int k = 0; k < numVar; ++k) { M[i*numVar + j] = M[i*numVar + j] + L[i][k]*LT[k][j]; } #if 0 printf("M = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",M[i*numVar + j].getValue()); } printf("\n"); } #endif M[numVar*numVar] = input[numVar*numVar]; codi::RealReverse sigma = M[numVar*numVar]; float *xi = new float[numVar]; float *xj = new float[numVar]; codi::RealReverse **kernelValTable = new codi::RealReverse*[N]; for(int i=0; i<N;i++) { kernelValTable[i] = new codi::RealReverse[N]; } for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) kernelValTable[i][j] = 0.0; for (int i = 0; i < N; i++) { for (int j = i+1; j < N; j++) { for (int k = 0; k < numVar; k++) { xi[k] = data[i*(numVar+1)+k]; xj[k] = data[j*(numVar+1)+k]; } kernelValTable[i][j] = gaussianKernel(xi, xj, sigma, M, numVar); kernelValTable[j][i] = kernelValTable[i][j]; // printf("%d kernelValTable[%d][%d] = %10.7f\n",i*N+j,i,j,kernelValTable[i][j].getValue()); } } codi::RealReverse lossFunc = 0.0; for (int i = 0; i < N; i++) { #if 0 printf("kernel regression for the sample number %d\n",i); #endif codi::RealReverse kernelSum = 0.0; for (int j = 0; j < N; j++) { if (i != j) { kernelSum += kernelValTable[i][j]; } } codi::RealReverse fApprox = 0.0; for (int j = 0; j < N; j++) { if (i != j) { fApprox += kernelValTable[i][j] * data[j*(numVar+1)+numVar]; } } fApprox = fApprox / kernelSum; #if 0 printf("fApprox = %10.7f\n",fApprox.getValue()); printf("fExact = %10.7f\n",data[i*(numVar+1)+numVar]); #endif lossFunc += (fApprox - data[i*(numVar+1)+numVar]) * (fApprox - data[i*(numVar+1)+numVar]); } // end of i loop lossFunc = lossFunc / N; #if 1 printf("lossFunc (reverse mode CodiPack) = %10.7f\n",lossFunc.getValue()); #endif *result = lossFunc; tape.registerOutput(*result); tape.setPassive(); result->setGradient(1.0); tape.evaluate(); #if 0 printf("Mb = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",M[i*numVar + j].getGradient()); } printf("\n"); } #endif for (int i = 0; i < numVar*numVar+1; i++) { inputb[i] = input[i].getGradient(); } tape.reset(); delete[] xi; delete[] xj; for(int i=0; i<N;i++) { delete[] kernelValTable[i]; } delete[] kernelValTable; } void calcLossFunCPU(codi::RealForward *result, codi::RealForward *input,int tldIndx, float *data,int N){ input[tldIndx].setGradient(1.0); codi::RealForward LT[numVar][numVar]; codi::RealForward L[numVar][numVar]; codi::RealForward M[numVar*numVar+1]; for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) L[i][j] = input[i*numVar + j]; #if 0 printf("L = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",L[i][j].getValue()); } printf("\n"); } #endif for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) { LT[i][j] = 0.0; } for (int i = 0; i < numVar; ++i) { for (int j = 0; j < i+1; ++j){ LT[j][i] = L[i][j]; } } #if 0 printf("LT = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",LT[i][j].getValue()); } printf("\n"); } #endif for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) M[i*numVar + j] = 0; /* Multiplying matrix L and LT and storing in M */ for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) for (int k = 0; k < numVar; ++k) { M[i*numVar + j] = M[i*numVar + j] + L[i][k]*LT[k][j]; } #if 0 printf("M = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",M[i*numVar + j].getValue()); } 
printf("\n"); } #endif M[numVar*numVar] = input[numVar*numVar]; codi::RealForward sigma = M[numVar*numVar]; float *xi = new float[numVar]; float *xj = new float[numVar]; codi::RealForward **kernelValTable = new codi::RealForward*[N]; for(int i=0; i<N;i++) { kernelValTable[i] = new codi::RealForward[N]; } for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) kernelValTable[i][j] = 0.0; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { if(j>i){ for (int k = 0; k < numVar; k++) { xi[k] = data[i*(numVar+1)+k]; xj[k] = data[j*(numVar+1)+k]; } kernelValTable[i][j] = gaussianKernel(xi, xj, sigma, M, numVar); } } } codi::RealForward lossFunc = 0.0; for (int i = 0; i < N; i++) { #if 0 printf("kernel regression for the sample number %d\n",i); #endif codi::RealForward kernelSum = 0.0; for (int j = 0; j < N; j++) { if (i != j) { kernelSum += kernelValTable[i][j]; } } codi::RealForward fApprox = 0.0; for (int j = 0; j < N; j++) { if (i != j) { fApprox += kernelValTable[i][j] * data[j*(numVar+1)+numVar]; } } fApprox = fApprox / kernelSum; #if 0 printf("fApprox = %10.7f\n",fApprox.getValue()); printf("fExact = %10.7f\n",data[i*(numVar+1)+numVar]); #endif lossFunc += (fApprox - data[i*(numVar+1)+numVar]) * (fApprox - data[i*(numVar+1)+numVar]); } // end of i loop lossFunc = lossFunc / N; #if 0 printf("lossFunc = %10.7f\n",lossFunc.getValue()); #endif *result = lossFunc; delete[] xi; delete[] xj; for(int i=0; i<N;i++) { delete[] kernelValTable[i]; } delete[] kernelValTable; } __global__ void calculateKernelValues_b(float *ab, float *X, float *kernelValTable, float *kernelValTableb, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; float sigma = MDevice[numVar*numVar]; float sigmab = 0.0; /* calculate column index */ int indx2 = tid%N; /* calculate row index */ int indx1 = tid/N; if (indx2 > indx1) { int off1 = indx1*(numVar+1); int off2 = indx2*(numVar+1); float diff[numVar]; float tempVec[numVar]; float tempVecb[numVar]; float sumb = 0.0; float kernelValb = 0.0; float temp; float temp0; float tempb; float tempb0; for (int k = 0; k < numVar; ++k) diff[k] = X[off1 + k] - X[off2 + k]; float sum = 0.0; for (int i = 0; i < numVar; ++i) { for (int j = 0; j < numVar; ++j) sum = sum + MDevice[i*numVar+j]*diff[j]; tempVec[i] = sum; sum = 0.0; } sum = 0.0; for (int i = 0; i < numVar; ++i) sum = sum + tempVec[i]*diff[i]; float sqr_two_pi; sqr_two_pi = sqrt(2.0*3.14159265359); float kernelVal = 1.0/(sigma*sqr_two_pi)*exp(-sum/(2*sigma*sigma))+10E-12; kernelValb = kernelValTableb[indx1*N + indx2]; kernelValTableb[indx1*N + indx2] = 0.0; tempb = kernelValb/(sqr_two_pi*sigma); temp = 2*(sigma*sigma); temp0 = sum/temp; // temp0 = sum/2*(sigma*sigma) tempb0 = -(exp(-temp0)*tempb/temp); // -(exp(-sum/2*(sigma*sigma))*kernelValb/(sqr_two_pi*sigma)/temp) sumb = tempb0; sigmab = -(exp(-temp0)*tempb/sigma) - 2*2*temp0*sigma*tempb0; for (int i = 0; i < numVar; ++i){ tempVecb[i] = 0.0; } for (int i = numVar-1; i > -1; --i){ tempVecb[i] = tempVecb[i] + diff[i]*sumb; } for (int i = numVar-1; i > -1; --i) { sumb = tempVecb[i]; tempVecb[i] = 0.0; for (int j = numVar-1; j > -1; --j){ float addTerm = diff[j]*sumb; atomicAdd( &ab[i*numVar + j],addTerm ); } // ab[i*numVar + j] = ab[i*numVar + j] + diff[j]*sumb; } } atomicAdd( &ab[numVar*numVar],sigmab ); // ab[numVar*numVar] = ab[numVar*numVar] + sigmab; } __global__ void calculateKernelValues(float *X, float *kernelValTable, int N){ int tid = threadIdx.x + blockIdx.x * blockDim.x; float sigma = MDevice[numVar*numVar]; /* calculate column index */ int indx2 = tid%N; 
/* calculate row index */ int indx1 = tid/N; if(indx2 > indx1){ int off1 = indx1*(numVar+1); int off2 = indx2*(numVar+1); float diff[numVar]; for (int k = 0; k < numVar; k++) { diff[k] = X[off1+k] - X[off2+k]; } float tempVec[numVar]; float sum = 0.0; for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { sum = sum + MDevice[i*numVar+j] * diff[j]; } tempVec[i] = sum; sum = 0.0; } sum = 0.0; for (int i = 0; i < numVar; i++) { sum = sum + tempVec[i] * diff[i]; } float sqr_two_pi = sqrt(2.0 * 3.14159265359); float kernelVal = (1.0 / (sigma * sqr_two_pi))* exp(-sum / (2 * sigma * sigma)) + 10E-12; kernelValTable[indx1*N+indx2]= kernelVal; } } __global__ void calculateLossKernel(float *X,float *kernelValTable, float *sum, int N){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < N){ float lossFunc = 0.0; float kernelSum = 0.0; for(int i=0; i<N; i++){ if(tid != i){ int indxKernelValTable; if(i<tid) { indxKernelValTable = i*N+tid; } else{ indxKernelValTable = tid*N+i; } kernelSum += kernelValTable[indxKernelValTable]; } } float fapprox=0.0; for(int i=0; i<N; i++){ if(tid != i){ int indxKernelValTable; if(i<tid) { indxKernelValTable = i*N+tid; } else{ indxKernelValTable = tid*N+i; } fapprox += (kernelValTable[indxKernelValTable]/kernelSum)* X[i*(numVar+1)+numVar]; } } // lossFunc = (fapprox - X[tid*(numVar+1)+numVar]) * (fapprox - X[tid*(numVar+1)+numVar]); lossFunc = fabs(fapprox - X[tid*(numVar+1)+numVar]); sum[tid] = lossFunc; } } __global__ void calculateLossKernel_b(float *X, float *kernelValTable, float * kernelValTableb, float *sum, float *sumb, int N ) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N) { float lossFunc; float lossFuncb; float kernelSum=0.0; float kernelSumb; float fapproxb; for (int i = 0; i < N; ++i){ if (tid != i) { int indxKernelValTable; if (i < tid) indxKernelValTable = i*N + tid; else indxKernelValTable = tid*N + i; kernelSum = kernelSum + kernelValTable[indxKernelValTable]; } } float fapprox = 0.0; for (int i = 0; i < N; ++i){ if (tid != i) { int indxKernelValTable; if (i < tid) indxKernelValTable = i*N + tid; else indxKernelValTable = tid*N + i; fapprox = fapprox + kernelValTable[indxKernelValTable]/ kernelSum*X[i*(numVar+1)+numVar]; #if 0 if (isnan (fapprox ) || isinf (fapprox) ){ printf("fapprox is NaN or inf %10.7f\n",kernelSum); assert(0); } #endif } } // lossFunc = (fapprox - X[tid*(numVar+1)+numVar]) * (fapprox - X[tid*(numVar+1)+numVar]); lossFunc = fabs ( (fapprox - X[tid*(numVar+1)+numVar]) ); sum[tid] = lossFunc; lossFuncb = sumb[tid]; #if 0 if (isnan (sumb[tid] ) || isinf (sumb[tid]) ){ printf("sumb[tid] is NaN or inf!\n"); } #endif sumb[tid] = 0.0; // fapproxb = 2*(fapprox-X[tid*(numVar+1)+numVar])*lossFuncb; if((fapprox - X[tid*(numVar+1)+numVar]) >= 0){ fapproxb = lossFuncb; } else{ fapproxb = -lossFuncb; } kernelSumb = 0.0; for (int i = N-1; i > -1; --i) { if (tid != i) { float tempb; int indxKernelValTable; if (i < tid) indxKernelValTable = i*N + tid; else indxKernelValTable = tid*N + i; tempb = X[i*(numVar+1)+numVar]*fapproxb/kernelSum; kernelValTableb[indxKernelValTable] = kernelValTableb[indxKernelValTable] + tempb; kernelSumb = kernelSumb - kernelValTable[indxKernelValTable]* tempb/kernelSum; } } for (int i = N-1; i > -1; --i) { if (tid != i) { int indxKernelValTable; if (i < tid) indxKernelValTable = i*N + tid; else indxKernelValTable = tid*N + i; kernelValTableb[indxKernelValTable] = kernelValTableb[indxKernelValTable] + kernelSumb; } } } } void calcLossFunGPU(float *result, float *input, float *data,int N){ 
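/* host-side driver: forms M = L * L^T from the lower-triangular input, stores M and sigma in constant memory, and launches the GPU kernels that evaluate the leave-one-out regression loss */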
cudaEvent_t start, stop; cudaEventCreate( &start ) ; cudaEventCreate( &stop ) ; cudaEventRecord( start, 0 ) ; // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; float LT[numVar][numVar]; float L[numVar][numVar]; float M[numVar*numVar+1]; for (int i = 0; i < numVar; i++) for (int j = 0; j < numVar; j++) { L[i][j]=input[i*numVar+j]; } #if 1 printf("Data (host) = \n"); for (int i = 0; i < N; i++) { for (int j = 0; j < numVar+1; j++) { printf("%10.7f ", data[i*(numVar+1)+j]); } printf("\n"); } #endif #if 1 printf("L = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", L[i][j]); } printf("\n"); } #endif for (int i = 0; i < numVar; i++) for (int j = 0; j < numVar; j++) { LT[i][j]=0.0; } for (int i = 0; i < numVar; i++) { for (int j = 0; j <= i; j++){ LT[j][i] = L[i][j]; } } #if 1 printf("LT = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", LT[i][j]); } printf("\n"); } #endif for(int i = 0; i < numVar; ++i) for(int j = 0; j < numVar; ++j) { M[i*numVar+j]=0; } /* Multiplying matrix L and LT and storing in M */ for(int i = 0; i < numVar; ++i) for(int j = 0; j < numVar; ++j) for(int k = 0; k < numVar; ++k) { M[i*numVar+j] += L[i][k] * LT[k][j]; } #if 0 printf("M = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", M[i*numVar+j]); } printf("\n"); } #endif M[numVar*numVar] = input[numVar*numVar]; /* copy the values of M to the constant memory */ err= cudaMemcpyToSymbol(MDevice,M, (numVar*numVar+1)*sizeof(float)); //for(int i=0; i<numVar*numVar+1; i++)MDevice[i] = M[i]; if (err != cudaSuccess) { fprintf(stderr, "Failed to copy matrix M from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *dataDevice; // allocate the memory on the GPU for the data matrix err = cudaMalloc(&dataDevice, N *(numVar+1) * sizeof(float) ) ; if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector data (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(dataDevice, data, N *(numVar+1) *sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector data from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *kernelValuesDevice; // allocate the memory on the GPU for kernel Values err = cudaMalloc(&kernelValuesDevice, N*N* sizeof(float) ) ; if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector kernel values (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } int number_of_blocks = (N*N+number_of_threads_per_block-1)/number_of_threads_per_block; printf("Launching the first kernel with %d blocks...\n",number_of_blocks); calculateKernelValues<<<number_of_blocks,number_of_threads_per_block>>>(dataDevice, kernelValuesDevice, N); cudaDeviceSynchronize(); printf("Kernel: calculateKernelValues is done ...\n"); #if 1 /* this part is for validation */ mat Mval(numVar,numVar); mat Xval(N,numVar); vec ys(N); float *kernelValuesHost = new float[N*N]; err = cudaMemcpy(kernelValuesHost, kernelValuesDevice, N*N*sizeof(float), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector kernelValues from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } for(int i=0; i<numVar; i++){ for(int j=0; j<numVar; j++){ Mval(i,j) = M[i*numVar+j]; } } printf("Mval = \n"); Mval.print(); for(int i=0; i<N; i++){ for(int j=0; 
j<numVar; j++){ Xval(i,j) = data[i*(numVar+1)+j]; } ys(i) = data[i*(numVar+1)+(numVar)]; } printf("Xval = \n"); Xval.print(); printf("ys = \n"); ys.print(); float sigma = input[numVar*numVar]; rowvec xi,xj; for(int i=0; i<N; i++){ for(int j=i+1; j<N; j++){ xi = Xval.row(i); xj = Xval.row(j); float kernelValCPU = calcKernelValCPU(xi, xj, Mval, sigma); float kernelValGPU = kernelValuesHost[i*N+j]; printf("kernelValCPU = %19.7f, kernelValGPU = %19.7f, error = %15.12f\n",kernelValCPU,kernelValGPU,kernelValCPU-kernelValGPU); } } delete[] kernelValuesHost; #endif /* allocate the memory on the GPU for the kernelsum */ float *lossSumDevice; err = cudaMalloc(&lossSumDevice, N * sizeof(float) ) ; if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector lossSumDevice (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *lossSumHost = new float[N]; number_of_blocks = (N+number_of_threads_per_block-1)/number_of_threads_per_block; printf("Launching the second kernel with %d blocks...\n",number_of_blocks); calculateLossKernel<<<number_of_blocks,number_of_threads_per_block>>>(dataDevice,kernelValuesDevice, lossSumDevice, N); cudaDeviceSynchronize(); printf("Kernel: calculateLossKernel is done ...\n"); err = cudaMemcpy(lossSumHost, lossSumDevice, N*sizeof(float), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector lossSum from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } #if 0 vec lossValCPU(N); /* this part is for validation */ for (int i=0; i<N; i++) { rowvec xi = Xval.row(i); float kernelSum=0.0; for(int j=0; j<N; j++){ rowvec xj = Xval.row(j); if(i !=j){ float kernelVal = calcKernelValCPU(xi, xj, Mval, sigma); kernelSum += kernelVal; } } float sum = 0.0; for(int j=0; j<N; j++){ rowvec xj = Xval.row(j); float kernelVal = calcKernelValCPU(xi, xj, Mval, sigma); if(i !=j){ sum+=ys(j)*kernelVal; } } sum = sum/kernelSum; lossValCPU(i) = (ys(i)-sum)*(ys(i)-sum); } for (int i=0; i<N; i++) { printf( "lossGPU[%d] = %10.7f, lossGPU[%d] = %10.7f\n", i,lossSumHost[i],i, lossValCPU(i)); } #endif float totalLoss=0.0; for (int i=0; i<N; i++) { totalLoss+=lossSumHost[i]; } *result = totalLoss/N; cudaEventRecord( stop, 0 ) ; cudaEventSynchronize( stop ) ; float elapsedTime; cudaEventElapsedTime( &elapsedTime,start, stop ) ; printf( "Time to generate:%3.1f ms\n", elapsedTime ); cudaEventDestroy( start ) ; cudaEventDestroy( stop ) ; delete[] lossSumHost; cudaFree(lossSumDevice); cudaFree(kernelValuesDevice); cudaFree(dataDevice); } void calcLossFunGPU_b(float *result, float *resultb, float *input, float *inputb, float *data, int N) { #if 0 printf("calling calcLossFunGPU_b...\n"); printf("resultb = %10.7f\n",*resultb); printf("Data has %d points\n",N); #endif #if 0 cudaEvent_t start, stop; cudaEventCreate( &start ) ; cudaEventCreate( &stop ) ; cudaEventRecord( start, 0 ) ; #endif // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; float LT[numVar][numVar]; float LTb[numVar][numVar]; float L[numVar][numVar]; float Lb[numVar][numVar]; float M[numVar*numVar + 1]; float Mb[numVar*numVar + 1]; for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ L[i][j] = input[i*numVar + j]; Lb[i][j] = 0.0; LT[i][j] = 0.0; LTb[i][j] = 0.0; } } for (int i = 0; i < numVar; ++i) { for (int j = 0; j < i+1; ++j) LT[j][i] = L[i][j]; } #if 0 printf("L = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",L[i][j]); } printf("\n"); } printf("LT = \n"); 
for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",LT[i][j]); } printf("\n"); } #endif for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) { M[i*numVar + j] = 0; Mb[i*numVar + j] = 0; } /* Multiplying matrix L and LT and storing in M */ for (int i = 0; i < numVar; ++i) for (int j = 0; j < numVar; ++j) for (int k = 0; k < numVar; ++k) M[i*numVar + j] = M[i*numVar + j] + L[i][k]*LT[k][j]; #if 0 printf("M = \n"); for (int i = 0; i < numVar; ++i){ for (int j = 0; j < numVar; ++j){ printf("%10.7f ",M[i*numVar + j]); } printf("\n"); } #endif M[numVar*numVar] = input[numVar*numVar]; #if 0 printf("sigma = %10.7f\n", M[numVar*numVar]); #endif /* copy the values of M to the constant memory "MDevice"*/ err= cudaMemcpyToSymbol(MDevice,M, (numVar*numVar+1)*sizeof(float)); //for(int i=0; i<numVar*numVar+1; i++)MDevice[i] = M[i]; if (err != cudaSuccess) { fprintf(stderr, "Failed to copy matrix M from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *dataDevice; /* allocate the memory on the GPU for the data matrix */ err = cudaMalloc(&dataDevice, N *(numVar+1) * sizeof(float) ) ; if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector data (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(dataDevice, data, N *(numVar+1) *sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector data from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *kernelValuesDevice; // allocate the memory on the GPU for kernel Values err = cudaMalloc(&kernelValuesDevice, N*N* sizeof(float) ) ; if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector --kernelValuesDevice-- (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaMemset(kernelValuesDevice, 0, N*N* sizeof(float)); float *kernelValuesDeviceb; // allocate the memory on the GPU for kernel Values err = cudaMalloc(&kernelValuesDeviceb, N*N* sizeof(float) ) ; if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector --kernelValuesDeviceb-- (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // cudaMemset(kernelValuesDeviceb, 0, N*N* sizeof(float)); float *kernelValuesHostb = new float[N*N]; for(int i=0; i<N*N; i++) { kernelValuesHostb[i] = 0.0; } err = cudaMemcpy(kernelValuesDeviceb, kernelValuesHostb, (N*N) *sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector kernelValuesDeviceb from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *MDeviceb; // allocate the memory on the GPU for kernel Values err = cudaMalloc(&MDeviceb, (numVar*numVar + 1)* sizeof(float) ) ; if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector MDeviceb (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *MHostb = new float[numVar*numVar + 1]; for(int i=0; i<numVar*numVar + 1; i++) { MHostb[i] = 0.0; } err = cudaMemcpy(MDeviceb, MHostb, (numVar*numVar + 1) *sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector MHostb from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } delete[] MHostb; /* init adjoint of M to zero */ // cudaMemset(MDeviceb, 0, (numVar*numVar + 1)* sizeof(float)); int number_of_blocks = (N*N+number_of_threads_per_block-1)/number_of_threads_per_block; #if 0 
printf("Launching the first primal kernel with %d blocks...\n",number_of_blocks); #endif calculateKernelValues<<<number_of_blocks,number_of_threads_per_block>>>(dataDevice, kernelValuesDevice, N); cudaDeviceSynchronize(); #if 0 printf("The primal kernel : calculateKernelValues is done...\n"); #endif float *lossSumDevice; // allocate the memory on the GPU for kernel Values err = cudaMalloc(&lossSumDevice, N*sizeof(float) ) ; if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector --lossSumDevice-- (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaMemset(lossSumDevice,0,N*sizeof(float)); number_of_blocks = (N+number_of_threads_per_block-1)/number_of_threads_per_block; #if 0 printf("Launching the second primal kernel + adjoint with %d blocks...\n",number_of_blocks); #endif float totalLossb = 0.0; totalLossb = *resultb/N; float *lossSumHostb = new float[N]; for(int i=0; i<N;i++) lossSumHostb[i] = 0;; float *lossSumDeviceb; // allocate the memory on the GPU for kernel Values err = cudaMalloc(&lossSumDeviceb, N*sizeof(float) ) ; if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector --lossSumDeviceb-- (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaMemset(lossSumDeviceb,0,N*sizeof(float)); for (int i = N-1; i > -1; --i) lossSumHostb[i] = lossSumHostb[i] + totalLossb; #if 0 for (int i = N-1; i > -1; --i) printf("lossSumHostb[i] = %10.7f\n",i,lossSumHostb[i]); #endif err = cudaMemcpy(lossSumDeviceb, lossSumHostb, N *sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector --lossSumDevice-- from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // cudaMemset(kernelValuesDeviceb, 0, (N*N)* sizeof(float)); /* this subroutine evaluates the lossSumDevice and kernelValuesDeviceb */ calculateLossKernel_b<<<number_of_blocks,number_of_threads_per_block>>>(dataDevice,kernelValuesDevice,kernelValuesDeviceb, lossSumDevice,lossSumDeviceb, N); cudaDeviceSynchronize(); //cudaDeviceSynchronize(); #if 0 printf("Kernel: calculateLossKernel_b is done ...\n"); #endif err = cudaMemcpy(kernelValuesHostb, kernelValuesDeviceb, N*N *sizeof(float), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector --kernelValues-- from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *lossSumHost = new float[N](); err = cudaMemcpy(lossSumHost, lossSumDevice, N*sizeof(float), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector lossSum from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float totalLoss=0.0; for (int i=0; i<N; i++) { totalLoss+=lossSumHost[i]; } *result = totalLoss/N; #if 0 printf("result = %10.7f\n",*result); #endif /* reverse sweep starts from here */ cudaMemset(MDeviceb, 0, (numVar*numVar + 1)* sizeof(float)); number_of_blocks = (N*N+number_of_threads_per_block-1)/number_of_threads_per_block; #if 0 printf("Launching the second adjoint kernel with %d blocks...\n",number_of_blocks); #endif /* this subroutine evaluates MDeviceb */ calculateKernelValues_b<<<number_of_blocks,number_of_threads_per_block>>>(MDeviceb, dataDevice, kernelValuesDevice, kernelValuesDeviceb, N); cudaDeviceSynchronize(); #if 0 printf("Kernel: calculateKernelValues_b is done ...\n"); #endif for (int ii1 = 0; ii1 < numVar*numVar+1; ++ii1) { Mb[ii1] = 0.0; } err = cudaMemcpy(Mb, MDeviceb, (numVar*numVar+1)*sizeof(float), 
cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector Mb from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } #if 0 printf("Mb = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", Mb[i*numVar+j]); } printf("\n"); } #endif for (int i = numVar-1; i > -1; --i) for (int j = numVar-1; j > -1; --j) for (int k = numVar-1; k > -1; --k) { Lb[i][k] = Lb[i][k] + LT[k][j]*Mb[i*numVar+j]; LTb[k][j] = LTb[k][j] + L[i][k]*Mb[i*numVar+j]; } for (int i = numVar-1; i > -1; --i) { for (int j = i; j > -1; --j) { Lb[i][j] = Lb[i][j] + LTb[j][i]; LTb[j][i] = 0.0; } } for (int i = numVar-1; i > -1; --i) for (int j = numVar-1; j > -1; --j) { inputb[i*numVar + j] = inputb[i*numVar + j] + Lb[i][j]; Lb[i][j] = 0.0; } inputb[numVar*numVar] = Mb[numVar*numVar]; #if 0 printf("inputb = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", inputb[i*numVar+j]); } printf("\n"); } printf("sigmab = %10.7f\n", inputb[numVar*numVar]); #endif cudaFree(dataDevice); cudaFree(kernelValuesDeviceb); cudaFree(kernelValuesDevice); cudaFree(lossSumDeviceb); cudaFree(lossSumDevice); cudaFree(MDeviceb); delete[] lossSumHost; delete[] lossSumHostb; delete[] kernelValuesHostb; } float kernelRegressor(fmat &X, fvec &y, frowvec &xp, fmat &M, float sigma) { int d = y.size(); fvec kernelVal(d); fvec weight(d); float kernelSum = 0.0; float yhat = 0.0; for (int i = 0; i < d; i++) { frowvec xi = X.row(i); kernelVal(i) = gaussianKernel(xi, xp, sigma, M); kernelSum += kernelVal(i); } for (int i = 0; i < d; i++) { weight(i) = kernelVal(i) / kernelSum; yhat += y(i) * weight(i); #if 0 printf("y(%d) * weight(%d) = %10.7f * %10.7f\n",i,i,y(i),weight(i) ); #endif } return yhat; } /* * train the Mahalanobis matrix M and bandwidth parameter sigma * @param[in] data: sample data matrix (normalized values) * @param[in] max_cv_iter: number of iterations for cross validation loop * @param[out] wSvd: weight for svd regularization * @param[out] w12: weight for mixed 12norm regularization * @param[out] M: Mahalanobis matrix * @param[out] sigma: bandwidth parameter for the Gaussian kernel * * */ int trainMahalanobisDistance(fmat &L, fmat &data, float &sigma, float &wSvd, float &w12,int max_cv_iter) { int max_opt_iter = 40000; unsigned int n = L.n_cols; unsigned int m = L.n_cols; float alpha = 0.9; if(m != n || m!=numVar || n!=numVar){ fprintf(stderr,"Cols: %d and Rows: %d\n",n, m); fprintf(stderr,"Error: The Mahalanobis matrix is not square!\n"); exit(-1); } int Ldim = numVar*numVar; /* lower diagonal matrix Lbest to keep the best L*/ fmat bestL(numVar,numVar); bestL.fill(0.0); float bestsigma = 0.0; /* divide the data set into training and validation sets */ unsigned int N = data.n_rows; /* size of the validation set, default to one fifth */ unsigned int NvalidationSet = N/5; unsigned int Ntraining = N - NvalidationSet; #if 1 printf("number of training samples (core) = %d\n",Ntraining); printf("number of validation samples = %d\n",NvalidationSet); #endif fmat dataTraining = data.submat( 0, 0, Ntraining-1, numVar ); fmat dataValidation = data.submat( Ntraining, 0, N-1, numVar ); fmat XValidation = dataValidation.submat(0,0,NvalidationSet-1,numVar-1); fvec yValidation = dataValidation.col(numVar); fmat XTraining = dataTraining.submat(0,0,Ntraining-1,numVar-1); fvec yTraining = dataTraining.col(numVar); #if 0 printf("Training data set = \n"); dataTraining.print(); printf("Validation data set = \n"); 
dataValidation.print(); #endif #if 0 printf("XTraining = \n"); XTraining.print(); printf("yTraining = \n"); yTraining.print(); #endif #if 0 printf("XValidation = \n"); XValidation.print(); printf("yValidation = \n"); yValidation.print(); #endif fvec wSvdtrial(max_cv_iter); fvec w12trial(max_cv_iter); if(max_cv_iter !=1){ for(int i=0; i<max_cv_iter; i++){ wSvdtrial(i) = pow(10.0,RandomFloat(-2,0.0)); w12trial(i) = pow(10.0,RandomFloat(-2,0.0)); } #if 1 printf("wSvdtrial = \n"); wSvdtrial.print(); printf("w12trial = \n"); w12trial.print(); #endif } float *inputVec = new float[Ldim+1](); float *inputVecVel = new float[Ldim+1](); float *inputVecLocalBest = new float[Ldim+1](); float *inputVecb = new float[Ldim+1](); float *inputVecRegb = new float[Ldim](); float *gradientVec = new float[Ldim+1](); float *dataVecTraining = new float[Ntraining*(n+1)](); #if 0 printf("L = \n"); for (int i = 0; i < numVar; i++){ for (int j = 0; j < numVar; j++) { printf("%10.7f ",inputVec[i*numVar+j]); } printf("\n"); } printf("sigma = %10.7f\n",inputVec[Ldim]); #endif #if 1 printf("copying training data...\n"); #endif for (int i = 0; i < Ntraining; i++) { for (int j = 0; j < numVar+1; j++) { dataVecTraining[i*(n+1)+j ] = dataTraining(i, j); } } #if 1 printf("data copied = \n"); for (int i = 0; i < Ntraining; i++) { for (int j = 0; j < numVar+1; j++) { printf("%10.7f ",dataVecTraining[i*(n+1)+j ]); } printf("\n"); } #endif float optGenError = 10E14; /* cross validation loop to tune the weights for the regularization parameters */ for(int iter_cv=0; iter_cv< max_cv_iter; iter_cv++){ float learning_rateM = 0.0001; float learning_rateSigma = learning_rateM * 0.01; if(max_cv_iter !=1){ wSvd = wSvdtrial(iter_cv); w12 = w12trial(iter_cv); } #if 1 printf("Outer iteration = %d\n",iter_cv); printf("wSvd = %10.7f, w12 = %10.7f\n",wSvd,w12); #endif /* initialize the L matrix and sigma => everything is saved in the vector "inputVec" */ for (int i = 0; i < numVar; i++) for (int j = 0; j < numVar; j++) { inputVec[i*numVar+j] = 0.0; } for (int i = 0; i < numVar; i++) { for (int j = 0; j <= i; j++) { if(i ==j) { /* main diagonal */ inputVec[i*numVar+j] = 1.0+ RandomFloat(-0.1,0.1); } else { inputVec[i*numVar+j] = RandomFloat(0.0,0.1); } } } /* assign sigma */ inputVec[Ldim] = RandomFloat(0.0,0.1); float lossVal,lossValb, regTerm; float objFunVal; lossVal = 0.0; lossValb = 1.0; for(int i=0;i<Ldim+1;i++) { inputVecb[i] = 0.0; } /* calculate the first gradient vector */ printf("Evaluating the first gradient...\n"); calcLossFunGPU_b(&lossVal, &lossValb, inputVec,inputVecb, dataVecTraining,Ntraining); printf("initial Loss (GPU Version)= %10.7f\n", lossVal); #if 1 printf("gradient of the loss term = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", inputVecb[i*numVar+j]); } printf("\n"); } printf("sigma sensitivity = %10.7f\n", inputVecb[Ldim]); #endif for(int i=0;i<Ldim+1;i++) { gradientVec[i]=inputVecb[i]; } #if 0 /* call the CodiPack version for validation */ codi::RealReverse *inputVecCodi = new codi::RealReverse[n*n+1]; for(int i=0; i<n*n+1; i++){ inputVecCodi[i] = inputVec[i]; } codi::RealReverse lossValCodi = 0.0; float *inputVecbCodi = new float[n*n+1](); /* call the CodiPack version of "calcLossFunCPU" */ printf("calling calcLossFunCPU (reverse AD)...\n"); calcLossFunCPU(&lossValCodi,inputVecCodi, inputVecbCodi, dataVecTraining, Ntraining); printf("Lb (codipack result)= \n"); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { printf("%10.7f ", inputVecbCodi[i*n+j]); } 
printf("\n"); } printf("sigmab = %10.7f\n", inputVecbCodi[n*n]); printf("lossValCodi = %10.7f\n", lossValCodi.getValue()); #endif #if 0 printf("calculating regularization term...\n"); #endif for(int i=0;i<Ldim;i++) { inputVecRegb[i] = 0.0; } /* call the adjoint mode of the function to compute the regularization term */ calcRegTerms(inputVec, inputVecRegb, &regTerm, wSvd, w12, n); #if 0 printf("gradient of the regularization term = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", inputVecRegb[i*numVar+j]); } printf("\n"); } #endif objFunVal = lossVal + regTerm; printf("initial value of the objective function = %10.7f\n",objFunVal); /* add the regularization sensitivities to the gradient vector */ for(int i=0;i<Ldim;i++) { gradientVec[i]+=inputVecRegb[i]; } #if 0 /* validation loop for the regularization term */ float f0 = 0.0; float tempSave; calcRegTerms(inputVec, &f0, wSvd, w12, n); printf("f0 = %10.7f\n",f0); float epsValReg= 0.001; for (int i = 0; i < n; i++) { for (int j = 0; j <= i; j++) { printf("validating the (%d,%d) th element of M\n",i,j); tempSave = inputVec[i*n+j]; inputVec[i*n+j]+=epsValReg; float f1 = 0.0; calcRegTerms(inputVec, &f1, wSvd, w12, n); printf("f1 = %10.7f, f0 = %10.7f\n",f1,f0); inputVec[i*n+j]= tempSave; float fdVal = (f1-f0)/epsValReg; printf("fd value = %10.7f, ad value = %10.7f\n",fdVal,inputVecRegb[i*n+j]); float f2,f2d; /* call forward mode */ calcRegTerms(inputVec, &f2,&f2d, wSvd, w12, n, i*n+j); printf("primal value = %10.7f, forward ad value = %10.7f, ad value = %10.7f\n",f2,f2d,inputVecRegb[i*n+j]); } } #endif /* optimization loop */ /* check gradient */ for(int i=0;i<Ldim;i++) { if( gradientVec[i] != gradientVec[i]){ printf("gradientVec[%d] is NaN!\n",i); exit(1); } } float objectiveFunLocalBest = 10E14; for(int opt_iter=0 ; opt_iter < max_opt_iter; opt_iter++){ /* update M */ for (int i = 0; i < numVar; i++){ for (int j = 0; j <= i; j++) { inputVec[i*numVar+j]= inputVec[i*numVar+j] + inputVecVel[i*numVar+j]; } } for (int i = 0; i < numVar; i++){ for (int j = 0; j <= i; j++) { if ( inputVec[i*numVar+j] < 0) { inputVec[i*numVar+j] = 10E-6; } } } /* update sigma */ inputVec[Ldim]= inputVec[Ldim] + inputVecVel[Ldim]; if(inputVec[Ldim] <= 0) { inputVec[Ldim] = 10E-06; } for(int i=0;i<Ldim+1;i++) { inputVecb[i] = 0.0; } /* calculate the gradient vector */ #if 0 printf("evaluating gradient vector...\n"); #endif calcLossFunGPU_b(&lossVal, &lossValb, inputVec,inputVecb, dataVecTraining,Ntraining); #if 0 printf("Loss (GPU Version)= %10.7f\n", lossVal); #endif for(int i=0;i<Ldim+1;i++) { gradientVec[i]=inputVecb[i]; } #if 0 printf("calculating the regularization term...\n"); #endif for(int i=0;i<Ldim;i++) { inputVecRegb[i] = 0.0; } /* call the adjoint mode of the function to compute the regularization term */ calcRegTerms(inputVec, inputVecRegb, &regTerm, wSvd, w12, n); #if 0 printf("gradient of the regularization term = \n"); for (int i = 0; i < numVar; i++) { for (int j = 0; j < numVar; j++) { printf("%10.7f ", inputVecRegb[i*numVar+j]); } printf("\n"); } #endif /* add the regularization sensitivities to the gradient vector */ for(int i=0;i<Ldim;i++) { gradientVec[i]+=inputVecRegb[i]; } objFunVal = lossVal + regTerm; if(objFunVal < objectiveFunLocalBest){ objectiveFunLocalBest = objFunVal; for(int i=0;i<Ldim+1;i++) { inputVecLocalBest[i]=inputVec[i]; } } if(opt_iter % 100 == 0){ printf("iter = %d, objective function = %10.7f, Leave One Out Error = %10.7f, Regularization term = %10.7f\n",opt_iter,objFunVal,lossVal, 
regTerm);
#if 0
        printf("L = \n");
        for (int i = 0; i < numVar; i++) {
            for (int j = 0; j < numVar; j++) {
                printf("%10.7f ", inputVec[i*numVar+j]);
            }
            printf("\n");
        }
        printf("sigma = %10.7f\n",inputVec[Ldim]);
#endif
            }
            /* update velocity vector */
            for(int i=0;i<Ldim;i++) {
                inputVecVel[i]=alpha* inputVecVel[i] - learning_rateM*gradientVec[i];
            }
            inputVecVel[Ldim]=alpha* inputVecVel[Ldim] - learning_rateSigma*gradientVec[Ldim];
        } /* end of local optimization loop */
        for (int i = 0; i < numVar; i++)
            for (int j = 0; j < numVar; j++) {
                L(i,j)= inputVecLocalBest[i*numVar+j];
            }
#if 1
        printf("local optimization result:\n");
        printf("L = \n");
        L.print();
        printf("sigma = %10.7f\n", inputVecLocalBest[Ldim]);
#endif
        sigma = inputVecLocalBest[Ldim];
        fmat M = L*trans(L);
#if 1
        printf("M = \n");
        M.print();
#endif
        float genError = 0.0;
        for(int i=0;i <NvalidationSet; i++){
            frowvec xp = XValidation.row(i);
            float ytilde = kernelRegressor(XTraining, yTraining, xp, M, sigma);
            float yexact = yValidation(i);
#if 0
            printf("x:\n");
            xp.print();
            printf("ytilde = %10.7f, yexact = %10.7f\n",ytilde,yexact);
#endif
            // genError += (yexact-ytilde)*(yexact-ytilde);
            genError += fabs(yexact-ytilde);
        }
        genError = genError/NvalidationSet;
#if 1
        printf("Generalization error = %10.7f\n",genError);
#endif
        if(genError < optGenError) {
#if 1
            printf("Better L has been found, updating L...\n");
#endif
            bestL = L;
            bestsigma = sigma;
            optGenError = genError;
        }
    } /* end of cv loop */
    L = bestL;
    sigma = bestsigma;
    delete[] inputVec;
    delete[] inputVecVel;
    delete[] inputVecLocalBest;
    delete[] inputVecb;
    delete[] inputVecRegb;
    delete[] dataVecTraining;
    delete[] gradientVec;
    return 0;
}
c0471ab7df4c068be24c228b6a55acf6546db8b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Accel.cu * * Created on: May 31, 2021 * Author: Edg@r j. */ #include "Accel.cuh" __global__ void find_borders_kernel(float2* array, float4 *brd, int *mutex, unsigned int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ float cacheMaxX[256]; __shared__ float cacheMaxY[256]; __shared__ float cacheMinX[256]; __shared__ float cacheMinY[256]; float maxX = 1.0; float maxY = 1.0; float minX = -1.0; float minY = -1.0; while(index + offset < n){ maxX = fmaxf(maxX, array[index + offset].x); maxY = fmaxf(maxY, array[index + offset].y); minX = fminf(minX, array[index + offset].x); minY = fminf(minY, array[index + offset].y); offset += stride; } cacheMaxX[threadIdx.x] = maxX; cacheMaxY[threadIdx.x] = maxY; cacheMinX[threadIdx.x] = minX; cacheMinY[threadIdx.x] = minY; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cacheMaxX[threadIdx.x] = fmaxf(cacheMaxX[threadIdx.x], cacheMaxX[threadIdx.x + i]); cacheMaxY[threadIdx.x] = fmaxf(cacheMaxY[threadIdx.x], cacheMaxY[threadIdx.x + i]); cacheMinX[threadIdx.x] = fminf(cacheMinX[threadIdx.x], cacheMinX[threadIdx.x + i]); cacheMinY[threadIdx.x] = fminf(cacheMinY[threadIdx.x], cacheMinY[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock brd[0].x = fmaxf(brd[0].x, cacheMaxX[0]); brd[0].y = fmaxf(brd[0].y, cacheMaxY[0]); brd[0].z = fminf(brd[0].z, cacheMinX[0]); brd[0].w = fminf(brd[0].w, cacheMinY[0]); atomicExch(mutex, 0); //unlock } } __global__ void kernel_2(float2* d_poss,float2* d_color , int numPoints,mapping *d_mappings, int numMappings) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; // If needed for performance, move hiprand_init to seperate kernel and store // states in device memory hiprandState_t state; hiprand_init((unsigned long long) clock(), index, 0, &state); // Set up transformation mapping once per block in shared memory extern __shared__ mapping maps[]; if(threadIdx.x == 0) { #pragma unroll for(int i = 0; i < numMappings; i++) maps[i] = d_mappings[i]; } __syncthreads(); // Initially start at a mapping vertex to guarantee we stay inside the // iterated function system int currentTarget = index % numMappings; float2 currentPosition, newPosition; currentPosition.x = maps[currentTarget].x; currentPosition.y = maps[currentTarget].y; for(int i = index; i < numPoints; i += stride) { // set the current vertex to the currentPosition d_poss[i].x = currentPosition.x ; d_poss[i].y = currentPosition.y ; // set the iteration percentage and current target mapping d_color[i].x = i / (float) numPoints; d_color[i].y = currentTarget; // find random target with given mapping probabilities // If needed for performance, find method to remove thread divergence // Note: changing 4 to numMappings in for loop reduced performance 50% float currentProb = hiprand_uniform(&state); float totalProb = 0.0f; for(int j = 0; j < numMappings; j++) { totalProb += maps[j].p; if(currentProb < totalProb) { currentTarget = j; break; } } // calculate the transformation // (x_n+1) = (a b)(x_n) + (e) // (y_n+1) (c d)(y_n) (f) newPosition.x = maps[currentTarget].a * currentPosition.x + maps[currentTarget].b * currentPosition.y + maps[currentTarget].x; newPosition.y = maps[currentTarget].c * currentPosition.x + maps[currentTarget].d * currentPosition.y + 
maps[currentTarget].y; currentPosition = newPosition; } } __global__ void kernel_test(float2* d_pointData, int numPoints,mapping *d_mappings, int numMappings) { int index = blockIdx.x * blockDim.x + threadIdx.x; //int stride = blockDim.x * gridDim.x; int currentTarget = index % numMappings; //d_pointData[index].x = 0.0f + currentTarget * 0.5f; d_pointData[index].y = 0.0f + currentTarget * 0.10f; //d_pointData[index].x = 0.0f; //d_pointData[index].y = 0.0f; } Accel::Accel() { // Initialize CUDA checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipGetDevice(&m_cuDevice)); checkCudaErrors(hipGetDeviceProperties(&m_cuDevProp,m_cuDevice)); hipDriverGetVersion(&m_driverVersion); hipRuntimeGetVersion(&m_runtimeVersion); // Print device properties printf("............................GPU...........................\t\n"); printf("\tDevice Name: %s\n", m_cuDevProp.name); printf("\tCUDA Driver Version / Runtime Version: %d.%d / %d.%d\n", m_driverVersion / 1000, (m_driverVersion % 100) / 10, m_runtimeVersion / 1000, (m_runtimeVersion % 100) / 10); printf("\tCompute Capability: %d.%d\n", m_cuDevProp.major, m_cuDevProp.minor); printf("\tTotal Global Memory: %ld bytes\n", m_cuDevProp.totalGlobalMem); printf("\tNumber of Multiprocessors: %d\n", m_cuDevProp.multiProcessorCount); printf("\tMaximum Threads per Multiprocessor: %d\n", m_cuDevProp.maxThreadsPerMultiProcessor); printf("\tTotal Number of Threads: %d\n", m_cuDevProp.multiProcessorCount * m_cuDevProp.maxThreadsPerMultiProcessor); printf("\tMaximum Threads per Block: %d\n", m_cuDevProp.maxThreadsPerBlock); printf(".........................................................\t\n\n"); // Start the Fractal structure for (int k=0; k< NF; k++){ m_fk[k].g_strucPoss = NULL; m_fk[k].g_strucColor = NULL; m_fk[k].d_glPoss = NULL; m_fk[k].d_glColor = NULL; m_fk[k].d_borders = NULL; m_fk[k].h_borders = NULL; m_fk[k].fXmin = 0.0; m_fk[k].fXmax = 0.0; m_fk[k].fYmin = 0.0; m_fk[k].fYmax = 0.0; m_fk[k].d_mutex = NULL; m_fk[k].d_map = NULL; m_fk[k].h_map = NULL; } // Timer related m_fFlops = m_fStepsec = 0.0f; // Memory Flags related m_bChangeInterop = m_bChangeMalloc = true; m_numBlocks = m_blockSize = 0; //m_fXmax = m_fXmin = m_fYmax = m_fYmin = 0.0; } void Accel::interopCUDA(){ std::cout<<"Seting up CUDA-OpenGL buffer...\n\n"; // Prepare graphics interoperability for (int k=0; k< NF; k++){ if(m_fk[k].g_strucPoss != NULL) checkCudaErrors(hipGraphicsUnregisterResource(m_fk[k].g_strucPoss)); if(m_fk[k].g_strucColor != NULL) checkCudaErrors(hipGraphicsUnregisterResource(m_fk[k].g_strucColor)); glDeleteBuffers(1,&m_fk[k].g_poss); glDeleteBuffers(1,&m_fk[k].g_color); // Creation of share buffer between CUDA and OpenGL glGenBuffers(1, &m_fk[k].g_poss); glBindBuffer(GL_ARRAY_BUFFER, m_fk[k].g_poss); unsigned int sizeP = MAX_POINTS * 2 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, sizeP, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); glGenBuffers(1, &m_fk[k].g_color); glBindBuffer(GL_ARRAY_BUFFER, m_fk[k].g_color); unsigned int sizeC = MAX_POINTS * 2 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, sizeC, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); // Register CUDA and OpenGL Interop checkCudaErrors(hipGraphicsGLRegisterBuffer(&m_fk[k].g_strucPoss,m_fk[k].g_poss,hipGraphicsMapFlagsNone)); checkCudaErrors(hipGraphicsGLRegisterBuffer(&m_fk[k].g_strucColor,m_fk[k].g_color,hipGraphicsMapFlagsNone)); } } void Accel::malloCUDA(int numMaps){ // For params of fractals for (int k=0; k< NF; k++){ if(m_fk[k].d_map != NULL) checkCudaErrors(hipFree(m_fk[k].d_map)); 
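// allocate device storage for the mapping parameters of fractal k and upload them from the host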
checkCudaErrors(hipMalloc((void**)&m_fk[k].d_map,numMaps*sizeof(mapping))); checkCudaErrors(hipMemcpy(m_fk[k].d_map,m_fk[k].h_map,numMaps*sizeof(mapping),hipMemcpyHostToDevice)); // To check borders if(m_fk[k].d_borders != NULL) checkCudaErrors(hipFree(m_fk[k].d_borders)); hipMalloc((void**)&m_fk[k].d_borders,sizeof(float4)); hipMemset(m_fk[k].d_borders,0, sizeof(float4)); if(m_fk[k].d_mutex != NULL) checkCudaErrors(hipFree(m_fk[k].d_mutex)); hipMalloc((void**)&m_fk[k].d_mutex,sizeof(int)); hipMemset(m_fk[k].d_mutex, 0, sizeof(int)); if(m_fk[k].h_borders != NULL) free(m_fk[k].h_borders); m_fk[k].h_borders = (float*)malloc(4*sizeof(float)); } } void Accel::fractalKernel(int numMappings, int numPoints){ m_numBlocks = 1; m_blockSize = 1024; size_t mapsizevbo; for (int k=0; k< NF; k++){ checkCudaErrors(hipGraphicsMapResources(1,&m_fk[k].g_strucPoss,0)); checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&m_fk[k].d_glPoss,&mapsizevbo,m_fk[k].g_strucPoss)); checkCudaErrors(hipGraphicsMapResources(1,&m_fk[k].g_strucColor,0)); checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&m_fk[k].d_glColor,&mapsizevbo,m_fk[k].g_strucColor)); } hipEvent_t start, stop; checkCudaErrors( hipEventCreate(&start) ); checkCudaErrors( hipEventCreate(&stop) ); checkCudaErrors( hipEventRecord(start) ); for (int k=0; k< NF; k++){ // Compute Fractal hipLaunchKernelGGL(( kernel_2), dim3(m_numBlocks), dim3(m_blockSize), numMappings * sizeof(mapping), 0, (float2*)m_fk[k].d_glPoss,(float2*)m_fk[k].d_glColor , numPoints, m_fk[k].d_map, numMappings); // Compute Borders of the fractal dim3 gridSize = 256; dim3 blockSize = 256; hipMemset(m_fk[k].d_mutex, 0, sizeof(int)); hipLaunchKernelGGL(( find_borders_kernel), dim3(gridSize), dim3(blockSize) , 0, 0, (float2*)m_fk[k].d_glPoss,m_fk[k].d_borders, m_fk[k].d_mutex, (unsigned int)numPoints); checkCudaErrors(hipMemcpy(m_fk[k].h_borders, m_fk[k].d_borders, sizeof(float4), hipMemcpyDeviceToHost)); checkCudaErrors( hipDeviceSynchronize() ); m_fk[k].fXmax = m_fk[k].h_borders[0]; m_fk[k].fYmax = m_fk[k].h_borders[1]; m_fk[k].fXmin = m_fk[k].h_borders[2]; m_fk[k].fYmin = m_fk[k].h_borders[3]; } /* cout<<"Maximum X found on gpu was: "<<m_fXmax<<endl; cout<<"Maximum Y found on gpu was: "<<m_fYmax<<endl; cout<<"Minimum X found on gpu was: "<<m_fXmin<<endl; cout<<"Minimum Y found on gpu was: "<<m_fYmin<<endl<<endl; */ checkCudaErrors( hipEventRecord(stop) ); // handle any synchronous and asynchronous kernel errors checkCudaErrors( hipGetLastError() ); checkCudaErrors( hipDeviceSynchronize() ); // record and print kernel timing checkCudaErrors( hipEventSynchronize(stop) ); m_kernel_mili = 0; checkCudaErrors( hipEventElapsedTime(&m_kernel_mili, start, stop) ); // Unmap OpenGL resources for (int k=0; k< NF; k++){ checkCudaErrors(hipGraphicsUnmapResources(1,&m_fk[k].g_strucPoss,0)); checkCudaErrors(hipGraphicsUnmapResources(1,&m_fk[k].g_strucColor,0)); } } Accel::~Accel() { // Unregister if CUDA-InteropGL std::cout<<"Unregistering CUDA-GL Resources...\n"; for (int k=0; k < NF; k++){ if(m_fk[k].g_strucPoss != NULL) checkCudaErrors(hipGraphicsUnregisterResource(m_fk[k].g_strucPoss)); if(m_fk[k].g_strucColor != NULL) checkCudaErrors(hipGraphicsUnregisterResource(m_fk[k].g_strucColor)); if(m_fk[k].h_borders != NULL) free(m_fk[k].h_borders); if(m_fk[k].d_map != NULL) checkCudaErrors(hipFree(m_fk[k].d_map)); } // Free memory for HALF interop //delete [] m_fPossVBO; }
c0471ab7df4c068be24c228b6a55acf6546db8b4.cu
/* * Accel.cu * * Created on: May 31, 2021 * Author: Edg@r j. */ #include "Accel.cuh" __global__ void find_borders_kernel(float2* array, float4 *brd, int *mutex, unsigned int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ float cacheMaxX[256]; __shared__ float cacheMaxY[256]; __shared__ float cacheMinX[256]; __shared__ float cacheMinY[256]; float maxX = 1.0; float maxY = 1.0; float minX = -1.0; float minY = -1.0; while(index + offset < n){ maxX = fmaxf(maxX, array[index + offset].x); maxY = fmaxf(maxY, array[index + offset].y); minX = fminf(minX, array[index + offset].x); minY = fminf(minY, array[index + offset].y); offset += stride; } cacheMaxX[threadIdx.x] = maxX; cacheMaxY[threadIdx.x] = maxY; cacheMinX[threadIdx.x] = minX; cacheMinY[threadIdx.x] = minY; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cacheMaxX[threadIdx.x] = fmaxf(cacheMaxX[threadIdx.x], cacheMaxX[threadIdx.x + i]); cacheMaxY[threadIdx.x] = fmaxf(cacheMaxY[threadIdx.x], cacheMaxY[threadIdx.x + i]); cacheMinX[threadIdx.x] = fminf(cacheMinX[threadIdx.x], cacheMinX[threadIdx.x + i]); cacheMinY[threadIdx.x] = fminf(cacheMinY[threadIdx.x], cacheMinY[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock brd[0].x = fmaxf(brd[0].x, cacheMaxX[0]); brd[0].y = fmaxf(brd[0].y, cacheMaxY[0]); brd[0].z = fminf(brd[0].z, cacheMinX[0]); brd[0].w = fminf(brd[0].w, cacheMinY[0]); atomicExch(mutex, 0); //unlock } } __global__ void kernel_2(float2* d_poss,float2* d_color , int numPoints,mapping *d_mappings, int numMappings) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; // If needed for performance, move curand_init to seperate kernel and store // states in device memory curandState state; curand_init((unsigned long long) clock(), index, 0, &state); // Set up transformation mapping once per block in shared memory extern __shared__ mapping maps[]; if(threadIdx.x == 0) { #pragma unroll for(int i = 0; i < numMappings; i++) maps[i] = d_mappings[i]; } __syncthreads(); // Initially start at a mapping vertex to guarantee we stay inside the // iterated function system int currentTarget = index % numMappings; float2 currentPosition, newPosition; currentPosition.x = maps[currentTarget].x; currentPosition.y = maps[currentTarget].y; for(int i = index; i < numPoints; i += stride) { // set the current vertex to the currentPosition d_poss[i].x = currentPosition.x ; d_poss[i].y = currentPosition.y ; // set the iteration percentage and current target mapping d_color[i].x = i / (float) numPoints; d_color[i].y = currentTarget; // find random target with given mapping probabilities // If needed for performance, find method to remove thread divergence // Note: changing 4 to numMappings in for loop reduced performance 50% float currentProb = curand_uniform(&state); float totalProb = 0.0f; for(int j = 0; j < numMappings; j++) { totalProb += maps[j].p; if(currentProb < totalProb) { currentTarget = j; break; } } // calculate the transformation // (x_n+1) = (a b)(x_n) + (e) // (y_n+1) (c d)(y_n) (f) newPosition.x = maps[currentTarget].a * currentPosition.x + maps[currentTarget].b * currentPosition.y + maps[currentTarget].x; newPosition.y = maps[currentTarget].c * currentPosition.x + maps[currentTarget].d * currentPosition.y + maps[currentTarget].y; currentPosition = newPosition; } } __global__ void kernel_test(float2* 
d_pointData, int numPoints,mapping *d_mappings, int numMappings) { int index = blockIdx.x * blockDim.x + threadIdx.x; //int stride = blockDim.x * gridDim.x; int currentTarget = index % numMappings; //d_pointData[index].x = 0.0f + currentTarget * 0.5f; d_pointData[index].y = 0.0f + currentTarget * 0.10f; //d_pointData[index].x = 0.0f; //d_pointData[index].y = 0.0f; } Accel::Accel() { // Initialize CUDA checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaGetDevice(&m_cuDevice)); checkCudaErrors(cudaGetDeviceProperties(&m_cuDevProp,m_cuDevice)); cudaDriverGetVersion(&m_driverVersion); cudaRuntimeGetVersion(&m_runtimeVersion); // Print device properties printf("............................GPU...........................\t\n"); printf("\tDevice Name: %s\n", m_cuDevProp.name); printf("\tCUDA Driver Version / Runtime Version: %d.%d / %d.%d\n", m_driverVersion / 1000, (m_driverVersion % 100) / 10, m_runtimeVersion / 1000, (m_runtimeVersion % 100) / 10); printf("\tCompute Capability: %d.%d\n", m_cuDevProp.major, m_cuDevProp.minor); printf("\tTotal Global Memory: %ld bytes\n", m_cuDevProp.totalGlobalMem); printf("\tNumber of Multiprocessors: %d\n", m_cuDevProp.multiProcessorCount); printf("\tMaximum Threads per Multiprocessor: %d\n", m_cuDevProp.maxThreadsPerMultiProcessor); printf("\tTotal Number of Threads: %d\n", m_cuDevProp.multiProcessorCount * m_cuDevProp.maxThreadsPerMultiProcessor); printf("\tMaximum Threads per Block: %d\n", m_cuDevProp.maxThreadsPerBlock); printf(".........................................................\t\n\n"); // Start the Fractal structure for (int k=0; k< NF; k++){ m_fk[k].g_strucPoss = NULL; m_fk[k].g_strucColor = NULL; m_fk[k].d_glPoss = NULL; m_fk[k].d_glColor = NULL; m_fk[k].d_borders = NULL; m_fk[k].h_borders = NULL; m_fk[k].fXmin = 0.0; m_fk[k].fXmax = 0.0; m_fk[k].fYmin = 0.0; m_fk[k].fYmax = 0.0; m_fk[k].d_mutex = NULL; m_fk[k].d_map = NULL; m_fk[k].h_map = NULL; } // Timer related m_fFlops = m_fStepsec = 0.0f; // Memory Flags related m_bChangeInterop = m_bChangeMalloc = true; m_numBlocks = m_blockSize = 0; //m_fXmax = m_fXmin = m_fYmax = m_fYmin = 0.0; } void Accel::interopCUDA(){ std::cout<<"Seting up CUDA-OpenGL buffer...\n\n"; // Prepare graphics interoperability for (int k=0; k< NF; k++){ if(m_fk[k].g_strucPoss != NULL) checkCudaErrors(cudaGraphicsUnregisterResource(m_fk[k].g_strucPoss)); if(m_fk[k].g_strucColor != NULL) checkCudaErrors(cudaGraphicsUnregisterResource(m_fk[k].g_strucColor)); glDeleteBuffers(1,&m_fk[k].g_poss); glDeleteBuffers(1,&m_fk[k].g_color); // Creation of share buffer between CUDA and OpenGL glGenBuffers(1, &m_fk[k].g_poss); glBindBuffer(GL_ARRAY_BUFFER, m_fk[k].g_poss); unsigned int sizeP = MAX_POINTS * 2 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, sizeP, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); glGenBuffers(1, &m_fk[k].g_color); glBindBuffer(GL_ARRAY_BUFFER, m_fk[k].g_color); unsigned int sizeC = MAX_POINTS * 2 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, sizeC, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); // Register CUDA and OpenGL Interop checkCudaErrors(cudaGraphicsGLRegisterBuffer(&m_fk[k].g_strucPoss,m_fk[k].g_poss,cudaGraphicsMapFlagsNone)); checkCudaErrors(cudaGraphicsGLRegisterBuffer(&m_fk[k].g_strucColor,m_fk[k].g_color,cudaGraphicsMapFlagsNone)); } } void Accel::malloCUDA(int numMaps){ // For params of fractals for (int k=0; k< NF; k++){ if(m_fk[k].d_map != NULL) checkCudaErrors(cudaFree(m_fk[k].d_map)); checkCudaErrors(cudaMalloc((void**)&m_fk[k].d_map,numMaps*sizeof(mapping))); 
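// copy the host-side mapping parameters into the freshly allocated device buffer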
checkCudaErrors(cudaMemcpy(m_fk[k].d_map,m_fk[k].h_map,numMaps*sizeof(mapping),cudaMemcpyHostToDevice)); // To check borders if(m_fk[k].d_borders != NULL) checkCudaErrors(cudaFree(m_fk[k].d_borders)); cudaMalloc((void**)&m_fk[k].d_borders,sizeof(float4)); cudaMemset(m_fk[k].d_borders,0, sizeof(float4)); if(m_fk[k].d_mutex != NULL) checkCudaErrors(cudaFree(m_fk[k].d_mutex)); cudaMalloc((void**)&m_fk[k].d_mutex,sizeof(int)); cudaMemset(m_fk[k].d_mutex, 0, sizeof(int)); if(m_fk[k].h_borders != NULL) free(m_fk[k].h_borders); m_fk[k].h_borders = (float*)malloc(4*sizeof(float)); } } void Accel::fractalKernel(int numMappings, int numPoints){ m_numBlocks = 1; m_blockSize = 1024; size_t mapsizevbo; for (int k=0; k< NF; k++){ checkCudaErrors(cudaGraphicsMapResources(1,&m_fk[k].g_strucPoss,0)); checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&m_fk[k].d_glPoss,&mapsizevbo,m_fk[k].g_strucPoss)); checkCudaErrors(cudaGraphicsMapResources(1,&m_fk[k].g_strucColor,0)); checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&m_fk[k].d_glColor,&mapsizevbo,m_fk[k].g_strucColor)); } cudaEvent_t start, stop; checkCudaErrors( cudaEventCreate(&start) ); checkCudaErrors( cudaEventCreate(&stop) ); checkCudaErrors( cudaEventRecord(start) ); for (int k=0; k< NF; k++){ // Compute Fractal kernel_2<<<m_numBlocks, m_blockSize, numMappings * sizeof(mapping)>>> ((float2*)m_fk[k].d_glPoss,(float2*)m_fk[k].d_glColor , numPoints, m_fk[k].d_map, numMappings); // Compute Borders of the fractal dim3 gridSize = 256; dim3 blockSize = 256; cudaMemset(m_fk[k].d_mutex, 0, sizeof(int)); find_borders_kernel<<< gridSize, blockSize >>> ((float2*)m_fk[k].d_glPoss,m_fk[k].d_borders, m_fk[k].d_mutex, (unsigned int)numPoints); checkCudaErrors(cudaMemcpy(m_fk[k].h_borders, m_fk[k].d_borders, sizeof(float4), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaDeviceSynchronize() ); m_fk[k].fXmax = m_fk[k].h_borders[0]; m_fk[k].fYmax = m_fk[k].h_borders[1]; m_fk[k].fXmin = m_fk[k].h_borders[2]; m_fk[k].fYmin = m_fk[k].h_borders[3]; } /* cout<<"Maximum X found on gpu was: "<<m_fXmax<<endl; cout<<"Maximum Y found on gpu was: "<<m_fYmax<<endl; cout<<"Minimum X found on gpu was: "<<m_fXmin<<endl; cout<<"Minimum Y found on gpu was: "<<m_fYmin<<endl<<endl; */ checkCudaErrors( cudaEventRecord(stop) ); // handle any synchronous and asynchronous kernel errors checkCudaErrors( cudaGetLastError() ); checkCudaErrors( cudaDeviceSynchronize() ); // record and print kernel timing checkCudaErrors( cudaEventSynchronize(stop) ); m_kernel_mili = 0; checkCudaErrors( cudaEventElapsedTime(&m_kernel_mili, start, stop) ); // Unmap OpenGL resources for (int k=0; k< NF; k++){ checkCudaErrors(cudaGraphicsUnmapResources(1,&m_fk[k].g_strucPoss,0)); checkCudaErrors(cudaGraphicsUnmapResources(1,&m_fk[k].g_strucColor,0)); } } Accel::~Accel() { // Unregister if CUDA-InteropGL std::cout<<"Unregistering CUDA-GL Resources...\n"; for (int k=0; k < NF; k++){ if(m_fk[k].g_strucPoss != NULL) checkCudaErrors(cudaGraphicsUnregisterResource(m_fk[k].g_strucPoss)); if(m_fk[k].g_strucColor != NULL) checkCudaErrors(cudaGraphicsUnregisterResource(m_fk[k].g_strucColor)); if(m_fk[k].h_borders != NULL) free(m_fk[k].h_borders); if(m_fk[k].d_map != NULL) checkCudaErrors(cudaFree(m_fk[k].d_map)); } // Free memory for HALF interop //delete [] m_fPossVBO; }
052581b8bfcde1de4e0dac7a492d5ac2147ebbad.hip
// !!! This is a file automatically generated by hipify!!! //====================================================================================================100 // UPDATE //====================================================================================================100 // 2006.03 Rob Janiczek // --creation of prototype version // 2006.03 Drew Gilliam // --rewriting of prototype version into current version // --got rid of multiple function calls, all code in a // single function (for speed) // --code cleanup & commenting // --code optimization efforts // 2006.04 Drew Gilliam // --added diffusion coefficent saturation on [0,1] // 2009.12 Lukasz G. Szafaryn // -- reading from image, command line inputs // 2010.01 Lukasz G. Szafaryn // --comments //====================================================================================================100 // DEFINE / INCLUDE //====================================================================================================100 #include <stdlib.h> #include <math.h> #include <string.h> #include <hip/hip_runtime.h> #include "main.h" #include "extract_kernel.hip" #include "prepare_kernel.cu" #include "reduce_kernel.hip" #include "srad_kernel.hip" #include "srad2_kernel.cu" #include "compress_kernel.cu" #include "graphics.c" #include "resize.c" #include "timer.c" //====================================================================================================100 // MAIN FUNCTION //====================================================================================================100 int main(int argc, char *argv []){ //================================================================================80 // VARIABLES //================================================================================80 // time long long time0; long long time1; long long time2; long long time3; long long time4; long long time5; long long time6; long long time7; long long time8; long long time9; long long time10; long long time11; long long time12; time0 = get_time(); // inputs image, input paramenters fp* image_ori; // originalinput image int image_ori_rows; int image_ori_cols; long image_ori_elem; // inputs image, input paramenters fp* image; // input image int Nr,Nc; // IMAGE nbr of rows/cols/elements long Ne; // algorithm parameters int niter; // nbr of iterations fp lambda; // update step size // size of IMAGE int r1,r2,c1,c2; // row/col coordinates of uniform ROI long NeROI; // ROI nbr of elements // surrounding pixel indicies int *iN,*iS,*jE,*jW; // counters int iter; // primary loop long i,j; // image row/col // memory sizes int mem_size_i; int mem_size_j; int mem_size_single; //================================================================================80 // GPU VARIABLES //================================================================================80 // CUDA kernel execution parameters dim3 threads; int blocks_x; dim3 blocks; dim3 blocks2; dim3 blocks3; // memory sizes int mem_size; // matrix memory size // HOST int no; int mul; fp total; fp total2; fp meanROI; fp meanROI2; fp varROI; fp q0sqr; // DEVICE fp* d_sums; // partial sum fp* d_sums2; int* d_iN; int* d_iS; int* d_jE; int* d_jW; fp* d_dN; fp* d_dS; fp* d_dW; fp* d_dE; fp* d_I; // input IMAGE on DEVICE fp* d_c; time1 = get_time(); //================================================================================80 // GET INPUT PARAMETERS //================================================================================80 if(argc != 5){ printf("ERROR: wrong number of arguments\n"); 
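// expected arguments: <niter> <lambda> <Nr> <Nc>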
return 0; } else{ niter = atoi(argv[1]); lambda = atof(argv[2]); Nr = atoi(argv[3]); // it is 502 in the original image Nc = atoi(argv[4]); // it is 458 in the original image } time2 = get_time(); //================================================================================80 // READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN) //================================================================================80 // read image image_ori_rows = 502; image_ori_cols = 458; image_ori_elem = image_ori_rows * image_ori_cols; image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem); if ( !read_graphics( "../data/srad/image.pgm", image_ori, image_ori_rows, image_ori_cols, 1) ) return -1; time3 = get_time(); //================================================================================80 // RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig) //================================================================================80 Ne = Nr*Nc; image = (fp*)malloc(sizeof(fp) * Ne); resize( image_ori, image_ori_rows, image_ori_cols, image, Nr, Nc, 1); time4 = get_time(); //================================================================================80 // SETUP //================================================================================80 r1 = 0; // top row index of ROI r2 = Nr - 1; // bottom row index of ROI c1 = 0; // left column index of ROI c2 = Nc - 1; // right column index of ROI // ROI image size NeROI = (r2-r1+1)*(c2-c1+1); // number of elements in ROI, ROI size // allocate variables for surrounding pixels mem_size_i = sizeof(int) * Nr; // iN = (int *)malloc(mem_size_i) ; // north surrounding element iS = (int *)malloc(mem_size_i) ; // south surrounding element mem_size_j = sizeof(int) * Nc; // jW = (int *)malloc(mem_size_j) ; // west surrounding element jE = (int *)malloc(mem_size_j) ; // east surrounding element // N/S/W/E indices of surrounding pixels (every element of IMAGE) for (i=0; i<Nr; i++) { iN[i] = i-1; // holds index of IMAGE row above iS[i] = i+1; // holds index of IMAGE row below } for (j=0; j<Nc; j++) { jW[j] = j-1; // holds index of IMAGE column on the left jE[j] = j+1; // holds index of IMAGE column on the right } // N/S/W/E boundary conditions, fix surrounding indices outside boundary of image iN[0] = 0; // changes IMAGE top row index from -1 to 0 iS[Nr-1] = Nr-1; // changes IMAGE bottom row index from Nr to Nr-1 jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0 jE[Nc-1] = Nc-1; // changes IMAGE rightmost column index from Nc to Nc-1 //================================================================================80 // GPU SETUP //================================================================================80 // allocate memory for entire IMAGE on DEVICE mem_size = sizeof(fp) * Ne; // get the size of float representation of input IMAGE hipMalloc((void **)&d_I, mem_size); // // allocate memory for coordinates on DEVICE hipMalloc((void **)&d_iN, mem_size_i); // hipMemcpy(d_iN, iN, mem_size_i, hipMemcpyHostToDevice); // hipMalloc((void **)&d_iS, mem_size_i); // hipMemcpy(d_iS, iS, mem_size_i, hipMemcpyHostToDevice); // hipMalloc((void **)&d_jE, mem_size_j); // hipMemcpy(d_jE, jE, mem_size_j, hipMemcpyHostToDevice); // hipMalloc((void **)&d_jW, mem_size_j); // hipMemcpy(d_jW, jW, mem_size_j, hipMemcpyHostToDevice); // // allocate memory for partial sums on DEVICE hipMalloc((void **)&d_sums, mem_size); // hipMalloc((void **)&d_sums2, mem_size); // // allocate memory for derivatives hipMalloc((void **)&d_dN, mem_size); // hipMalloc((void **)&d_dS, mem_size); 
// hipMalloc((void **)&d_dW, mem_size); // hipMalloc((void **)&d_dE, mem_size); // // allocate memory for coefficient on DEVICE hipMalloc((void **)&d_c, mem_size); // //checkCUDAError("setup"); //================================================================================80 // KERNEL EXECUTION PARAMETERS //================================================================================80 // all kernels operating on entire matrix threads.x = NUMBER_THREADS; // define the number of threads in the block threads.y = 1; blocks_x = Ne/threads.x; if (Ne % threads.x != 0){ // compensate for division remainder above by adding one grid blocks_x = blocks_x + 1; } blocks.x = blocks_x; // define the number of blocks in the grid blocks.y = 1; time5 = get_time(); //================================================================================80 // COPY INPUT TO CPU //================================================================================80 hipMemcpy(d_I, image, mem_size, hipMemcpyHostToDevice); time6 = get_time(); //================================================================================80 // SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT //================================================================================80 hipLaunchKernelGGL(extract, blocks, threads, 0, 0, Ne, d_I); //checkCUDAError("extract"); time7 = get_time(); //================================================================================80 // COMPUTATION //================================================================================80 // printf("iterations: "); // execute main loop for (iter=0; iter<niter; iter++){ // do for the number of iterations input parameter // printf("%d ", iter); // fflush(NULL); // execute square kernel hipLaunchKernelGGL(prepare, blocks, threads, 0, 0, Ne, d_I, d_sums, d_sums2); //checkCUDAError("prepare"); // performs subsequent reductions of sums blocks2.x = blocks.x; // original number of blocks blocks2.y = blocks.y; no = Ne; // original number of sum elements mul = 1; // original multiplier while(blocks2.x != 0){ //checkCUDAError("before reduce"); // run kernel hipLaunchKernelGGL(reduce, blocks2, threads, 0, 0, Ne, no, mul, d_sums, d_sums2); //checkCUDAError("reduce"); // update execution parameters no = blocks2.x; // get current number of elements if(blocks2.x == 1){ blocks2.x = 0; } else{ mul = mul * NUMBER_THREADS; // update the increment blocks_x = blocks2.x/threads.x; // number of blocks if (blocks2.x % threads.x != 0){ // compensate for division remainder above by adding one grid blocks_x = blocks_x + 1; } blocks2.x = blocks_x; blocks2.y = 1; } //checkCUDAError("after reduce"); } //checkCUDAError("before copy sum"); // copy total sums to device mem_size_single = sizeof(fp) * 1; hipMemcpy(&total, d_sums, mem_size_single, hipMemcpyDeviceToHost); hipMemcpy(&total2, d_sums2, mem_size_single, hipMemcpyDeviceToHost); //checkCUDAError("copy sum"); // calculate statistics meanROI = total / fp(NeROI); // gets mean (average) value of element in ROI meanROI2 = meanROI * meanROI; // varROI = (total2 / fp(NeROI)) - meanROI2; // gets variance of ROI q0sqr = varROI / meanROI2; // gets standard deviation of ROI // execute srad kernel hipLaunchKernelGGL(srad, blocks, threads, 0, 0, lambda, // SRAD coefficient Nr, // # of rows in input image Nc, // # of columns in input image Ne, // # of elements in input image d_iN, // indices of North surrounding pixels d_iS, // indices of South surrounding pixels d_jE, // indices of East surrounding pixels d_jW, // indices of West surrounding pixels 
d_dN, // North derivative d_dS, // South derivative d_dW, // West derivative d_dE, // East derivative q0sqr, // standard deviation of ROI d_c, // diffusion coefficient d_I); // output image //checkCUDAError("srad"); // execute srad2 kernel hipLaunchKernelGGL(srad2, blocks, threads, 0, 0, lambda, // SRAD coefficient Nr, // # of rows in input image Nc, // # of columns in input image Ne, // # of elements in input image d_iN, // indices of North surrounding pixels d_iS, // indices of South surrounding pixels d_jE, // indices of East surrounding pixels d_jW, // indices of West surrounding pixels d_dN, // North derivative d_dS, // South derivative d_dW, // West derivative d_dE, // East derivative d_c, // diffusion coefficient d_I); // output image //checkCUDAError("srad2"); } // printf("\n"); time8 = get_time(); //================================================================================80 // SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS //================================================================================80 hipLaunchKernelGGL(compress, blocks, threads, 0, 0, Ne, d_I); //checkCUDAError("compress"); time9 = get_time(); //================================================================================80 // COPY RESULTS BACK TO CPU //================================================================================80 hipMemcpy(image, d_I, mem_size, hipMemcpyDeviceToHost); //checkCUDAError("copy back"); time10 = get_time(); //================================================================================80 // WRITE IMAGE AFTER PROCESSING //================================================================================80 write_graphics( "image_out.pgm", image, Nr, Nc, 1, 255); time11 = get_time(); //================================================================================80 // DEALLOCATE //================================================================================80 free(image_ori); free(image); free(iN); free(iS); free(jW); free(jE); hipFree(d_I); hipFree(d_c); hipFree(d_iN); hipFree(d_iS); hipFree(d_jE); hipFree(d_jW); hipFree(d_dN); hipFree(d_dS); hipFree(d_dE); hipFree(d_dW); hipFree(d_sums); hipFree(d_sums2); time12 = get_time(); //================================================================================80 // DISPLAY TIMING //================================================================================80 printf("Time spent in different stages of the application:\n"); printf("%15.12f s, %15.12f %% : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : READ COMMAND LINE PARAMETERS\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : READ IMAGE FROM FILE\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : RESIZE IMAGE\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY ALLOCATION\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : COPY DATA TO CPU->GPU\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : EXTRACT IMAGE\n", (float) (time7-time6) / 1000000, (float) (time7-time6) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : COMPUTE\n", (float) (time8-time7) / 1000000, 
(float) (time8-time7) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : COMPRESS IMAGE\n", (float) (time9-time8) / 1000000, (float) (time9-time8) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : COPY DATA TO GPU->CPU\n", (float) (time10-time9) / 1000000, (float) (time10-time9) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : SAVE IMAGE INTO FILE\n", (float) (time11-time10) / 1000000, (float) (time11-time10) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : FREE MEMORY\n", (float) (time12-time11) / 1000000, (float) (time12-time11) / (float) (time12-time0) * 100); printf("Total time:\n"); printf("%.12f s\n", (float) (time12-time0) / 1000000); } //====================================================================================================100 // END OF FILE //====================================================================================================100
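The kernel launch configuration above is derived with an explicit remainder check: blocks_x = Ne / threads.x, plus one extra block when Ne % threads.x != 0, and the reduction loop repeats the same arithmetic on each pass. The helper below is a hypothetical standalone restatement of that idiom, not part of the benchmark source; it only makes the ceiling division explicit.

// Hypothetical helper mirroring the grid-sizing idiom used above: a ceiling
// division so that blocks * threads_per_block covers all n_elems work items.
static int grid_size_1d(long n_elems, int threads_per_block) {
    int blocks = (int)(n_elems / threads_per_block);
    if (n_elems % threads_per_block != 0) {
        blocks += 1;  // leftover elements need one extra, partially filled block
    }
    return blocks;
}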
052581b8bfcde1de4e0dac7a492d5ac2147ebbad.cu
//====================================================================================================100 // UPDATE //====================================================================================================100 // 2006.03 Rob Janiczek // --creation of prototype version // 2006.03 Drew Gilliam // --rewriting of prototype version into current version // --got rid of multiple function calls, all code in a // single function (for speed) // --code cleanup & commenting // --code optimization efforts // 2006.04 Drew Gilliam // --added diffusion coefficent saturation on [0,1] // 2009.12 Lukasz G. Szafaryn // -- reading from image, command line inputs // 2010.01 Lukasz G. Szafaryn // --comments //====================================================================================================100 // DEFINE / INCLUDE //====================================================================================================100 #include <stdlib.h> #include <math.h> #include <string.h> #include <hip/hip_runtime.h> #include "main.h" #include "extract_kernel.cu" #include "prepare_kernel.cu" #include "reduce_kernel.cu" #include "srad_kernel.cu" #include "srad2_kernel.cu" #include "compress_kernel.cu" #include "graphics.c" #include "resize.c" #include "timer.c" //====================================================================================================100 // MAIN FUNCTION //====================================================================================================100 int main(int argc, char *argv []){ //================================================================================80 // VARIABLES //================================================================================80 // time long long time0; long long time1; long long time2; long long time3; long long time4; long long time5; long long time6; long long time7; long long time8; long long time9; long long time10; long long time11; long long time12; time0 = get_time(); // inputs image, input paramenters fp* image_ori; // originalinput image int image_ori_rows; int image_ori_cols; long image_ori_elem; // inputs image, input paramenters fp* image; // input image int Nr,Nc; // IMAGE nbr of rows/cols/elements long Ne; // algorithm parameters int niter; // nbr of iterations fp lambda; // update step size // size of IMAGE int r1,r2,c1,c2; // row/col coordinates of uniform ROI long NeROI; // ROI nbr of elements // surrounding pixel indicies int *iN,*iS,*jE,*jW; // counters int iter; // primary loop long i,j; // image row/col // memory sizes int mem_size_i; int mem_size_j; int mem_size_single; //================================================================================80 // GPU VARIABLES //================================================================================80 // CUDA kernel execution parameters dim3 threads; int blocks_x; dim3 blocks; dim3 blocks2; dim3 blocks3; // memory sizes int mem_size; // matrix memory size // HOST int no; int mul; fp total; fp total2; fp meanROI; fp meanROI2; fp varROI; fp q0sqr; // DEVICE fp* d_sums; // partial sum fp* d_sums2; int* d_iN; int* d_iS; int* d_jE; int* d_jW; fp* d_dN; fp* d_dS; fp* d_dW; fp* d_dE; fp* d_I; // input IMAGE on DEVICE fp* d_c; time1 = get_time(); //================================================================================80 // GET INPUT PARAMETERS //================================================================================80 if(argc != 5){ printf("ERROR: wrong number of arguments\n"); return 0; } else{ niter = atoi(argv[1]); lambda = atof(argv[2]); 
Nr = atoi(argv[3]); // it is 502 in the original image Nc = atoi(argv[4]); // it is 458 in the original image } time2 = get_time(); //================================================================================80 // READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN) //================================================================================80 // read image image_ori_rows = 502; image_ori_cols = 458; image_ori_elem = image_ori_rows * image_ori_cols; image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem); if ( !read_graphics( "../data/srad/image.pgm", image_ori, image_ori_rows, image_ori_cols, 1) ) return -1; time3 = get_time(); //================================================================================80 // RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig) //================================================================================80 Ne = Nr*Nc; image = (fp*)malloc(sizeof(fp) * Ne); resize( image_ori, image_ori_rows, image_ori_cols, image, Nr, Nc, 1); time4 = get_time(); //================================================================================80 // SETUP //================================================================================80 r1 = 0; // top row index of ROI r2 = Nr - 1; // bottom row index of ROI c1 = 0; // left column index of ROI c2 = Nc - 1; // right column index of ROI // ROI image size NeROI = (r2-r1+1)*(c2-c1+1); // number of elements in ROI, ROI size // allocate variables for surrounding pixels mem_size_i = sizeof(int) * Nr; // iN = (int *)malloc(mem_size_i) ; // north surrounding element iS = (int *)malloc(mem_size_i) ; // south surrounding element mem_size_j = sizeof(int) * Nc; // jW = (int *)malloc(mem_size_j) ; // west surrounding element jE = (int *)malloc(mem_size_j) ; // east surrounding element // N/S/W/E indices of surrounding pixels (every element of IMAGE) for (i=0; i<Nr; i++) { iN[i] = i-1; // holds index of IMAGE row above iS[i] = i+1; // holds index of IMAGE row below } for (j=0; j<Nc; j++) { jW[j] = j-1; // holds index of IMAGE column on the left jE[j] = j+1; // holds index of IMAGE column on the right } // N/S/W/E boundary conditions, fix surrounding indices outside boundary of image iN[0] = 0; // changes IMAGE top row index from -1 to 0 iS[Nr-1] = Nr-1; // changes IMAGE bottom row index from Nr to Nr-1 jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0 jE[Nc-1] = Nc-1; // changes IMAGE rightmost column index from Nc to Nc-1 //================================================================================80 // GPU SETUP //================================================================================80 // allocate memory for entire IMAGE on DEVICE mem_size = sizeof(fp) * Ne; // get the size of float representation of input IMAGE hipMalloc((void **)&d_I, mem_size); // // allocate memory for coordinates on DEVICE hipMalloc((void **)&d_iN, mem_size_i); // hipMemcpy(d_iN, iN, mem_size_i, hipMemcpyHostToDevice); // hipMalloc((void **)&d_iS, mem_size_i); // hipMemcpy(d_iS, iS, mem_size_i, hipMemcpyHostToDevice); // hipMalloc((void **)&d_jE, mem_size_j); // hipMemcpy(d_jE, jE, mem_size_j, hipMemcpyHostToDevice); // hipMalloc((void **)&d_jW, mem_size_j); // hipMemcpy(d_jW, jW, mem_size_j, hipMemcpyHostToDevice); // // allocate memory for partial sums on DEVICE hipMalloc((void **)&d_sums, mem_size); // hipMalloc((void **)&d_sums2, mem_size); // // allocate memory for derivatives hipMalloc((void **)&d_dN, mem_size); // hipMalloc((void **)&d_dS, mem_size); // hipMalloc((void **)&d_dW, mem_size); // hipMalloc((void 
**)&d_dE, mem_size); // // allocate memory for coefficient on DEVICE hipMalloc((void **)&d_c, mem_size); // //checkCUDAError("setup"); //================================================================================80 // KERNEL EXECUTION PARAMETERS //================================================================================80 // all kernels operating on entire matrix threads.x = NUMBER_THREADS; // define the number of threads in the block threads.y = 1; blocks_x = Ne/threads.x; if (Ne % threads.x != 0){ // compensate for division remainder above by adding one grid blocks_x = blocks_x + 1; } blocks.x = blocks_x; // define the number of blocks in the grid blocks.y = 1; time5 = get_time(); //================================================================================80 // COPY INPUT TO CPU //================================================================================80 hipMemcpy(d_I, image, mem_size, hipMemcpyHostToDevice); time6 = get_time(); //================================================================================80 // SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT //================================================================================80 hipLaunchKernelGGL(extract, blocks, threads, 0, 0, Ne, d_I); //checkCUDAError("extract"); time7 = get_time(); //================================================================================80 // COMPUTATION //================================================================================80 // printf("iterations: "); // execute main loop for (iter=0; iter<niter; iter++){ // do for the number of iterations input parameter // printf("%d ", iter); // fflush(NULL); // execute square kernel hipLaunchKernelGGL(prepare, blocks, threads, 0, 0, Ne, d_I, d_sums, d_sums2); //checkCUDAError("prepare"); // performs subsequent reductions of sums blocks2.x = blocks.x; // original number of blocks blocks2.y = blocks.y; no = Ne; // original number of sum elements mul = 1; // original multiplier while(blocks2.x != 0){ //checkCUDAError("before reduce"); // run kernel hipLaunchKernelGGL(reduce, blocks2, threads, 0, 0, Ne, no, mul, d_sums, d_sums2); //checkCUDAError("reduce"); // update execution parameters no = blocks2.x; // get current number of elements if(blocks2.x == 1){ blocks2.x = 0; } else{ mul = mul * NUMBER_THREADS; // update the increment blocks_x = blocks2.x/threads.x; // number of blocks if (blocks2.x % threads.x != 0){ // compensate for division remainder above by adding one grid blocks_x = blocks_x + 1; } blocks2.x = blocks_x; blocks2.y = 1; } //checkCUDAError("after reduce"); } //checkCUDAError("before copy sum"); // copy total sums to device mem_size_single = sizeof(fp) * 1; hipMemcpy(&total, d_sums, mem_size_single, hipMemcpyDeviceToHost); hipMemcpy(&total2, d_sums2, mem_size_single, hipMemcpyDeviceToHost); //checkCUDAError("copy sum"); // calculate statistics meanROI = total / fp(NeROI); // gets mean (average) value of element in ROI meanROI2 = meanROI * meanROI; // varROI = (total2 / fp(NeROI)) - meanROI2; // gets variance of ROI q0sqr = varROI / meanROI2; // gets standard deviation of ROI // execute srad kernel hipLaunchKernelGGL(srad, blocks, threads, 0, 0, lambda, // SRAD coefficient Nr, // # of rows in input image Nc, // # of columns in input image Ne, // # of elements in input image d_iN, // indices of North surrounding pixels d_iS, // indices of South surrounding pixels d_jE, // indices of East surrounding pixels d_jW, // indices of West surrounding pixels d_dN, // North derivative d_dS, // South derivative d_dW, 
// West derivative d_dE, // East derivative q0sqr, // standard deviation of ROI d_c, // diffusion coefficient d_I); // output image //checkCUDAError("srad"); // execute srad2 kernel hipLaunchKernelGGL(srad2, blocks, threads, 0, 0, lambda, // SRAD coefficient Nr, // # of rows in input image Nc, // # of columns in input image Ne, // # of elements in input image d_iN, // indices of North surrounding pixels d_iS, // indices of South surrounding pixels d_jE, // indices of East surrounding pixels d_jW, // indices of West surrounding pixels d_dN, // North derivative d_dS, // South derivative d_dW, // West derivative d_dE, // East derivative d_c, // diffusion coefficient d_I); // output image //checkCUDAError("srad2"); } // printf("\n"); time8 = get_time(); //================================================================================80 // SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS //================================================================================80 hipLaunchKernelGGL(compress, blocks, threads, 0, 0, Ne, d_I); //checkCUDAError("compress"); time9 = get_time(); //================================================================================80 // COPY RESULTS BACK TO CPU //================================================================================80 hipMemcpy(image, d_I, mem_size, hipMemcpyDeviceToHost); //checkCUDAError("copy back"); time10 = get_time(); //================================================================================80 // WRITE IMAGE AFTER PROCESSING //================================================================================80 write_graphics( "image_out.pgm", image, Nr, Nc, 1, 255); time11 = get_time(); //================================================================================80 // DEALLOCATE //================================================================================80 free(image_ori); free(image); free(iN); free(iS); free(jW); free(jE); hipFree(d_I); hipFree(d_c); hipFree(d_iN); hipFree(d_iS); hipFree(d_jE); hipFree(d_jW); hipFree(d_dN); hipFree(d_dS); hipFree(d_dE); hipFree(d_dW); hipFree(d_sums); hipFree(d_sums2); time12 = get_time(); //================================================================================80 // DISPLAY TIMING //================================================================================80 printf("Time spent in different stages of the application:\n"); printf("%15.12f s, %15.12f %% : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : READ COMMAND LINE PARAMETERS\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : READ IMAGE FROM FILE\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : RESIZE IMAGE\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY ALLOCATION\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : COPY DATA TO CPU->GPU\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : EXTRACT IMAGE\n", (float) (time7-time6) / 1000000, (float) (time7-time6) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : COMPUTE\n", (float) (time8-time7) / 1000000, (float) (time8-time7) / (float) (time12-time0) * 100); 
printf("%15.12f s, %15.12f %% : COMPRESS IMAGE\n", (float) (time9-time8) / 1000000, (float) (time9-time8) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : COPY DATA TO GPU->CPU\n", (float) (time10-time9) / 1000000, (float) (time10-time9) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : SAVE IMAGE INTO FILE\n", (float) (time11-time10) / 1000000, (float) (time11-time10) / (float) (time12-time0) * 100); printf("%15.12f s, %15.12f %% : FREE MEMORY\n", (float) (time12-time11) / 1000000, (float) (time12-time11) / (float) (time12-time0) * 100); printf("Total time:\n"); printf("%.12f s\n", (float) (time12-time0) / 1000000); } //====================================================================================================100 // END OF FILE //====================================================================================================100
98aa5949aac4af08b09382a8beaee669a9493d0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Modified from // https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu /* 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include <stdio.h> #define THREADS_PER_BLOCK 16 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) //#define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p) { // params: box (5) [x1, y1, x2, y2, angle] const float MARGIN = 1e-5; float center_x = (box[0] + box[2]) / 2; float center_y = (box[1] + box[3]) / 2; float angle_cos = cos(-box[4]), angle_sin = sin(-box[4]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x; float rot_y = -(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y; #ifdef DEBUG printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2], box[3], box[4]); printf( "center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, " "%.3f)\n", center_x, center_y, angle_cos, angle_sin, p.x, p.y, rot_x, rot_y); #endif return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN && rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p) { float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x; float new_y = -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center) { return 
atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b) { // params: box_a (5) [x1, y1, x2, y2, angle] // params: box_b (5) [x1, y1, x2, y2, angle] float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], a_angle = box_a[4]; float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], b_angle = box_b[4]; Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2); Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2); #ifdef DEBUG printf( "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); #endif Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { #ifdef DEBUG printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); #ifdef DEBUG printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; i++) { if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); for (int i = 0; i < cnt; i++) { printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b) { // params: box_a (5) [x1, y1, x2, y2, angle] // params: box_b (5) [x1, y1, x2, y2, angle] float sa = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]); float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]); float s_overlap = 
box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap) { const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 5; const float *cur_box_b = boxes_b + b_idx * 5; float s_overlap = box_overlap(cur_box_a, cur_box_b); ans_overlap[a_idx * num_b + b_idx] = s_overlap; } __global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou) { const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 5; const float *cur_box_b = boxes_b + b_idx * 5; float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; } __global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask) { // params: boxes (N, 5) [x1, y1, x2, y2, ry] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_bev(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __device__ inline float iou_normal(float const *const a, float const *const b) { float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = (a[2] - a[0]) * (a[3] - a[1]); float Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / fmaxf(Sa + Sb - interS, EPS); } __global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask) { // params: boxes (N, 5) [x1, y1, x2, y2, ry] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * 
THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_normal(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap) { dim3 blocks( DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); hipLaunchKernelGGL(( boxes_overlap_kernel), dim3(blocks), dim3(threads), 0, 0, num_a, boxes_a, num_b, boxes_b, ans_overlap); #ifdef DEBUG hipDeviceSynchronize(); // for using printf in kernel function #endif } void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou) { dim3 blocks( DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); hipLaunchKernelGGL(( boxes_iou_bev_kernel), dim3(blocks), dim3(threads), 0, 0, num_a, boxes_a, num_b, boxes_b, ans_iou); } void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes, mask); } void nmsNormalLauncher(const float *boxes, unsigned long long *mask, int boxes_num, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); hipLaunchKernelGGL(( nms_normal_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes, mask); }
98aa5949aac4af08b09382a8beaee669a9493d0b.cu
// Modified from // https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu /* 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include <stdio.h> #define THREADS_PER_BLOCK 16 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) //#define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p) { // params: box (5) [x1, y1, x2, y2, angle] const float MARGIN = 1e-5; float center_x = (box[0] + box[2]) / 2; float center_y = (box[1] + box[3]) / 2; float angle_cos = cos(-box[4]), angle_sin = sin(-box[4]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x; float rot_y = -(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y; #ifdef DEBUG printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2], box[3], box[4]); printf( "center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, " "%.3f)\n", center_x, center_y, angle_cos, angle_sin, p.x, p.y, rot_x, rot_y); #endif return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN && rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p) { float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x; float new_y = -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center) { return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ 
inline float box_overlap(const float *box_a, const float *box_b) { // params: box_a (5) [x1, y1, x2, y2, angle] // params: box_b (5) [x1, y1, x2, y2, angle] float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], a_angle = box_a[4]; float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], b_angle = box_b[4]; Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2); Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2); #ifdef DEBUG printf( "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); #endif Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { #ifdef DEBUG printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); #ifdef DEBUG printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; i++) { if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); for (int i = 0; i < cnt; i++) { printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b) { // params: box_a (5) [x1, y1, x2, y2, angle] // params: box_b (5) [x1, y1, x2, y2, angle] float sa = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]); float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]); float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __global__ void 
boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap) { const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 5; const float *cur_box_b = boxes_b + b_idx * 5; float s_overlap = box_overlap(cur_box_a, cur_box_b); ans_overlap[a_idx * num_b + b_idx] = s_overlap; } __global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou) { const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 5; const float *cur_box_b = boxes_b + b_idx * 5; float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; } __global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask) { // params: boxes (N, 5) [x1, y1, x2, y2, ry] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_bev(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __device__ inline float iou_normal(float const *const a, float const *const b) { float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = (a[2] - a[0]) * (a[3] - a[1]); float Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / fmaxf(Sa + Sb - interS, EPS); } __global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask) { // params: boxes (N, 5) [x1, y1, x2, y2, ry] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * 
THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_normal(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap) { dim3 blocks( DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_overlap); #ifdef DEBUG cudaDeviceSynchronize(); // for using printf in kernel function #endif } void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou) { dim3 blocks( DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_iou); } void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask); } void nmsNormalLauncher(const float *boxes, unsigned long long *mask, int boxes_num, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask); }
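nms_kernel and nms_normal_kernel above only fill the 64-bit suppression bitmask; picking the surviving boxes is left to the caller. The sketch below is a hypothetical host-side decoder, not taken from the repository, showing how such a mask is typically consumed; it assumes the boxes were sorted by descending score before the kernel ran and that mask_host holds the boxes_num * col_blocks words copied back from the device.

// Hypothetical host-side decoding of the suppression mask written by the NMS
// kernels above; boxes are assumed pre-sorted by descending score.
#include <vector>

std::vector<int> decode_nms_mask(const unsigned long long *mask_host, int boxes_num) {
  const int col_blocks = (boxes_num + 63) / 64;              // DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)
  std::vector<unsigned long long> removed(col_blocks, 0ULL); // bits of boxes suppressed so far
  std::vector<int> keep;
  for (int i = 0; i < boxes_num; ++i) {
    const int block = i / 64, bit = i % 64;
    if (removed[block] & (1ULL << bit)) continue;            // box i was suppressed by a better box
    keep.push_back(i);                                       // box i survives
    const unsigned long long *row = mask_host + (size_t)i * col_blocks;
    for (int b = 0; b < col_blocks; ++b) {
      removed[b] |= row[b];                                  // suppress everything box i overlaps
    }
  }
  return keep;
}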
bf09c9d42a4126a8434dd131daa12be502fa5eb0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* The MIT License (MIT) Copyright (c) 2016 Charles Hubbard and Chinmay Hegde Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Experiment to characterize the ability of GPUFish to recover a rank-1 matrix */ #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <tuple> #include <math.h> #include <stdexcept> #include "gpu_fish_headers.h" #define RANK 1 #define BLOCKS 20 //========== define derivative and regulizer for gradient updates ============// __global__ void GradientUpdate(float* L, float* R, const int rank, float* dev_ratings, int* dev_offsets, int* dev_chunk_size, int round, float alpha, float average) { __shared__ float sh_L[RANK]; __shared__ float sh_R[RANK]; __shared__ float ijr[3]; int idx = blockIdx.x; int offset = dev_offsets[idx]; int N = dev_chunk_size[idx]/3; float B = 1; float m_hat = 0; int i = 0; int j = 0; float m = 0; float deriv; for (int p=0;p<N;p++) { for (int k=(threadIdx.x); k<3; k+= blockDim.x){ ijr[k] = dev_ratings[offset+p*3+k]; } i = (int) ijr[0]; j = (int) ijr[1]; m = ijr[2]; for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) { sh_L[k] = L[i*RANK + k]; sh_R[k] = R[j*RANK + k]; } m_hat = 0; __syncthreads(); //========== get current estimate of rating ============// #pragma unroll for (int k=0;k<RANK;k++) { m_hat += sh_L[k]*sh_R[k]; } //========== calculate derivative: one-bit ============// if(m>average) { deriv = -1/(exp(m_hat) + 1); } else { deriv = exp(m_hat)/(exp(m_hat)+1); } /* Perform movielens analysis with squared error function //========== calculate derivative: squared error ============// deriv = 2*(m_hat-(m-average)); */ for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) { sh_L[k] = sh_L[k] - alpha*deriv*sh_R[k]; sh_R[k] = sh_R[k] - alpha*deriv*sh_L[k]; } __syncthreads(); //========== regulizer ============// float normL = 0; float normR = 0; #pragma unroll for (int k=0;k<RANK;k++) { normL+= sh_L[k]*sh_L[k]; normR+= sh_R[k]*sh_R[k]; } if (normL>B){ for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) { sh_L[k] = sh_L[k]*sqrt(B)/sqrt(normL); } } if (normR>B){ for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) { sh_R[k] = sh_R[k]*sqrt(B)/sqrt(normR); } } for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) { L[i*RANK + k] = sh_L[k]; R[j*RANK + k] = sh_R[k]; } } } struct dataSet; int main() { //========== define rank and number of cores to use ============// const int blocks = BLOCKS; const int rank = RANK; std::ofstream results; results.open("data/results.txt"); //========== loop 
through 30 training sets ============// for (int j=0; j<30; j++){ std::cout << "================================" << std::endl; std::vector<std::tuple<float,float,float>> host_all_ratings; std::ostringstream fstring; fstring << "data/" <<"train" << j+1 << "r" << 1; std::string fileString = fstring.str(); dataSet* data = readFile(fileString, &host_all_ratings); const int rows = data->rows; const int columns = data->columns; float* cpu_nums_R = new float[columns*rank]; float* cpu_nums_L = new float[rows*rank]; //========== loop through 9 test sets ============// for (int i=0; i<9;i++) { gpu_fish(&host_all_ratings, cpu_nums_L, cpu_nums_R, rows, columns, rank, blocks, data->numRatings, data->average); std::ostringstream tString; tString << "data/" <<"test" << i+1 << "r" << 1; std::string testString = tString.str(); std::fstream infile; infile.open(testString); if (!infile.is_open()) { std::cout << "file not found, exiting" << std::endl; return 0; } int user; int movie; float rating; int count = 0; int correct = 0; double m_hat = 0; float c = data->average; std::string line; while (getline(infile,line,'\n')) { m_hat = 0; std::stringstream stream(line); stream >> user >> movie >> rating; for (int k=0;k<rank;k++) { m_hat += cpu_nums_L[(user-1)*rank + k]*cpu_nums_R[(movie-1)*rank + k]; } if ((m_hat>=0) && (rating>=c)) { correct++; } if ((m_hat<0) && (rating<c)) { correct++; } count++; } double ratio = ((double) correct)/count; if (i==0) { results << "Number of samples: " << data->numRatings << std::endl; std::cout << "Number of samples: " << data->numRatings << std::endl; } //========== write recovery percentage to file ============// results << ratio << "\t"; infile.close(); } results << std::endl; delete[] cpu_nums_R; delete[] cpu_nums_L; } results.close(); return 0; }
bf09c9d42a4126a8434dd131daa12be502fa5eb0.cu
/* The MIT License (MIT)

Copyright (c) 2016 Charles Hubbard and Chinmay Hegde

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. */

/* Experiment to characterize the ability of GPUFish to recover a rank-1 matrix */

#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <tuple>
#include <math.h>
#include <stdexcept>

#include "gpu_fish_headers.h"

#define RANK 1
#define BLOCKS 20

//========== define derivative and regulizer for gradient updates ============//
__global__ void GradientUpdate(float* L, float* R, const int rank, float* dev_ratings,
                               int* dev_offsets, int* dev_chunk_size, int round,
                               float alpha, float average) {

  __shared__ float sh_L[RANK];
  __shared__ float sh_R[RANK];
  __shared__ float ijr[3];

  int idx = blockIdx.x;
  int offset = dev_offsets[idx];
  int N = dev_chunk_size[idx]/3;
  float B = 1;
  float m_hat = 0;
  int i = 0;
  int j = 0;
  float m = 0;
  float deriv;

  for (int p=0;p<N;p++) {

    for (int k=(threadIdx.x); k<3; k+= blockDim.x){
      ijr[k] = dev_ratings[offset+p*3+k];
    }
    i = (int) ijr[0];
    j = (int) ijr[1];
    m = ijr[2];

    for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) {
      sh_L[k] = L[i*RANK + k];
      sh_R[k] = R[j*RANK + k];
    }
    m_hat = 0;
    __syncthreads();

    //========== get current estimate of rating ============//
    #pragma unroll
    for (int k=0;k<RANK;k++) {
      m_hat += sh_L[k]*sh_R[k];
    }

    //========== calculate derivative: one-bit ============//
    if(m>average) {
      deriv = -1/(exp(m_hat) + 1);
    }
    else {
      deriv = exp(m_hat)/(exp(m_hat)+1);
    }

    /* Perform movielens analysis with squared error function
    //========== calculate derivative: squared error ============//
    deriv = 2*(m_hat-(m-average));
    */

    for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) {
      sh_L[k] = sh_L[k] - alpha*deriv*sh_R[k];
      sh_R[k] = sh_R[k] - alpha*deriv*sh_L[k];
    }
    __syncthreads();

    //========== regulizer ============//
    float normL = 0;
    float normR = 0;
    #pragma unroll
    for (int k=0;k<RANK;k++) {
      normL+= sh_L[k]*sh_L[k];
      normR+= sh_R[k]*sh_R[k];
    }
    if (normL>B){
      for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) {
        sh_L[k] = sh_L[k]*sqrt(B)/sqrt(normL);
      }
    }
    if (normR>B){
      for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) {
        sh_R[k] = sh_R[k]*sqrt(B)/sqrt(normR);
      }
    }

    for (int k=(threadIdx.x); k<RANK; k+= blockDim.x) {
      L[i*RANK + k] = sh_L[k];
      R[j*RANK + k] = sh_R[k];
    }
  }
}

struct dataSet;

int main() {

  //========== define rank and number of cores to use ============//
  const int blocks = BLOCKS;
  const int rank = RANK;

  std::ofstream results;
  results.open("data/results.txt");

  //========== loop through 30 training sets ============//
  for (int j=0; j<30; j++){
    std::cout << "================================" << std::endl;

    std::vector<std::tuple<float,float,float>> host_all_ratings;
    std::ostringstream fstring;
    fstring << "data/" <<"train" << j+1 << "r" << 1;
    std::string fileString = fstring.str();

    dataSet* data = readFile(fileString, &host_all_ratings);
    const int rows = data->rows;
    const int columns = data->columns;

    float* cpu_nums_R = new float[columns*rank];
    float* cpu_nums_L = new float[rows*rank];

    //========== loop through 9 test sets ============//
    for (int i=0; i<9;i++) {

      gpu_fish(&host_all_ratings, cpu_nums_L, cpu_nums_R, rows, columns, rank,
               blocks, data->numRatings, data->average);

      std::ostringstream tString;
      tString << "data/" <<"test" << i+1 << "r" << 1;
      std::string testString = tString.str();

      std::fstream infile;
      infile.open(testString);
      if (!infile.is_open()) {
        std::cout << "file not found, exiting" << std::endl;
        return 0;
      }

      int user;
      int movie;
      float rating;
      int count = 0;
      int correct = 0;
      double m_hat = 0;
      float c = data->average;
      std::string line;

      while (getline(infile,line,'\n')) {
        m_hat = 0;
        std::stringstream stream(line);
        stream >> user >> movie >> rating;
        for (int k=0;k<rank;k++) {
          m_hat += cpu_nums_L[(user-1)*rank + k]*cpu_nums_R[(movie-1)*rank + k];
        }
        if ((m_hat>=0) && (rating>=c)) {
          correct++;
        }
        if ((m_hat<0) && (rating<c)) {
          correct++;
        }
        count++;
      }

      double ratio = ((double) correct)/count;

      if (i==0) {
        results << "Number of samples: " << data->numRatings << std::endl;
        std::cout << "Number of samples: " << data->numRatings << std::endl;
      }

      //========== write recovery percentage to file ============//
      results << ratio << "\t";
      infile.close();
    }
    results << std::endl;
    delete[] cpu_nums_R;
    delete[] cpu_nums_L;
  }
  results.close();
  return 0;
}
31e17fe7b781316895fb24617aef3aede1503664.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "hip/hip_runtime.h" } __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 1 : -1; } void binarize_gpu(float *x, int n, float *binary) { hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary); check_error(hipPeekAtLastError()); } __global__ void binarize_input_kernel(float *input, int n, int size, float *binary) { int s = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (s >= size) return; int i = 0; float mean = 0; for (i = 0; i < n; ++i) { mean += abs(input[i * size + s]); } mean = mean / n; for (i = 0; i < n; ++i) { binary[i * size + s] = (input[i * size + s] > 0) ? mean : -mean; } } void binarize_input_gpu(float *input, int n, int size, float *binary) { hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary); check_error(hipPeekAtLastError()); } __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for (i = 0; i < size; ++i) { mean += abs(weights[f * size + i]); } mean = mean / size; for (i = 0; i < size; ++i) { binary[f * size + i] = (weights[f * size + i] > 0) ? mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary); check_error(hipPeekAtLastError()); } void forward_convolutional_layer_gpu(convolutional_layer l, network net) { fill_gpu(l.outputs * l.batch, 0, l.output_gpu, 1); if (l.binary) { binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu); swap_binary(&l); } if (l.xnor) { binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu); swap_binary(&l); binarize_gpu(net.input_gpu, l.c / l.groups * l.h * l.w * l.batch, l.binary_input_gpu); net.input_gpu = l.binary_input_gpu; } #ifdef CUDNN float one = 1; cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, net.workspace, l.workspace_size, &one, l.dstTensorDesc, l.output_gpu); #else int m = l.n; // output channel int k = l.size * l.size * l.c; // kernel size, input channel int n = l.out_h * l.out_w; // output size float *a = l.weights_gpu; float *c = l.output_gpu; int group_size = l.c / l.groups; int group_step = l.h * l.w * group_size; k = k / l.groups; m = m / l.groups; int i, j; for (i = 0; i < l.batch; ++i) { for (j = 0; j < l.groups; j++) { float *aoffset = a + j * k; float *boffset = net.workspace; float *coffset = c + j * n * group_size; float *inputoffset = net.input_gpu + group_step * j; im2col_gpu(inputoffset, group_size, l.h, l.w, l.size, l.stride, l.pad, boffset); gemm_gpu(0, 0, m, n, k, 1, aoffset, k, boffset, n, 1, coffset, n); } c += l.out_h * l.out_w * l.n; net.input_gpu += l.c * l.h * l.w; } #endif /*cuda_pull_array(l.output_gpu, l.output, l.batch * 
l.outputs); image im = float_to_image(l.out_w, l.out_h, l.n, l.output); printf("\nfilter:\n"); print_image(im);*/ if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h); } activate_array_gpu(l.output_gpu, l.outputs * l.batch, l.activation); //if(l.dot > 0) dot_error_gpu(l); if (l.binary || l.xnor) swap_binary(&l); } __global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta) { int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -(size / 2.); int h_offset = -(size / 2.); int out_index = j + w * (i + h * (k + c * b)); int l, m; for (l = 0; l < size; ++l) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i + l; int cur_w = w_offset + j + m; int index = cur_w + w * (cur_h + h * (k + b * c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); delta[out_index] += valid ? rate * (x[index] - x[out_index]) : 0; } } } extern "C" void smooth_layer(layer l, int size, float rate) { int h = l.out_h; int w = l.out_w; int c = l.out_c; size_t n = h * w * c * l.batch; hipLaunchKernelGGL(( smooth_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu); check_error(hipPeekAtLastError()); } void backward_convolutional_layer_gpu(convolutional_layer l, network net) { if (l.smooth) { smooth_layer(l, 5, l.smooth); } constrain_gpu(l.outputs * l.batch, 1, l.delta_gpu, 1); gradient_array_gpu(l.output_gpu, l.outputs * l.batch, l.activation, l.delta_gpu); if (l.batch_normalize) { backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w * l.out_h); } float *original_input = net.input_gpu; if (l.xnor) net.input_gpu = l.binary_input_gpu; #ifdef CUDNN float one = 1; cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, net.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu); if (net.delta_gpu) { if (l.binary || l.xnor) swap_binary(&l); cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, net.workspace, l.workspace_size, &one, l.dsrcTensorDesc, net.delta_gpu); if (l.binary || l.xnor) swap_binary(&l); if (l.xnor) gradient_array_gpu(original_input, l.batch * l.c * l.h * l.w, HARDTAN, net.delta_gpu); } #else int i, j; int m = l.n; int n = l.size * l.size * l.c; int k = l.out_w * l.out_h; int group_size = l.c / l.groups; int group_step = l.h * l.w * group_size; n = n / l.groups; m = m / l.groups; for (i = 0; i < l.batch; ++i) { float *input_data = net.input_gpu + i * l.c * l.h * l.w; float *deltas = l.delta_gpu + i * l.n * l.out_w * l.out_h; float *outdeltas = net.delta_gpu + i * l.c * l.w *l.h; for (j = 0; j < l.groups; j++) { float *im = input_data + j * group_step; float *aoffset = deltas + j * group_size * k; float *boffset = net.workspace; float *coffset = l.weight_updates_gpu + j * n; im2col_gpu(im, group_size, l.h, l.w, l.size, l.stride, l.pad, boffset); gemm_gpu(0, 1, m, n, k, 1, aoffset, k, boffset, k, 1, coffset, n); if (net.delta_gpu) { if(l.binary || l.xnor) swap_binary(&l); aoffset = l.weights_gpu + j * n; boffset = deltas + j * group_size * k; coffset = net.workspace; gemm_gpu(1, 0, n, k, m, 1, aoffset, n, boffset, k, 0, 
coffset, k); col2im_gpu(net.workspace, group_size, l.h, l.w, l.size, l.stride, l.pad, outdeltas + j * group_step); if(l.binary || l.xnor) swap_binary(&l); } } if (net.delta_gpu && l.xnor) { gradient_array_gpu(original_input + i * l.c * l.h * l.w, l.c * l.h * l.w, HARDTAN, net.delta_gpu + i * l.c * l.h * l.w); } } #endif } void pull_convolutional_layer(convolutional_layer layer) { cuda_pull_array(layer.weights_gpu, layer.weights, layer.nweights); cuda_pull_array(layer.biases_gpu, layer.biases, layer.n); cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.nweights); cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n); if (layer.batch_normalize) { cuda_pull_array(layer.scales_gpu, layer.scales, layer.n); cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } } void push_convolutional_layer(convolutional_layer layer) { cuda_push_array(layer.weights_gpu, layer.weights, layer.nweights); cuda_push_array(layer.biases_gpu, layer.biases, layer.n); cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.nweights); cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n); if (layer.batch_normalize) { cuda_push_array(layer.scales_gpu, layer.scales, layer.n); cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } } void update_convolutional_layer_gpu(layer l, update_args a) { float learning_rate = a.learning_rate * l.learning_rate_scale; float momentum = a.momentum; float decay = a.decay; int batch = a.batch; int size = l.nweights; if (a.adam) { adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, size, batch, a.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); if (l.scales_gpu) { adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); } } else { axpy_gpu(size, -decay * batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_gpu(size, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_gpu(size, momentum, l.weight_updates_gpu, 1); axpy_gpu(l.n, learning_rate / batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); if (l.scales_gpu) { axpy_gpu(l.n, learning_rate / batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); } } }
31e17fe7b781316895fb24617aef3aede1503664.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "cuda.h" } __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 1 : -1; } void binarize_gpu(float *x, int n, float *binary) { binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary); check_error(cudaPeekAtLastError()); } __global__ void binarize_input_kernel(float *input, int n, int size, float *binary) { int s = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (s >= size) return; int i = 0; float mean = 0; for (i = 0; i < n; ++i) { mean += abs(input[i * size + s]); } mean = mean / n; for (i = 0; i < n; ++i) { binary[i * size + s] = (input[i * size + s] > 0) ? mean : -mean; } } void binarize_input_gpu(float *input, int n, int size, float *binary) { binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary); check_error(cudaPeekAtLastError()); } __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for (i = 0; i < size; ++i) { mean += abs(weights[f * size + i]); } mean = mean / size; for (i = 0; i < size; ++i) { binary[f * size + i] = (weights[f * size + i] > 0) ? mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary); check_error(cudaPeekAtLastError()); } void forward_convolutional_layer_gpu(convolutional_layer l, network net) { fill_gpu(l.outputs * l.batch, 0, l.output_gpu, 1); if (l.binary) { binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu); swap_binary(&l); } if (l.xnor) { binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu); swap_binary(&l); binarize_gpu(net.input_gpu, l.c / l.groups * l.h * l.w * l.batch, l.binary_input_gpu); net.input_gpu = l.binary_input_gpu; } #ifdef CUDNN float one = 1; cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, net.workspace, l.workspace_size, &one, l.dstTensorDesc, l.output_gpu); #else int m = l.n; // output channel int k = l.size * l.size * l.c; // kernel size, input channel int n = l.out_h * l.out_w; // output size float *a = l.weights_gpu; float *c = l.output_gpu; int group_size = l.c / l.groups; int group_step = l.h * l.w * group_size; k = k / l.groups; m = m / l.groups; int i, j; for (i = 0; i < l.batch; ++i) { for (j = 0; j < l.groups; j++) { float *aoffset = a + j * k; float *boffset = net.workspace; float *coffset = c + j * n * group_size; float *inputoffset = net.input_gpu + group_step * j; im2col_gpu(inputoffset, group_size, l.h, l.w, l.size, l.stride, l.pad, boffset); gemm_gpu(0, 0, m, n, k, 1, aoffset, k, boffset, n, 1, coffset, n); } c += l.out_h * l.out_w * l.n; net.input_gpu += l.c * l.h * l.w; } #endif /*cuda_pull_array(l.output_gpu, l.output, l.batch * l.outputs); image im = float_to_image(l.out_w, l.out_h, l.n, l.output); printf("\nfilter:\n"); print_image(im);*/ if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { 
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h); } activate_array_gpu(l.output_gpu, l.outputs * l.batch, l.activation); //if(l.dot > 0) dot_error_gpu(l); if (l.binary || l.xnor) swap_binary(&l); } __global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta) { int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -(size / 2.); int h_offset = -(size / 2.); int out_index = j + w * (i + h * (k + c * b)); int l, m; for (l = 0; l < size; ++l) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i + l; int cur_w = w_offset + j + m; int index = cur_w + w * (cur_h + h * (k + b * c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); delta[out_index] += valid ? rate * (x[index] - x[out_index]) : 0; } } } extern "C" void smooth_layer(layer l, int size, float rate) { int h = l.out_h; int w = l.out_w; int c = l.out_c; size_t n = h * w * c * l.batch; smooth_kernel<<<cuda_gridsize(n), BLOCK>>>(l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu); check_error(cudaPeekAtLastError()); } void backward_convolutional_layer_gpu(convolutional_layer l, network net) { if (l.smooth) { smooth_layer(l, 5, l.smooth); } constrain_gpu(l.outputs * l.batch, 1, l.delta_gpu, 1); gradient_array_gpu(l.output_gpu, l.outputs * l.batch, l.activation, l.delta_gpu); if (l.batch_normalize) { backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w * l.out_h); } float *original_input = net.input_gpu; if (l.xnor) net.input_gpu = l.binary_input_gpu; #ifdef CUDNN float one = 1; cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, net.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu); if (net.delta_gpu) { if (l.binary || l.xnor) swap_binary(&l); cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, net.workspace, l.workspace_size, &one, l.dsrcTensorDesc, net.delta_gpu); if (l.binary || l.xnor) swap_binary(&l); if (l.xnor) gradient_array_gpu(original_input, l.batch * l.c * l.h * l.w, HARDTAN, net.delta_gpu); } #else int i, j; int m = l.n; int n = l.size * l.size * l.c; int k = l.out_w * l.out_h; int group_size = l.c / l.groups; int group_step = l.h * l.w * group_size; n = n / l.groups; m = m / l.groups; for (i = 0; i < l.batch; ++i) { float *input_data = net.input_gpu + i * l.c * l.h * l.w; float *deltas = l.delta_gpu + i * l.n * l.out_w * l.out_h; float *outdeltas = net.delta_gpu + i * l.c * l.w *l.h; for (j = 0; j < l.groups; j++) { float *im = input_data + j * group_step; float *aoffset = deltas + j * group_size * k; float *boffset = net.workspace; float *coffset = l.weight_updates_gpu + j * n; im2col_gpu(im, group_size, l.h, l.w, l.size, l.stride, l.pad, boffset); gemm_gpu(0, 1, m, n, k, 1, aoffset, k, boffset, k, 1, coffset, n); if (net.delta_gpu) { if(l.binary || l.xnor) swap_binary(&l); aoffset = l.weights_gpu + j * n; boffset = deltas + j * group_size * k; coffset = net.workspace; gemm_gpu(1, 0, n, k, m, 1, aoffset, n, boffset, k, 0, coffset, k); col2im_gpu(net.workspace, group_size, l.h, l.w, l.size, l.stride, l.pad, outdeltas + j * group_step); if(l.binary || l.xnor) swap_binary(&l); } } if (net.delta_gpu && l.xnor) { gradient_array_gpu(original_input 
+ i * l.c * l.h * l.w, l.c * l.h * l.w, HARDTAN, net.delta_gpu + i * l.c * l.h * l.w); } } #endif } void pull_convolutional_layer(convolutional_layer layer) { cuda_pull_array(layer.weights_gpu, layer.weights, layer.nweights); cuda_pull_array(layer.biases_gpu, layer.biases, layer.n); cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.nweights); cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n); if (layer.batch_normalize) { cuda_pull_array(layer.scales_gpu, layer.scales, layer.n); cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } } void push_convolutional_layer(convolutional_layer layer) { cuda_push_array(layer.weights_gpu, layer.weights, layer.nweights); cuda_push_array(layer.biases_gpu, layer.biases, layer.n); cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.nweights); cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n); if (layer.batch_normalize) { cuda_push_array(layer.scales_gpu, layer.scales, layer.n); cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } } void update_convolutional_layer_gpu(layer l, update_args a) { float learning_rate = a.learning_rate * l.learning_rate_scale; float momentum = a.momentum; float decay = a.decay; int batch = a.batch; int size = l.nweights; if (a.adam) { adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, size, batch, a.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); if (l.scales_gpu) { adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); } } else { axpy_gpu(size, -decay * batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_gpu(size, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_gpu(size, momentum, l.weight_updates_gpu, 1); axpy_gpu(l.n, learning_rate / batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); if (l.scales_gpu) { axpy_gpu(l.n, learning_rate / batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); } } }
004a68d5b207d1138901ed18e29e6d818441c4af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright 2020 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include <thrust/sort.h> #include "modules/perception/inference/tensorrt/plugins/kernels.h" #include "modules/perception/inference/tensorrt/plugins/rpn_proposal_ssd_plugin.h" namespace apollo { namespace perception { namespace inference { // TODO(chenjiahao): add heat_map_b as anchor_offset // output anchors dims: [H, W, num_anchor_per_point, 4] __global__ void generate_anchors_kernel(const int height, const int width, const float anchor_stride, const int num_anchor_per_point, const float *anchor_heights, const float *anchor_widths, float *anchors) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int num_anchor = height * width * num_anchor_per_point; if (index >= num_anchor) { return; } float anchor_offset = 0; int pos_index = index / num_anchor_per_point; int anchor_id = index % num_anchor_per_point; int w_i = pos_index % width; int h_i = pos_index / width; // center coordinates float x_ctr = w_i * anchor_stride + anchor_offset; float y_ctr = h_i * anchor_stride + anchor_offset; float x_min = x_ctr - 0.5 * (anchor_widths[anchor_id] - 1); float y_min = y_ctr - 0.5 * (anchor_heights[anchor_id] - 1); float x_max = x_ctr + 0.5 * (anchor_widths[anchor_id] - 1); float y_max = y_ctr + 0.5 * (anchor_heights[anchor_id] - 1); anchors[index * 4] = x_min; anchors[index * 4 + 1] = y_min; anchors[index * 4 + 2] = x_max; anchors[index * 4 + 3] = y_max; } // in_boxes dims: [N, num_box_per_point * 4, H, W], // out_boxes dims: [N, H * W * num_box_per_point 4] template <typename Dtype> __global__ void reshape_boxes_kernel(const int nthreads, const Dtype *in_boxes, const int height, const int width, const int num_box_per_point, Dtype *out_boxes) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int num_point = height * width; int batch_id = index / num_point / num_box_per_point / 4; int feature_id = index % 4; int box_id = (index / 4) % num_box_per_point; int point_id = (index / num_box_per_point / 4) % num_point; int in_index = ((batch_id * num_box_per_point + box_id) * 4 + feature_id) * num_point + point_id; out_boxes[index] = in_boxes[in_index]; } } // in_scores dims: [N, 2 * num_box_per_point, H, W], // out_scores dims: [N, H * W * num_box_per_point, 2] template <typename Dtype> __global__ void reshape_scores_kernel(const int nthreads, const Dtype *in_scores, const int height, const int width, const int num_box_per_point, Dtype *out_scores) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int num_point = height * width; int batch_id = index / num_point / num_box_per_point / 2; int class_id = index % 2; int box_id = (index / 2) % num_box_per_point; int point_id = (index / num_box_per_point / 2) % num_point; 
int in_index = ((batch_id * 2 + class_id) * num_box_per_point + box_id) * num_point + point_id; out_scores[index] = in_scores[in_index]; } } int RPNProposalSSDPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream) { // dimsNCHW: [N, 2 * num_anchor_per_point, H, W] const float *rpn_cls_prob_reshape = reinterpret_cast<const float *>(inputs[0]); // dimsNCHW: [N, num_anchor_per_point * 4, H, W] const float *rpn_bbox_pred = reinterpret_cast<const float *>(inputs[1]); // dims: [N, 6, 1, 1] const float *im_info = reinterpret_cast<const float *>(inputs[2]); float *out_rois = reinterpret_cast<float *>(outputs[0]); float *host_im_info = new float[batchSize * 6](); BASE_CUDA_CHECK(hipMemcpyAsync(host_im_info, im_info, batchSize * 6 * sizeof(float), hipMemcpyDeviceToHost, stream)); const int origin_height = (int)(host_im_info[0]); const int origin_width = (int)(host_im_info[1]); int num_anchor = height_ * width_ * num_anchor_per_point_; int rpn_bbox_pred_size = batchSize * num_anchor * 4; int scores_size = batchSize * num_anchor * 2; int anchors_size = num_anchor * 4; int out_rois_size = batchSize * top_n_ * 5; // Using thrust::fill might cause crash float *init_out_rois = new float[out_rois_size](); std::fill_n(init_out_rois, out_rois_size, -1.0f); BASE_CUDA_CHECK(hipMemcpyAsync(out_rois, init_out_rois, out_rois_size * sizeof(float), hipMemcpyHostToDevice, stream)); int block_size, nthreads; // reshape to [N, num_anchor, 4] float *temp_rpn_bbox_pred; BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&temp_rpn_bbox_pred), rpn_bbox_pred_size * sizeof(float))); nthreads = rpn_bbox_pred_size; block_size = (nthreads - 1) / thread_size_ + 1; hipLaunchKernelGGL(( reshape_boxes_kernel), dim3(block_size), dim3(thread_size_), 0, stream, nthreads, rpn_bbox_pred, height_, width_, num_anchor_per_point_, temp_rpn_bbox_pred); // Normalization float *dev_bbox_mean, *dev_bbox_std; BASE_CUDA_CHECK( hipMalloc(reinterpret_cast<void **>(&dev_bbox_mean), 4 * sizeof(float))); BASE_CUDA_CHECK( hipMalloc(reinterpret_cast<void **>(&dev_bbox_std), 4 * sizeof(float))); BASE_CUDA_CHECK(hipMemcpyAsync(dev_bbox_mean, bbox_mean_, 4 * sizeof(float), hipMemcpyHostToDevice, stream)); BASE_CUDA_CHECK(hipMemcpyAsync(dev_bbox_std, bbox_std_, 4 * sizeof(float), hipMemcpyHostToDevice, stream)); repeatedly_mul_cuda(block_size, thread_size_, 0, stream, nthreads, temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_std, 4); repeatedly_add_cuda(block_size, thread_size_, 0, stream, nthreads, temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_mean, 4); // generate anchors float *anchors, *dev_anchor_heights, *dev_anchor_widths; BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&anchors), anchors_size * sizeof(float))); BASE_CUDA_CHECK( hipMemsetAsync(anchors, 0, anchors_size * sizeof(float), stream)); BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&dev_anchor_heights), num_anchor_per_point_ * sizeof(float))); BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&dev_anchor_widths), num_anchor_per_point_ * sizeof(float))); BASE_CUDA_CHECK(hipMemsetAsync( dev_anchor_heights, 0, num_anchor_per_point_ * sizeof(float), stream)); BASE_CUDA_CHECK(hipMemsetAsync( dev_anchor_widths, 0, num_anchor_per_point_ * sizeof(float), stream)); BASE_CUDA_CHECK(hipMemcpyAsync(dev_anchor_heights, anchor_heights_, num_anchor_per_point_ * sizeof(float), hipMemcpyHostToDevice, stream)); BASE_CUDA_CHECK(hipMemcpyAsync(dev_anchor_widths, anchor_widths_, num_anchor_per_point_ * sizeof(float), hipMemcpyHostToDevice, 
stream)); block_size = (anchors_size - 1) / thread_size_ + 1; hipLaunchKernelGGL(( generate_anchors_kernel), dim3(block_size), dim3(thread_size_), 0, stream, height_, width_, heat_map_a_, num_anchor_per_point_, dev_anchor_heights, dev_anchor_widths, anchors); // decode bbox float *proposals; BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&proposals), rpn_bbox_pred_size * sizeof(float))); BASE_CUDA_CHECK(hipMemsetAsync(proposals, 0, rpn_bbox_pred_size * sizeof(float), stream)); nthreads = batchSize * num_anchor; block_size = (nthreads - 1) / thread_size_ + 1; bbox_transform_inv_cuda(block_size, thread_size_, 0, stream, nthreads, anchors, temp_rpn_bbox_pred, num_anchor, 1, proposals); // clip boxes, i.e. refine proposals which are out of map if (refine_out_of_map_bbox_) { nthreads = rpn_bbox_pred_size; block_size = (nthreads - 1) / thread_size_ + 1; clip_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals, (float)origin_height, (float)origin_width); } // reshape scores to [N, num_anchor, 2] float *temp_scores; BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&temp_scores), scores_size * sizeof(float))); nthreads = scores_size; block_size = (nthreads - 1) / thread_size_ + 1; hipLaunchKernelGGL(( reshape_scores_kernel), dim3(block_size), dim3(thread_size_), 0, stream, nthreads, rpn_cls_prob_reshape, height_, width_, num_anchor_per_point_, temp_scores); // filter boxes according to min_size_mode and threshold_objectness float *filtered_proposals, *filtered_scores; int *filtered_count; BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&filtered_proposals), rpn_bbox_pred_size * sizeof(float))); BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&filtered_scores), batchSize * num_anchor * sizeof(float))); BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&filtered_count), batchSize * sizeof(int))); BASE_CUDA_CHECK(hipMemsetAsync(filtered_proposals, 0, rpn_bbox_pred_size * sizeof(float), stream)); BASE_CUDA_CHECK(hipMemsetAsync( filtered_scores, 0, batchSize * num_anchor * sizeof(float), stream)); BASE_CUDA_CHECK( hipMemsetAsync(filtered_count, 0, batchSize * sizeof(int), stream)); nthreads = batchSize * num_anchor; block_size = (nthreads - 1) / thread_size_ + 1; // TODO(chenjiahao): filter area filter_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals, temp_scores, nullptr, num_anchor, 1, 2, 0, 0, 1, min_size_mode_, min_size_h_, min_size_w_, threshold_objectness_, filtered_proposals, filtered_scores, nullptr, filtered_count); int *host_filtered_count = new int[batchSize](); BASE_CUDA_CHECK(hipMemcpyAsync(host_filtered_count, filtered_count, batchSize * sizeof(int), hipMemcpyDeviceToHost, stream)); // descending sort proposals by score int *sorted_indexes; BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&sorted_indexes), batchSize * num_anchor * sizeof(int))); for (int i = 0; i < batchSize; ++i) { thrust::sequence(thrust::device, sorted_indexes + i * num_anchor, sorted_indexes + i * num_anchor + host_filtered_count[i]); thrust::sort_by_key( thrust::device, filtered_scores + size_t(i * num_anchor), filtered_scores + size_t(i * num_anchor + host_filtered_count[i]), sorted_indexes + i * num_anchor, thrust::greater<float>()); } // keep max N candidates float *pre_nms_proposals; BASE_CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&pre_nms_proposals), batchSize * max_candidate_n_ * 4 * sizeof(float))); BASE_CUDA_CHECK(hipMemsetAsync( pre_nms_proposals, 0, batchSize * max_candidate_n_ * 4 * sizeof(float), stream)); nthreads = batchSize * max_candidate_n_; 
block_size = (nthreads - 1) / thread_size_ + 1; keep_topN_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, filtered_proposals, nullptr, nullptr, sorted_indexes, filtered_count, false, num_anchor, 0, max_candidate_n_, pre_nms_proposals, nullptr, nullptr); // Nms, keep top N proposals and output final proposals // output dims: [num_roi, 5] (axis-1: batch_id, x_min, y_min, x_max, y_max) int acc_box_num = 0; for (int i = 0; i < batchSize; ++i) { int cur_filter_count = ::min(host_filtered_count[i], max_candidate_n_); NmsForward( false, cur_filter_count, 4, overlap_ratio_, max_candidate_n_, top_n_, i, 0, pre_nms_proposals + size_t(i * max_candidate_n_ * 4), nullptr, nullptr, out_rois + size_t(acc_box_num * 5), &acc_box_num, stream); } out_rois_num_ = acc_box_num; // Free cuda memory BASE_CUDA_CHECK(hipFree(temp_rpn_bbox_pred)); BASE_CUDA_CHECK(hipFree(dev_bbox_mean)); BASE_CUDA_CHECK(hipFree(dev_bbox_std)); BASE_CUDA_CHECK(hipFree(anchors)); BASE_CUDA_CHECK(hipFree(dev_anchor_heights)); BASE_CUDA_CHECK(hipFree(dev_anchor_widths)); BASE_CUDA_CHECK(hipFree(proposals)); BASE_CUDA_CHECK(hipFree(temp_scores)); BASE_CUDA_CHECK(hipFree(filtered_proposals)); BASE_CUDA_CHECK(hipFree(filtered_scores)); BASE_CUDA_CHECK(hipFree(filtered_count)); BASE_CUDA_CHECK(hipFree(sorted_indexes)); BASE_CUDA_CHECK(hipFree(pre_nms_proposals)); // Free host memory delete[] host_im_info; delete[] host_filtered_count; delete[] init_out_rois; return 0; } } // namespace inference } // namespace perception } // namespace apollo
004a68d5b207d1138901ed18e29e6d818441c4af.cu
/****************************************************************************** * Copyright 2020 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include <thrust/sort.h> #include "modules/perception/inference/tensorrt/plugins/kernels.h" #include "modules/perception/inference/tensorrt/plugins/rpn_proposal_ssd_plugin.h" namespace apollo { namespace perception { namespace inference { // TODO(chenjiahao): add heat_map_b as anchor_offset // output anchors dims: [H, W, num_anchor_per_point, 4] __global__ void generate_anchors_kernel(const int height, const int width, const float anchor_stride, const int num_anchor_per_point, const float *anchor_heights, const float *anchor_widths, float *anchors) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int num_anchor = height * width * num_anchor_per_point; if (index >= num_anchor) { return; } float anchor_offset = 0; int pos_index = index / num_anchor_per_point; int anchor_id = index % num_anchor_per_point; int w_i = pos_index % width; int h_i = pos_index / width; // center coordinates float x_ctr = w_i * anchor_stride + anchor_offset; float y_ctr = h_i * anchor_stride + anchor_offset; float x_min = x_ctr - 0.5 * (anchor_widths[anchor_id] - 1); float y_min = y_ctr - 0.5 * (anchor_heights[anchor_id] - 1); float x_max = x_ctr + 0.5 * (anchor_widths[anchor_id] - 1); float y_max = y_ctr + 0.5 * (anchor_heights[anchor_id] - 1); anchors[index * 4] = x_min; anchors[index * 4 + 1] = y_min; anchors[index * 4 + 2] = x_max; anchors[index * 4 + 3] = y_max; } // in_boxes dims: [N, num_box_per_point * 4, H, W], // out_boxes dims: [N, H * W * num_box_per_point, 4] template <typename Dtype> __global__ void reshape_boxes_kernel(const int nthreads, const Dtype *in_boxes, const int height, const int width, const int num_box_per_point, Dtype *out_boxes) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int num_point = height * width; int batch_id = index / num_point / num_box_per_point / 4; int feature_id = index % 4; int box_id = (index / 4) % num_box_per_point; int point_id = (index / num_box_per_point / 4) % num_point; int in_index = ((batch_id * num_box_per_point + box_id) * 4 + feature_id) * num_point + point_id; out_boxes[index] = in_boxes[in_index]; } } // in_scores dims: [N, 2 * num_box_per_point, H, W], // out_scores dims: [N, H * W * num_box_per_point, 2] template <typename Dtype> __global__ void reshape_scores_kernel(const int nthreads, const Dtype *in_scores, const int height, const int width, const int num_box_per_point, Dtype *out_scores) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int num_point = height * width; int batch_id = index / num_point / num_box_per_point / 2; int class_id = index % 2; int box_id = (index / 2) % num_box_per_point; int point_id = (index / num_box_per_point / 2) % num_point; int in_index = ((batch_id * 2 + class_id) * num_box_per_point + box_id) * num_point + 
point_id; out_scores[index] = in_scores[in_index]; } } int RPNProposalSSDPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { // dimsNCHW: [N, 2 * num_anchor_per_point, H, W] const float *rpn_cls_prob_reshape = reinterpret_cast<const float *>(inputs[0]); // dimsNCHW: [N, num_anchor_per_point * 4, H, W] const float *rpn_bbox_pred = reinterpret_cast<const float *>(inputs[1]); // dims: [N, 6, 1, 1] const float *im_info = reinterpret_cast<const float *>(inputs[2]); float *out_rois = reinterpret_cast<float *>(outputs[0]); float *host_im_info = new float[batchSize * 6](); BASE_CUDA_CHECK(cudaMemcpyAsync(host_im_info, im_info, batchSize * 6 * sizeof(float), cudaMemcpyDeviceToHost, stream)); const int origin_height = (int)(host_im_info[0]); const int origin_width = (int)(host_im_info[1]); int num_anchor = height_ * width_ * num_anchor_per_point_; int rpn_bbox_pred_size = batchSize * num_anchor * 4; int scores_size = batchSize * num_anchor * 2; int anchors_size = num_anchor * 4; int out_rois_size = batchSize * top_n_ * 5; // Using thrust::fill might cause crash float *init_out_rois = new float[out_rois_size](); std::fill_n(init_out_rois, out_rois_size, -1.0f); BASE_CUDA_CHECK(cudaMemcpyAsync(out_rois, init_out_rois, out_rois_size * sizeof(float), cudaMemcpyHostToDevice, stream)); int block_size, nthreads; // reshape to [N, num_anchor, 4] float *temp_rpn_bbox_pred; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&temp_rpn_bbox_pred), rpn_bbox_pred_size * sizeof(float))); nthreads = rpn_bbox_pred_size; block_size = (nthreads - 1) / thread_size_ + 1; reshape_boxes_kernel<<<block_size, thread_size_, 0, stream>>>( nthreads, rpn_bbox_pred, height_, width_, num_anchor_per_point_, temp_rpn_bbox_pred); // Normalization float *dev_bbox_mean, *dev_bbox_std; BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&dev_bbox_mean), 4 * sizeof(float))); BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&dev_bbox_std), 4 * sizeof(float))); BASE_CUDA_CHECK(cudaMemcpyAsync(dev_bbox_mean, bbox_mean_, 4 * sizeof(float), cudaMemcpyHostToDevice, stream)); BASE_CUDA_CHECK(cudaMemcpyAsync(dev_bbox_std, bbox_std_, 4 * sizeof(float), cudaMemcpyHostToDevice, stream)); repeatedly_mul_cuda(block_size, thread_size_, 0, stream, nthreads, temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_std, 4); repeatedly_add_cuda(block_size, thread_size_, 0, stream, nthreads, temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_mean, 4); // generate anchors float *anchors, *dev_anchor_heights, *dev_anchor_widths; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&anchors), anchors_size * sizeof(float))); BASE_CUDA_CHECK( cudaMemsetAsync(anchors, 0, anchors_size * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&dev_anchor_heights), num_anchor_per_point_ * sizeof(float))); BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&dev_anchor_widths), num_anchor_per_point_ * sizeof(float))); BASE_CUDA_CHECK(cudaMemsetAsync( dev_anchor_heights, 0, num_anchor_per_point_ * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemsetAsync( dev_anchor_widths, 0, num_anchor_per_point_ * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemcpyAsync(dev_anchor_heights, anchor_heights_, num_anchor_per_point_ * sizeof(float), cudaMemcpyHostToDevice, stream)); BASE_CUDA_CHECK(cudaMemcpyAsync(dev_anchor_widths, anchor_widths_, num_anchor_per_point_ * sizeof(float), cudaMemcpyHostToDevice, stream)); block_size = (anchors_size - 1) / thread_size_ + 1; 
generate_anchors_kernel<<<block_size, thread_size_, 0, stream>>>( height_, width_, heat_map_a_, num_anchor_per_point_, dev_anchor_heights, dev_anchor_widths, anchors); // decode bbox float *proposals; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&proposals), rpn_bbox_pred_size * sizeof(float))); BASE_CUDA_CHECK(cudaMemsetAsync(proposals, 0, rpn_bbox_pred_size * sizeof(float), stream)); nthreads = batchSize * num_anchor; block_size = (nthreads - 1) / thread_size_ + 1; bbox_transform_inv_cuda(block_size, thread_size_, 0, stream, nthreads, anchors, temp_rpn_bbox_pred, num_anchor, 1, proposals); // clip boxes, i.e. refine proposals which are out of map if (refine_out_of_map_bbox_) { nthreads = rpn_bbox_pred_size; block_size = (nthreads - 1) / thread_size_ + 1; clip_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals, (float)origin_height, (float)origin_width); } // reshape scores to [N, num_anchor, 2] float *temp_scores; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&temp_scores), scores_size * sizeof(float))); nthreads = scores_size; block_size = (nthreads - 1) / thread_size_ + 1; reshape_scores_kernel<<<block_size, thread_size_, 0, stream>>>( nthreads, rpn_cls_prob_reshape, height_, width_, num_anchor_per_point_, temp_scores); // filter boxes according to min_size_mode and threshold_objectness float *filtered_proposals, *filtered_scores; int *filtered_count; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_proposals), rpn_bbox_pred_size * sizeof(float))); BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_scores), batchSize * num_anchor * sizeof(float))); BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_count), batchSize * sizeof(int))); BASE_CUDA_CHECK(cudaMemsetAsync(filtered_proposals, 0, rpn_bbox_pred_size * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemsetAsync( filtered_scores, 0, batchSize * num_anchor * sizeof(float), stream)); BASE_CUDA_CHECK( cudaMemsetAsync(filtered_count, 0, batchSize * sizeof(int), stream)); nthreads = batchSize * num_anchor; block_size = (nthreads - 1) / thread_size_ + 1; // TODO(chenjiahao): filter area filter_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals, temp_scores, nullptr, num_anchor, 1, 2, 0, 0, 1, min_size_mode_, min_size_h_, min_size_w_, threshold_objectness_, filtered_proposals, filtered_scores, nullptr, filtered_count); int *host_filtered_count = new int[batchSize](); BASE_CUDA_CHECK(cudaMemcpyAsync(host_filtered_count, filtered_count, batchSize * sizeof(int), cudaMemcpyDeviceToHost, stream)); // descending sort proposals by score int *sorted_indexes; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&sorted_indexes), batchSize * num_anchor * sizeof(int))); for (int i = 0; i < batchSize; ++i) { thrust::sequence(thrust::device, sorted_indexes + i * num_anchor, sorted_indexes + i * num_anchor + host_filtered_count[i]); thrust::sort_by_key( thrust::device, filtered_scores + size_t(i * num_anchor), filtered_scores + size_t(i * num_anchor + host_filtered_count[i]), sorted_indexes + i * num_anchor, thrust::greater<float>()); } // keep max N candidates float *pre_nms_proposals; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&pre_nms_proposals), batchSize * max_candidate_n_ * 4 * sizeof(float))); BASE_CUDA_CHECK(cudaMemsetAsync( pre_nms_proposals, 0, batchSize * max_candidate_n_ * 4 * sizeof(float), stream)); nthreads = batchSize * max_candidate_n_; block_size = (nthreads - 1) / thread_size_ + 1; keep_topN_boxes_cuda(block_size, thread_size_, 0, stream, 
nthreads, filtered_proposals, nullptr, nullptr, sorted_indexes, filtered_count, false, num_anchor, 0, max_candidate_n_, pre_nms_proposals, nullptr, nullptr); // Nms, keep top N proposals and output final proposals // output dims: [num_roi, 5] (axis-1: batch_id, x_min, y_min, x_max, y_max) int acc_box_num = 0; for (int i = 0; i < batchSize; ++i) { int cur_filter_count = std::min(host_filtered_count[i], max_candidate_n_); NmsForward( false, cur_filter_count, 4, overlap_ratio_, max_candidate_n_, top_n_, i, 0, pre_nms_proposals + size_t(i * max_candidate_n_ * 4), nullptr, nullptr, out_rois + size_t(acc_box_num * 5), &acc_box_num, stream); } out_rois_num_ = acc_box_num; // Free cuda memory BASE_CUDA_CHECK(cudaFree(temp_rpn_bbox_pred)); BASE_CUDA_CHECK(cudaFree(dev_bbox_mean)); BASE_CUDA_CHECK(cudaFree(dev_bbox_std)); BASE_CUDA_CHECK(cudaFree(anchors)); BASE_CUDA_CHECK(cudaFree(dev_anchor_heights)); BASE_CUDA_CHECK(cudaFree(dev_anchor_widths)); BASE_CUDA_CHECK(cudaFree(proposals)); BASE_CUDA_CHECK(cudaFree(temp_scores)); BASE_CUDA_CHECK(cudaFree(filtered_proposals)); BASE_CUDA_CHECK(cudaFree(filtered_scores)); BASE_CUDA_CHECK(cudaFree(filtered_count)); BASE_CUDA_CHECK(cudaFree(sorted_indexes)); BASE_CUDA_CHECK(cudaFree(pre_nms_proposals)); // Free host memory delete[] host_im_info; delete[] host_filtered_count; delete[] init_out_rois; return 0; } } // namespace inference } // namespace perception } // namespace apollo
967094fe4bfb7bd088a3d15112e5b3afb47103f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/flatten_op.h" #include "caffe2/operators/minmax_ops.h" #include "caffe2/operators/utility_ops.h" #include "caffe2/utils/math.h" #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/system/hip/execution_policy.h> #include <thrust/unique.h> namespace caffe2 { template <> bool WeightedSumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float>(); } else if (Input(0).IsType<float16>()) { return DoRunWithType<float16>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } template <> bool SumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float, float>(); } else if (Input(0).IsType<float16>()) { return DoRunWithType<float16, float16>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } template <> class CopyOnDeviceLikeOp<CUDAContext, CUDAContext, CUDAContext> : public Operator<CUDAContext> { public: CopyOnDeviceLikeOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} USE_OPERATOR_FUNCTIONS(CUDAContext); bool RunOnDevice() override { auto& input = Input(0); auto* output = OperatorBase::Output<Tensor<CUDAContext>>(0); CUDAContext context(GetGPUIDForPointer(Input(1).raw_data())); output->ResizeLike(input); context.template CopyItems<CUDAContext, CUDAContext>( input.meta(), input.size(), input.raw_data(), output->raw_mutable_data(input.meta())); return true; } }; REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>); REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>); REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>); REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>); // From CPU, copy it to whatever the current context REGISTER_CUDA_OPERATOR( CopyFromCPUInput, CopyOp<CUDAContext, CUDAContext, CPUContext>); // CopyGPUToCPU and CopyCPUToGPU should both be carried out in a cuda context, // since gpu code will be involved. REGISTER_CUDA_OPERATOR( CopyGPUToCPU, CopyOp<CUDAContext, CPUContext, CUDAContext>); REGISTER_CUDA_OPERATOR( CopyCPUToGPU, CopyOp<CUDAContext, CUDAContext, CPUContext>); // If we only specify Copy, we assume that it is a gpu to gpu copy - maybe // involving different GPUs. REGISTER_CUDA_OPERATOR(Copy, CopyOp<CUDAContext, CUDAContext, CUDAContext>); REGISTER_CUDA_OPERATOR( CopyOnDeviceLike, CopyOnDeviceLikeOp<CUDAContext, CUDAContext, CUDAContext>); REGISTER_CUDA_OPERATOR(UnsafeCoalesce, UnsafeCoalesceOp<CUDAContext>); CAFFE_KNOWN_TYPE(const float*); REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>); __global__ void NanCheckKernel(int N, const float* X, bool* result) { bool has_nan = false; CUDA_1D_KERNEL_LOOP(i, N) { // Note: we have no need to do early return, since only if this fails // will we not need to inspect all elements. No need to optimize the // case that will fail. 
has_nan = has_nan || isnan(X[i]) || isinf(X[i]); } __syncthreads(); if (has_nan) { result[0] = true; } } template <> bool NanCheckOp<CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); const size_t N = X.size(); const float* data_ptr = X.data<float>(); scratch_.Resize(1); math::Set<bool, CUDAContext>( 1, false, scratch_.mutable_data<bool>(), &context_); hipLaunchKernelGGL(( NanCheckKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, X.data<float>(), scratch_.mutable_data<bool>()); bool result = false; { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CUDA_ENFORCE(hipMemcpyAsync( &result, scratch_.raw_data(), 1, hipMemcpyDefault, context_.cuda_stream())); } // Note: we must synchronize here so we can inspect the result context_.FinishDeviceComputation(); // Print out diagnostic info if we have a NaN or inf if (result) { std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0) << std::endl; for (int j = 0; j < InputSize(); j++) { TensorCPU cpu_X; cpu_X.ResizeLike(Input(j)); // Hack to cause allocaiton happen here, so it won't happen // when we do CopyFrom. We need the mutex then because host->gpu // copies seem to possibly lock with NCCL. cpu_X.mutable_data<float>(); { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); cpu_X.CopyFrom(Input(j), &context_); } context_.FinishDeviceComputation(); std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j) << "]" << std::endl; tensorPrinter_.Print<float>(cpu_X); if (j == 0) { std::cerr << "NaN idxs:" << std::endl; auto* cpu_X_data = cpu_X.data<float>(); for (size_t i = 0; i < cpu_X.size(); ++i) { if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) { std::cerr << i << " "; } } } std::cerr << std::endl; } return false; } // This op should act as an identity matrix if we don't find any NaNs/infs. // Copy over the data if we are not doing this in-place. if (&X != Y) { Y->CopyFrom(X, &context_); } return true; } REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>); __global__ void ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) { CUDA_1D_KERNEL_LOOP(i, N) { maxout[i] = max(X[i], Y[i]); } } template <> bool MaxOp<float, CUDAContext>::Compute() { float* output_data = Output(0)->mutable_data<float>(); const int N = Input(0).size(); // Run pairwise-maxes for (int i = 1; i < InputSize(); ++i) { hipLaunchKernelGGL(( ElwiseMaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), (i == 0 ? Input(0).data<float>() : Output(0)->data<float>()), Input(i).data<float>(), output_data, N); } return true; } REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MaxGradient, MaxGradientOp<float, CUDAContext>); __global__ void ElwiseMinKernel(const float* X, const float* Y, float* minout, const int N) { CUDA_1D_KERNEL_LOOP(i, N) { minout[i] = min(X[i], Y[i]); } } template <> bool MinOp<float, CUDAContext>::Compute() { float* output_data = Output(0)->mutable_data<float>(); const int N = Input(0).size(); // Run pairwise-mines for (int i = 1; i < InputSize(); ++i) { hipLaunchKernelGGL(( ElwiseMinKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), (i == 0 ? 
Input(0).data<float>() : Output(0)->data<float>()), Input(i).data<float>(), output_data, N); } return true; } REGISTER_CUDA_OPERATOR(Min, MinOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MinGradient, MinGradientOp<float, CUDAContext>); template <typename T> __global__ void MaxMinGradKernel(int N, const T* mx, const T* x, const T* go, T* gi) { CUDA_1D_KERNEL_LOOP(i, N) { gi[i] = go[i] * (mx[i] == x[i]); } } template <> bool SelectGradientOpBase<float, CUDAContext>::RunOnDevice() { auto& output = Input(0); auto& grad_output = Input(1); const int kInputStartOffset = 2; const float* data = output.data<float>(); for (int i = 0; i < OutputSize(); i++) { auto& input = Input(i + kInputStartOffset); auto* grad_input = Output(i); grad_input->ResizeLike(input); hipLaunchKernelGGL(( MaxMinGradKernel), dim3(CAFFE_GET_BLOCKS(input.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), input.size(), output.data<float>(), input.data<float>(), grad_output.data<float>(), grad_input->mutable_data<float>()); } return true; } template <typename T_INDEX> __global__ void GatherKernel( const float* X, float* Y, const T_INDEX* indices, const int N, const int block_size) { for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = indices[i]; const float* src_offset = X + idx * block_size; float* dst_offset = Y + i * block_size; for (int j = threadIdx.x; j < block_size; j += blockDim.x) { dst_offset[j] = src_offset[j]; } } } template <> bool GatherOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, OperatorBase::Input<TensorCUDA>(INDICES)); } template <> template <typename Index> bool GatherOp<CUDAContext>::DoRunWithType() { auto& data = Input(DATA); auto& indices = Input(INDICES); auto* output = Output(0); CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D"); auto shape = indices.dims(); shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end()); output->Resize(shape); int block_size = data.size() / data.dim(0); auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize(); CAFFE_ENFORCE( block_bytesize == data.nbytes() / data.dim(0), "block_bytesize should be consistent with data dim"); int N = indices.size(); auto src_base = static_cast<const float*>(data.raw_data()); const Index* idxs = indices.template data<Index>(); auto out = static_cast<float*>(output->raw_mutable_data(data.meta())); // return early when the input is empty, since CUDA kernel will fail for // empty input. if (N <= 0) { return true; } hipLaunchKernelGGL(( GatherKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), src_base, out, idxs, N, block_size); return true; } REGISTER_CUDA_OPERATOR(Gather, GatherOp<CUDAContext>); /** * @brief Update slices of Y in-place with a batch of weighted X's. 
* Y[idx] = alpha[b] * X[b][i] + Y[idx] * i=0,...,N-1 * b=0,...,B-1 * idx=Indices[i] */ template <typename T_INDEX> __global__ void AxpySliceKernel( const float* weight0, const TIndex N, const TIndex B, const TIndex slice_size, const float** alpha, const float** X, const T_INDEX* Indices, float* Y, const TIndex M) { // This implementation requires that the first weight is 1.0 CUDA_KERNEL_ASSERT(weight0[0] == 1.0); for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = Indices[i]; float* y_offset = Y + (idx * slice_size); for (int b = 0; b < B; b++) { float a = *alpha[b]; const float* x_offset = X[b] + (i * slice_size); for (int j = threadIdx.x; j < slice_size; j += blockDim.x) { atomicAdd(&y_offset[j], a * x_offset[j]); } } } } template <> bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2)); } template <> template <typename Index> bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() { CAFFE_ENFORCE_EQ(InputSize() % 2, 1); auto& X0 = Input(0); auto& weight0 = Input(1); auto& indices = Input(2); auto* output = Output(0); CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required"); CAFFE_ENFORCE_GT(X0.size(), 0); CAFFE_ENFORCE_GT(X0.ndim(), 0, "X0 has to be at least the vector"); CAFFE_ENFORCE_EQ(weight0.size(), 1); TIndex M = X0.size(); TIndex N = X0.dim(0); TIndex K = indices.size(); TIndex block_size = M / N; float* data = output->template mutable_data<float>(); // In order to have all device pointers of x_i (and weight_i similarly) // consecutively in device memory, copy pointers to a host vector and then // copy back into a device array. const TIndex B = (InputSize() - 3) / 2; x_data_host_.Resize(B); weights_host_.Resize(B); x_data_device_.Resize(B); weights_device_.Resize(B); const float** x_data_host = x_data_host_.mutable_data<const float*>(); const float** weights_host = weights_host_.mutable_data<const float*>(); const float** x_data_device = x_data_device_.mutable_data<const float*>(); const float** weights_device = weights_device_.mutable_data<const float*>(); for (int inp = 3; inp < InputSize(); inp += 2) { int idx = (inp - 3) / 2; x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data()); weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data()); } context_.Copy<const float*, CPUContext, CUDAContext>( B, x_data_host, x_data_device); context_.Copy<const float*, CPUContext, CUDAContext>( B, weights_host, weights_device); hipLaunchKernelGGL(( AxpySliceKernel), dim3(std::min<TIndex>(K, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), weight0.template data<float>(), K, B, block_size, weights_device, x_data_device, indices.template data<Index>(), data, M); return true; } REGISTER_CUDA_OPERATOR( ScatterWeightedSum, ScatterWeightedSumOp<float, CUDAContext>); namespace { template <typename Index, typename T> __global__ void scatter_assign_kernel( T* data, const Index* idxs, const T* slicesData, TIndex N, TIndex K, TIndex block_size) { for (TIndex i = blockIdx.x; i < K; i += gridDim.x) { Index idx = idxs[i]; CUDA_KERNEL_ASSERT(0 <= idx && idx < N); const T* src = slicesData + block_size * i; T* dest = data + block_size * idx; for (TIndex j = threadIdx.x; j < block_size; j += blockDim.x) { dest[j] = src[j]; } } } } // namespace template <> template <typename Index, typename T> void ScatterAssignOp<CUDAContext>::DoScatterAssign( T* data, const Index* idxs, const T* slicesData, TIndex N, TIndex K, TIndex block_size) { 
hipLaunchKernelGGL(( scatter_assign_kernel), dim3(::min(K, static_cast<TIndex>(CAFFE_MAXIMUM_NUM_BLOCKS))), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), data, idxs, slicesData, N, K, block_size); } REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>); template <typename T> __global__ void RangeKernel(const int n, T* Y, T offset, T step) { CUDA_1D_KERNEL_LOOP(index, n) { Y[index] = index * step + offset; } } template <> template <typename T> bool RangeOp<CUDAContext>::DoRunOnDevice( const T& start, const T& step, Tensor<CUDAContext>* output) { int N = output->size(); hipLaunchKernelGGL(( RangeKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, output->mutable_data<T>(), start, step); return true; } REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>); } // namespace caffe2
967094fe4bfb7bd088a3d15112e5b3afb47103f2.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/flatten_op.h" #include "caffe2/operators/minmax_ops.h" #include "caffe2/operators/utility_ops.h" #include "caffe2/utils/math.h" #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/system/cuda/execution_policy.h> #include <thrust/unique.h> namespace caffe2 { template <> bool WeightedSumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float>(); } else if (Input(0).IsType<float16>()) { return DoRunWithType<float16>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } template <> bool SumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float, float>(); } else if (Input(0).IsType<float16>()) { return DoRunWithType<float16, float16>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } template <> class CopyOnDeviceLikeOp<CUDAContext, CUDAContext, CUDAContext> : public Operator<CUDAContext> { public: CopyOnDeviceLikeOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} USE_OPERATOR_FUNCTIONS(CUDAContext); bool RunOnDevice() override { auto& input = Input(0); auto* output = OperatorBase::Output<Tensor<CUDAContext>>(0); CUDAContext context(GetGPUIDForPointer(Input(1).raw_data())); output->ResizeLike(input); context.template CopyItems<CUDAContext, CUDAContext>( input.meta(), input.size(), input.raw_data(), output->raw_mutable_data(input.meta())); return true; } }; REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>); REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>); REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>); REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>); // From CPU, copy it to whatever the current context REGISTER_CUDA_OPERATOR( CopyFromCPUInput, CopyOp<CUDAContext, CUDAContext, CPUContext>); // CopyGPUToCPU and CopyCPUToGPU should both be carried out in a cuda context, // since gpu code will be involved. REGISTER_CUDA_OPERATOR( CopyGPUToCPU, CopyOp<CUDAContext, CPUContext, CUDAContext>); REGISTER_CUDA_OPERATOR( CopyCPUToGPU, CopyOp<CUDAContext, CUDAContext, CPUContext>); // If we only specify Copy, we assume that it is a gpu to gpu copy - maybe // involving different GPUs. REGISTER_CUDA_OPERATOR(Copy, CopyOp<CUDAContext, CUDAContext, CUDAContext>); REGISTER_CUDA_OPERATOR( CopyOnDeviceLike, CopyOnDeviceLikeOp<CUDAContext, CUDAContext, CUDAContext>); REGISTER_CUDA_OPERATOR(UnsafeCoalesce, UnsafeCoalesceOp<CUDAContext>); CAFFE_KNOWN_TYPE(const float*); REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>); __global__ void NanCheckKernel(int N, const float* X, bool* result) { bool has_nan = false; CUDA_1D_KERNEL_LOOP(i, N) { // Note: we have no need to do early return, since only if this fails // will we not need to inspect all elements. No need to optimize the // case that will fail. 
has_nan = has_nan || isnan(X[i]) || isinf(X[i]); } __syncthreads(); if (has_nan) { result[0] = true; } } template <> bool NanCheckOp<CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); const size_t N = X.size(); const float* data_ptr = X.data<float>(); scratch_.Resize(1); math::Set<bool, CUDAContext>( 1, false, scratch_.mutable_data<bool>(), &context_); NanCheckKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, X.data<float>(), scratch_.mutable_data<bool>()); bool result = false; { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CUDA_ENFORCE(cudaMemcpyAsync( &result, scratch_.raw_data(), 1, cudaMemcpyDefault, context_.cuda_stream())); } // Note: we must synchronize here so we can inspect the result context_.FinishDeviceComputation(); // Print out diagnostic info if we have a NaN or inf if (result) { std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0) << std::endl; for (int j = 0; j < InputSize(); j++) { TensorCPU cpu_X; cpu_X.ResizeLike(Input(j)); // Hack to cause allocaiton happen here, so it won't happen // when we do CopyFrom. We need the mutex then because host->gpu // copies seem to possibly lock with NCCL. cpu_X.mutable_data<float>(); { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); cpu_X.CopyFrom(Input(j), &context_); } context_.FinishDeviceComputation(); std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j) << "]" << std::endl; tensorPrinter_.Print<float>(cpu_X); if (j == 0) { std::cerr << "NaN idxs:" << std::endl; auto* cpu_X_data = cpu_X.data<float>(); for (size_t i = 0; i < cpu_X.size(); ++i) { if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) { std::cerr << i << " "; } } } std::cerr << std::endl; } return false; } // This op should act as an identity matrix if we don't find any NaNs/infs. // Copy over the data if we are not doing this in-place. if (&X != Y) { Y->CopyFrom(X, &context_); } return true; } REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>); __global__ void ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) { CUDA_1D_KERNEL_LOOP(i, N) { maxout[i] = max(X[i], Y[i]); } } template <> bool MaxOp<float, CUDAContext>::Compute() { float* output_data = Output(0)->mutable_data<float>(); const int N = Input(0).size(); // Run pairwise-maxes for (int i = 1; i < InputSize(); ++i) { ElwiseMaxKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( (i == 0 ? Input(0).data<float>() : Output(0)->data<float>()), Input(i).data<float>(), output_data, N); } return true; } REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MaxGradient, MaxGradientOp<float, CUDAContext>); __global__ void ElwiseMinKernel(const float* X, const float* Y, float* minout, const int N) { CUDA_1D_KERNEL_LOOP(i, N) { minout[i] = min(X[i], Y[i]); } } template <> bool MinOp<float, CUDAContext>::Compute() { float* output_data = Output(0)->mutable_data<float>(); const int N = Input(0).size(); // Run pairwise-mines for (int i = 1; i < InputSize(); ++i) { ElwiseMinKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( (i == 0 ? 
Input(0).data<float>() : Output(0)->data<float>()), Input(i).data<float>(), output_data, N); } return true; } REGISTER_CUDA_OPERATOR(Min, MinOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MinGradient, MinGradientOp<float, CUDAContext>); template <typename T> __global__ void MaxMinGradKernel(int N, const T* mx, const T* x, const T* go, T* gi) { CUDA_1D_KERNEL_LOOP(i, N) { gi[i] = go[i] * (mx[i] == x[i]); } } template <> bool SelectGradientOpBase<float, CUDAContext>::RunOnDevice() { auto& output = Input(0); auto& grad_output = Input(1); const int kInputStartOffset = 2; const float* data = output.data<float>(); for (int i = 0; i < OutputSize(); i++) { auto& input = Input(i + kInputStartOffset); auto* grad_input = Output(i); grad_input->ResizeLike(input); MaxMinGradKernel<<< CAFFE_GET_BLOCKS(input.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( input.size(), output.data<float>(), input.data<float>(), grad_output.data<float>(), grad_input->mutable_data<float>()); } return true; } template <typename T_INDEX> __global__ void GatherKernel( const float* X, float* Y, const T_INDEX* indices, const int N, const int block_size) { for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = indices[i]; const float* src_offset = X + idx * block_size; float* dst_offset = Y + i * block_size; for (int j = threadIdx.x; j < block_size; j += blockDim.x) { dst_offset[j] = src_offset[j]; } } } template <> bool GatherOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, OperatorBase::Input<TensorCUDA>(INDICES)); } template <> template <typename Index> bool GatherOp<CUDAContext>::DoRunWithType() { auto& data = Input(DATA); auto& indices = Input(INDICES); auto* output = Output(0); CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D"); auto shape = indices.dims(); shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end()); output->Resize(shape); int block_size = data.size() / data.dim(0); auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize(); CAFFE_ENFORCE( block_bytesize == data.nbytes() / data.dim(0), "block_bytesize should be consistent with data dim"); int N = indices.size(); auto src_base = static_cast<const float*>(data.raw_data()); const Index* idxs = indices.template data<Index>(); auto out = static_cast<float*>(output->raw_mutable_data(data.meta())); // return early when the input is empty, since CUDA kernel will fail for // empty input. if (N <= 0) { return true; } GatherKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(src_base, out, idxs, N, block_size); return true; } REGISTER_CUDA_OPERATOR(Gather, GatherOp<CUDAContext>); /** * @brief Update slices of Y in-place with a batch of weighted X's. 
* Y[idx] = alpha[b] * X[b][i] + Y[idx] * i=0,...,N-1 * b=0,...,B-1 * idx=Indices[i] */ template <typename T_INDEX> __global__ void AxpySliceKernel( const float* weight0, const TIndex N, const TIndex B, const TIndex slice_size, const float** alpha, const float** X, const T_INDEX* Indices, float* Y, const TIndex M) { // This implementation requires that the first weight is 1.0 CUDA_KERNEL_ASSERT(weight0[0] == 1.0); for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = Indices[i]; float* y_offset = Y + (idx * slice_size); for (int b = 0; b < B; b++) { float a = *alpha[b]; const float* x_offset = X[b] + (i * slice_size); for (int j = threadIdx.x; j < slice_size; j += blockDim.x) { atomicAdd(&y_offset[j], a * x_offset[j]); } } } } template <> bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2)); } template <> template <typename Index> bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() { CAFFE_ENFORCE_EQ(InputSize() % 2, 1); auto& X0 = Input(0); auto& weight0 = Input(1); auto& indices = Input(2); auto* output = Output(0); CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required"); CAFFE_ENFORCE_GT(X0.size(), 0); CAFFE_ENFORCE_GT(X0.ndim(), 0, "X0 has to be at least the vector"); CAFFE_ENFORCE_EQ(weight0.size(), 1); TIndex M = X0.size(); TIndex N = X0.dim(0); TIndex K = indices.size(); TIndex block_size = M / N; float* data = output->template mutable_data<float>(); // In order to have all device pointers of x_i (and weight_i similarly) // consecutively in device memory, copy pointers to a host vector and then // copy back into a device array. const TIndex B = (InputSize() - 3) / 2; x_data_host_.Resize(B); weights_host_.Resize(B); x_data_device_.Resize(B); weights_device_.Resize(B); const float** x_data_host = x_data_host_.mutable_data<const float*>(); const float** weights_host = weights_host_.mutable_data<const float*>(); const float** x_data_device = x_data_device_.mutable_data<const float*>(); const float** weights_device = weights_device_.mutable_data<const float*>(); for (int inp = 3; inp < InputSize(); inp += 2) { int idx = (inp - 3) / 2; x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data()); weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data()); } context_.Copy<const float*, CPUContext, CUDAContext>( B, x_data_host, x_data_device); context_.Copy<const float*, CPUContext, CUDAContext>( B, weights_host, weights_device); AxpySliceKernel<<< std::min<TIndex>(K, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( weight0.template data<float>(), K, B, block_size, weights_device, x_data_device, indices.template data<Index>(), data, M); return true; } REGISTER_CUDA_OPERATOR( ScatterWeightedSum, ScatterWeightedSumOp<float, CUDAContext>); namespace { template <typename Index, typename T> __global__ void scatter_assign_kernel( T* data, const Index* idxs, const T* slicesData, TIndex N, TIndex K, TIndex block_size) { for (TIndex i = blockIdx.x; i < K; i += gridDim.x) { Index idx = idxs[i]; CUDA_KERNEL_ASSERT(0 <= idx && idx < N); const T* src = slicesData + block_size * i; T* dest = data + block_size * idx; for (TIndex j = threadIdx.x; j < block_size; j += blockDim.x) { dest[j] = src[j]; } } } } // namespace template <> template <typename Index, typename T> void ScatterAssignOp<CUDAContext>::DoScatterAssign( T* data, const Index* idxs, const T* slicesData, TIndex N, TIndex K, TIndex block_size) { scatter_assign_kernel<<< std::min(K, 
static_cast<TIndex>(CAFFE_MAXIMUM_NUM_BLOCKS)), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(data, idxs, slicesData, N, K, block_size); } REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>); template <typename T> __global__ void RangeKernel(const int n, T* Y, T offset, T step) { CUDA_1D_KERNEL_LOOP(index, n) { Y[index] = index * step + offset; } } template <> template <typename T> bool RangeOp<CUDAContext>::DoRunOnDevice( const T& start, const T& step, Tensor<CUDAContext>* output) { int N = output->size(); RangeKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, output->mutable_data<T>(), start, step); return true; } REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>); } // namespace caffe2
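For reference, the update that AxpySliceKernel applies on the GPU (Y[idx] = alpha[b] * X[b][i] + Y[idx], accumulated over the B extra inputs) can be written as a plain host loop. This is only an illustrative sketch; the function name and container types are made up and are not part of Caffe2.

#include <cstddef>
#include <vector>

// Illustrative host-side reference of the slice update done by AxpySliceKernel:
// for every selected row i, Y[Indices[i]] += alpha[b] * X[b][i] for all b.
// The name scatter_weighted_sum_ref and the container types are hypothetical.
void scatter_weighted_sum_ref(
    std::vector<float>& Y,                    // N rows of slice_size, flattened
    const std::vector<const float*>& X,       // B inputs, each K rows of slice_size
    const std::vector<float>& alpha,          // B scalar weights
    const std::vector<long>& indices,         // K row indices into Y
    std::size_t slice_size) {
  for (std::size_t i = 0; i < indices.size(); ++i) {
    float* y_row = Y.data() + indices[i] * slice_size;
    for (std::size_t b = 0; b < X.size(); ++b) {
      const float* x_row = X[b] + i * slice_size;
      for (std::size_t j = 0; j < slice_size; ++j) {
        y_row[j] += alpha[b] * x_row[j];
      }
    }
  }
}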
637d8747f00657694912cb1f30165051f9512402.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <cfloat> #include <string> #include <vector> #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/operators/fused/fused_bn_add_activation_op.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/norm_utils.h" DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace paddle { namespace operators { template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T> class FusedBatchNormAddActKernel<T, phi::GPUContext> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { #if CUDNN_VERSION < 7401 PADDLE_THROW(phi::errors::Unimplemented( "The fused_bn_add_activation operator is not supported on GPU " "when CUDNN version < 7.4.1")); #endif PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::PreconditionNotMet("It must use CUDAPlace.")); auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); float momentum = ctx.Attr<float>("momentum"); std::string act_type = ctx.Attr<std::string>("act_type"); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); // Get the size for each dimension. 
// NHWC [batch_size, in_height, in_width, in_channels] const auto *x = ctx.Input<phi::DenseTensor>("X"); const auto *z = ctx.Input<phi::DenseTensor>("Z"); const auto &in_dims = x->dims(); const auto *scale = ctx.Input<phi::DenseTensor>("Scale"); const auto *bias = ctx.Input<phi::DenseTensor>("Bias"); auto *mean_out = ctx.Output<phi::DenseTensor>("MeanOut"); auto *variance_out = ctx.Output<phi::DenseTensor>("VarianceOut"); dev_ctx.Alloc<BatchNormParamType<T>>( mean_out, mean_out->numel() * sizeof(BatchNormParamType<T>)); dev_ctx.Alloc<BatchNormParamType<T>>( variance_out, variance_out->numel() * sizeof(BatchNormParamType<T>)); auto *saved_mean = ctx.Output<phi::DenseTensor>("SavedMean"); auto *saved_variance = ctx.Output<phi::DenseTensor>("SavedVariance"); dev_ctx.Alloc<BatchNormParamType<T>>( saved_mean, saved_mean->numel() * sizeof(BatchNormParamType<T>)); dev_ctx.Alloc<BatchNormParamType<T>>( saved_variance, saved_variance->numel() * sizeof(BatchNormParamType<T>)); auto *y = ctx.Output<phi::DenseTensor>("Y"); dev_ctx.Alloc<T>(y, y->numel() * sizeof(T)); int N, C, H, W, D; const DataLayout data_layout = DataLayout::kNHWC; phi::funcs::ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D); // ------------------- cudnn descriptors --------------------- auto handle = dev_ctx.cudnn_handle(); cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * D * C, 1, W * D * C, D * C, C}; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, in_dims.size() > 3 ? in_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); double this_factor = 1. - momentum; cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; platform::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); size_t workspace_size = 0; size_t reserve_space_size = 0; void *reserve_space_ptr = nullptr; void *workspace_ptr = nullptr; phi::DenseTensor workspace_tensor; // Create reserve space and workspace for batch norm. // Create tensor for each batchnorm op, it will be used in the // backward. Thus this tensor shouldn't be temp. 
auto *reserve_space = ctx.Output<phi::DenseTensor>("ReserveSpace"); PADDLE_ENFORCE_NOT_NULL( reserve_space, platform::errors::NotFound( "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*zDesc=*/data_desc_, /*yDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*activationDesc=*/activation_desc_, /*xDesc=*/data_desc_, /*sizeInBytes=*/&reserve_space_size)); reserve_space->Resize({static_cast<int64_t>( (reserve_space_size + phi::SizeOf(x->dtype()) - 1) / phi::SizeOf(x->dtype()))}); reserve_space_ptr = dev_ctx.Alloc<T>(reserve_space, reserve_space->numel() * sizeof(T)); workspace_tensor.Resize( {static_cast<int64_t>((workspace_size + phi::SizeOf(x->dtype()) - 1) / phi::SizeOf(x->dtype()))}); workspace_ptr = dev_ctx.Alloc<T>(&workspace_tensor, workspace_tensor.numel() * sizeof(T)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, bnOps_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(), data_desc_, z->template data<T>(), data_desc_, y->template data<T>(), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, dev_ctx.template Alloc<BatchNormParamType<T>>( mean_out, mean_out->numel() * sizeof(BatchNormParamType<T>)), dev_ctx.template Alloc<BatchNormParamType<T>>( variance_out, variance_out->numel() * sizeof(BatchNormParamType<T>)), epsilon, dev_ctx.template Alloc<BatchNormParamType<T>>( saved_mean, saved_mean->numel() * sizeof(BatchNormParamType<T>)), dev_ctx.template Alloc<BatchNormParamType<T>>( saved_variance, saved_variance->numel() * sizeof(BatchNormParamType<T>)), activation_desc_, workspace_ptr, workspace_size, reserve_space_ptr, reserve_space_size)); // clean when exit. 
PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; template <typename T> class FusedBatchNormAddActGradKernel<T, phi::GPUContext> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { #if CUDNN_VERSION < 7401 PADDLE_THROW(phi::errors::Unimplemented( "The fused_bn_add_activation operator is not supported on GPU " "when CUDNN version < 7.4.1")); #endif PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::PreconditionNotMet("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); std::string act_type = ctx.Attr<std::string>("act_type"); const auto *x = ctx.Input<phi::DenseTensor>("X"); const auto *y = ctx.Input<phi::DenseTensor>("Y"); const auto *d_y = ctx.Input<phi::DenseTensor>(framework::GradVarName("Y")); const auto *scale = ctx.Input<phi::DenseTensor>("Scale"); const auto *bias = ctx.Input<phi::DenseTensor>("Bias"); const auto *reserve_space = ctx.Input<phi::DenseTensor>("ReserveSpace"); auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); const auto &in_dims = x->dims(); int N, C, H, W, D; const DataLayout data_layout = DataLayout::kNHWC; phi::funcs::ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D); // init output auto *d_x = ctx.Output<phi::DenseTensor>(framework::GradVarName("X")); auto *d_z = ctx.Output<phi::DenseTensor>(framework::GradVarName("Z")); auto *d_scale = ctx.Output<phi::DenseTensor>(framework::GradVarName("Scale")); auto *d_bias = ctx.Output<phi::DenseTensor>(framework::GradVarName("Bias")); d_x->mutable_data<T>(ctx.GetPlace()); d_z->mutable_data<T>(ctx.GetPlace()); PADDLE_ENFORCE_EQ( d_scale && d_bias, true, platform::errors::PreconditionNotMet( "Both the scale grad and the bias grad must not be null.")); d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL, platform::errors::PreconditionNotMet( "The scale only has one dimension.")); PADDLE_ENFORCE_EQ( scale->dims()[0], C, platform::errors::PreconditionNotMet( "The size of scale is equal to the channel of Input(X).")); std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * C * D, 1, W * D * C, D * C, C}; // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, in_dims.size() > 3 ? 
in_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input<phi::DenseTensor>("SavedMean"); const auto *saved_var = ctx.Input<phi::DenseTensor>("SavedVariance"); const auto *saved_mean_data = saved_mean->template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_var->template data<BatchNormParamType<T>>(); size_t workspace_size = 0; void *workspace_ptr = nullptr; phi::DenseTensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; platform::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/data_desc_, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), x->dtype(), workspace_size); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/bnOps_, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/x->template data<T>(), /*yDesc=*/data_desc_, /*yData=*/y->template data<T>(), /*dyDesc=*/data_desc_, /*dyData=*/d_y->template data<T>(), /*dzDesc=*/data_desc_, /*dzData=*/d_z->template data<T>(), /*dxDesc=*/data_desc_, /*dxData=*/d_x->template data<T>(), /*dBnScaleBiasDesc=*/bn_param_desc_, /*bnScaleData=*/scale->template data<BatchNormParamType<T>>(), /*bnBiasData=*/bias->template data<BatchNormParamType<T>>(), /*dBnScaleData=*/d_scale->template data<BatchNormParamType<T>>(), /*dBnBiasData=*/d_bias->template data<BatchNormParamType<T>>(), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesmc=*/activation_desc_, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/const_cast<T *>(reserve_space->template data<T>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); // clean when exit. PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; PD_REGISTER_STRUCT_KERNEL(fused_bn_add_activation, GPU, ALL_LAYOUT, ops::FusedBatchNormAddActKernel, plat::float16) {} PD_REGISTER_STRUCT_KERNEL(fused_bn_add_activation_grad, GPU, ALL_LAYOUT, ops::FusedBatchNormAddActGradKernel, plat::float16) {}
637d8747f00657694912cb1f30165051f9512402.cu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <cfloat> #include <string> #include <vector> #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/operators/fused/fused_bn_add_activation_op.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/norm_utils.h" DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace paddle { namespace operators { template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T> class FusedBatchNormAddActKernel<T, phi::GPUContext> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { #if CUDNN_VERSION < 7401 PADDLE_THROW(phi::errors::Unimplemented( "The fused_bn_add_activation operator is not supported on GPU " "when CUDNN version < 7.4.1")); #endif PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::PreconditionNotMet("It must use CUDAPlace.")); auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); float momentum = ctx.Attr<float>("momentum"); std::string act_type = ctx.Attr<std::string>("act_type"); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); // Get the size for each dimension. 
// NHWC [batch_size, in_height, in_width, in_channels] const auto *x = ctx.Input<phi::DenseTensor>("X"); const auto *z = ctx.Input<phi::DenseTensor>("Z"); const auto &in_dims = x->dims(); const auto *scale = ctx.Input<phi::DenseTensor>("Scale"); const auto *bias = ctx.Input<phi::DenseTensor>("Bias"); auto *mean_out = ctx.Output<phi::DenseTensor>("MeanOut"); auto *variance_out = ctx.Output<phi::DenseTensor>("VarianceOut"); dev_ctx.Alloc<BatchNormParamType<T>>( mean_out, mean_out->numel() * sizeof(BatchNormParamType<T>)); dev_ctx.Alloc<BatchNormParamType<T>>( variance_out, variance_out->numel() * sizeof(BatchNormParamType<T>)); auto *saved_mean = ctx.Output<phi::DenseTensor>("SavedMean"); auto *saved_variance = ctx.Output<phi::DenseTensor>("SavedVariance"); dev_ctx.Alloc<BatchNormParamType<T>>( saved_mean, saved_mean->numel() * sizeof(BatchNormParamType<T>)); dev_ctx.Alloc<BatchNormParamType<T>>( saved_variance, saved_variance->numel() * sizeof(BatchNormParamType<T>)); auto *y = ctx.Output<phi::DenseTensor>("Y"); dev_ctx.Alloc<T>(y, y->numel() * sizeof(T)); int N, C, H, W, D; const DataLayout data_layout = DataLayout::kNHWC; phi::funcs::ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D); // ------------------- cudnn descriptors --------------------- auto handle = dev_ctx.cudnn_handle(); cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * D * C, 1, W * D * C, D * C, C}; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, in_dims.size() > 3 ? in_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); double this_factor = 1. - momentum; cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; platform::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); size_t workspace_size = 0; size_t reserve_space_size = 0; void *reserve_space_ptr = nullptr; void *workspace_ptr = nullptr; phi::DenseTensor workspace_tensor; // Create reserve space and workspace for batch norm. // Create tensor for each batchnorm op, it will be used in the // backward. Thus this tensor shouldn't be temp. 
auto *reserve_space = ctx.Output<phi::DenseTensor>("ReserveSpace"); PADDLE_ENFORCE_NOT_NULL( reserve_space, platform::errors::NotFound( "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*zDesc=*/data_desc_, /*yDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/bnOps_, /*activationDesc=*/activation_desc_, /*xDesc=*/data_desc_, /*sizeInBytes=*/&reserve_space_size)); reserve_space->Resize({static_cast<int64_t>( (reserve_space_size + phi::SizeOf(x->dtype()) - 1) / phi::SizeOf(x->dtype()))}); reserve_space_ptr = dev_ctx.Alloc<T>(reserve_space, reserve_space->numel() * sizeof(T)); workspace_tensor.Resize( {static_cast<int64_t>((workspace_size + phi::SizeOf(x->dtype()) - 1) / phi::SizeOf(x->dtype()))}); workspace_ptr = dev_ctx.Alloc<T>(&workspace_tensor, workspace_tensor.numel() * sizeof(T)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, bnOps_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(), data_desc_, z->template data<T>(), data_desc_, y->template data<T>(), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, dev_ctx.template Alloc<BatchNormParamType<T>>( mean_out, mean_out->numel() * sizeof(BatchNormParamType<T>)), dev_ctx.template Alloc<BatchNormParamType<T>>( variance_out, variance_out->numel() * sizeof(BatchNormParamType<T>)), epsilon, dev_ctx.template Alloc<BatchNormParamType<T>>( saved_mean, saved_mean->numel() * sizeof(BatchNormParamType<T>)), dev_ctx.template Alloc<BatchNormParamType<T>>( saved_variance, saved_variance->numel() * sizeof(BatchNormParamType<T>)), activation_desc_, workspace_ptr, workspace_size, reserve_space_ptr, reserve_space_size)); // clean when exit. 
PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; template <typename T> class FusedBatchNormAddActGradKernel<T, phi::GPUContext> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { #if CUDNN_VERSION < 7401 PADDLE_THROW(phi::errors::Unimplemented( "The fused_bn_add_activation operator is not supported on GPU " "when CUDNN version < 7.4.1")); #endif PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::PreconditionNotMet("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); std::string act_type = ctx.Attr<std::string>("act_type"); const auto *x = ctx.Input<phi::DenseTensor>("X"); const auto *y = ctx.Input<phi::DenseTensor>("Y"); const auto *d_y = ctx.Input<phi::DenseTensor>(framework::GradVarName("Y")); const auto *scale = ctx.Input<phi::DenseTensor>("Scale"); const auto *bias = ctx.Input<phi::DenseTensor>("Bias"); const auto *reserve_space = ctx.Input<phi::DenseTensor>("ReserveSpace"); auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); const auto &in_dims = x->dims(); int N, C, H, W, D; const DataLayout data_layout = DataLayout::kNHWC; phi::funcs::ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D); // init output auto *d_x = ctx.Output<phi::DenseTensor>(framework::GradVarName("X")); auto *d_z = ctx.Output<phi::DenseTensor>(framework::GradVarName("Z")); auto *d_scale = ctx.Output<phi::DenseTensor>(framework::GradVarName("Scale")); auto *d_bias = ctx.Output<phi::DenseTensor>(framework::GradVarName("Bias")); d_x->mutable_data<T>(ctx.GetPlace()); d_z->mutable_data<T>(ctx.GetPlace()); PADDLE_ENFORCE_EQ( d_scale && d_bias, true, platform::errors::PreconditionNotMet( "Both the scale grad and the bias grad must not be null.")); d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL, platform::errors::PreconditionNotMet( "The scale only has one dimension.")); PADDLE_ENFORCE_EQ( scale->dims()[0], C, platform::errors::PreconditionNotMet( "The size of scale is equal to the channel of Input(X).")); std::vector<int> dims = {N, C, H, W, D}; std::vector<int> strides = {H * W * C * D, 1, W * D * C, D * C, C}; // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, in_dims.size() > 3 ? 
in_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input<phi::DenseTensor>("SavedMean"); const auto *saved_var = ctx.Input<phi::DenseTensor>("SavedVariance"); const auto *saved_mean_data = saved_mean->template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_var->template data<BatchNormParamType<T>>(); size_t workspace_size = 0; void *workspace_ptr = nullptr; phi::DenseTensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; platform::ScopedActivationDescriptor scope_act_desc; cudnnActivationDescriptor_t activation_desc_ = scope_act_desc.descriptor<T>(act_type); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/bnOps_, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/data_desc_, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/activation_desc_, /*sizeInBytes=*/&workspace_size)); workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), x->dtype(), workspace_size); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/bnOps_, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/x->template data<T>(), /*yDesc=*/data_desc_, /*yData=*/y->template data<T>(), /*dyDesc=*/data_desc_, /*dyData=*/d_y->template data<T>(), /*dzDesc=*/data_desc_, /*dzData=*/d_z->template data<T>(), /*dxDesc=*/data_desc_, /*dxData=*/d_x->template data<T>(), /*dBnScaleBiasDesc=*/bn_param_desc_, /*bnScaleData=*/scale->template data<BatchNormParamType<T>>(), /*bnBiasData=*/bias->template data<BatchNormParamType<T>>(), /*dBnScaleData=*/d_scale->template data<BatchNormParamType<T>>(), /*dBnBiasData=*/d_bias->template data<BatchNormParamType<T>>(), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesmc=*/activation_desc_, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/const_cast<T *>(reserve_space->template data<T>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); // clean when exit. PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; PD_REGISTER_STRUCT_KERNEL(fused_bn_add_activation, GPU, ALL_LAYOUT, ops::FusedBatchNormAddActKernel, plat::float16) {} PD_REGISTER_STRUCT_KERNEL(fused_bn_add_activation_grad, GPU, ALL_LAYOUT, ops::FusedBatchNormAddActGradKernel, plat::float16) {}
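One small arithmetic detail worth calling out from the kernels above: cuDNN reports workspace and reserve-space sizes in bytes, and the operator stores them in tensors of the input dtype by rounding the byte count up to a whole number of elements. A minimal sketch of that conversion, with a hypothetical helper name:

#include <cstddef>

// Ceiling division from a byte count to a count of elem_bytes-wide elements,
// as used when sizing the workspace / reserve-space tensors above.
inline std::size_t bytes_to_elements(std::size_t size_bytes, std::size_t elem_bytes) {
    return (size_bytes + elem_bytes - 1) / elem_bytes;
}

// For example, a 1000-byte reserve space stored as float16 (2-byte) elements
// needs bytes_to_elements(1000, 2) == 500 elements; 1001 bytes round up to 501.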
43f75278c1a0f5fe0b6bddad68465c094c2fdd7f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// fermi
/*
 * Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

extern "C" {
__global__ void transposeKernel(const int h, const int w, float* output, const float* input);
}

__global__ void transposeKernel(const int h, const int w, float* output, const float* input) {
    const int i = blockIdx.y;
    const int bj = blockIdx.x;
    const int wtj = threadIdx.y;
    const int ttj = threadIdx.x;

    const int nrThreadsW = min(1024, w);
    const int nrThreadsNrThreadsW = min(32, nrThreadsW);
    const int tj = wtj * (1 * nrThreadsNrThreadsW) + ttj;
    if (tj < nrThreadsW) {
        const int j = bj * (1 * nrThreadsW) + tj;
        if (j < w) {
            output[j + i * (1 * w)] = input[i + j * (1 * h)];
        }
    }
}
43f75278c1a0f5fe0b6bddad68465c094c2fdd7f.cu
// fermi
/*
 * Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

extern "C" {
__global__ void transposeKernel(const int h, const int w, float* output, const float* input);
}

__global__ void transposeKernel(const int h, const int w, float* output, const float* input) {
    const int i = blockIdx.y;
    const int bj = blockIdx.x;
    const int wtj = threadIdx.y;
    const int ttj = threadIdx.x;

    const int nrThreadsW = min(1024, w);
    const int nrThreadsNrThreadsW = min(32, nrThreadsW);
    const int tj = wtj * (1 * nrThreadsNrThreadsW) + ttj;
    if (tj < nrThreadsW) {
        const int j = bj * (1 * nrThreadsW) + tj;
        if (j < w) {
            output[j + i * (1 * w)] = input[i + j * (1 * h)];
        }
    }
}
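The generated launcher for this kernel is not part of the file, but a host-side launch consistent with its index arithmetic could look like the sketch below; the wrapper name and the exact block shape are assumptions, not the original launcher.

#include <algorithm>
#include <cuda_runtime.h>

// Hypothetical launcher matching transposeKernel's indexing: gridDim.y walks
// the rows i, gridDim.x covers chunks of nrThreadsW columns, and
// blockDim.x * blockDim.y >= nrThreadsW threads cooperate on one chunk
// (the kernel itself guards tj < nrThreadsW and j < w).
void launchTranspose(int h, int w, float* d_output, const float* d_input,
                     cudaStream_t stream = 0) {
    const int nrThreadsW = std::min(1024, w);
    const int tx = std::min(32, nrThreadsW);
    const int ty = (nrThreadsW + tx - 1) / tx;
    dim3 block(tx, ty);
    dim3 grid((w + nrThreadsW - 1) / nrThreadsW, h);
    transposeKernel<<<grid, block, 0, stream>>>(h, w, d_output, d_input);
}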
09df45589acf352c39a3ffc3085ddecfe53363e1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _RESET_ON_COLLISION_KERNEL_CU_
#define _RESET_ON_COLLISION_KERNEL_CU_

#include "VehicleData.h"
#include "CUDAKernelOptions.cu"

#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define P_F(i) (CUT_BANK_CHECKER(((float*)position), i))
#define P(i) (CUT_BANK_CHECKER(position, i))
#define R(i) (CUT_BANK_CHECKER(random, i))
#else
#define P_F(i) ((float*)position)[i]
#define P(i) position[i]
#define R(i) random[i]
#endif

// NOTE: the __shared__ buffers below must be named "position" and "random"
// so that the P/P_F/R macros above resolve to them; the kernel arguments
// therefore use different names (targetPosition, randomNumbers) instead of
// shadowing the shared arrays, which would not compile.
__global__ void resetOnCollisionKernel(VehicleData *vehicleData, float *randomNumbers, float3 targetPosition, float radius)
{
    int id = (blockIdx.x * blockDim.x + threadIdx.x);
    int blockOffset = (blockDim.x * blockIdx.x * 3);
    int numOfAgents = gridDim.x * blockDim.x; // currently unused

    // shared memory for position
    __shared__ float3 position[TPB];

    // shared memory for random numbers
    __shared__ float random[TPB];

    __shared__ int rindex[TPB];
    rindex[threadIdx.x] = threadIdx.x; // init random indices

    // copy position data from global memory (coalesced)
    P_F(threadIdx.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x];
    P_F(threadIdx.x + blockDim.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x + blockDim.x];
    P_F(threadIdx.x + 2*blockDim.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x + 2*blockDim.x];

    // copy random numbers from global memory (coalesced)
    R(threadIdx.x) = randomNumbers[id];

    float d = float3Distance(P(threadIdx.x), targetPosition);
    float r = radius + (*vehicleData).radius[id];
    if (d < r) {
        //
    }
}

#endif // _RESET_ON_COLLISION_KERNEL_CU_
09df45589acf352c39a3ffc3085ddecfe53363e1.cu
#ifndef _RESET_ON_COLLISION_KERNEL_CU_
#define _RESET_ON_COLLISION_KERNEL_CU_

#include "VehicleData.h"
#include "CUDAKernelOptions.cu"

#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define P_F(i) (CUT_BANK_CHECKER(((float*)position), i))
#define P(i) (CUT_BANK_CHECKER(position, i))
#define R(i) (CUT_BANK_CHECKER(random, i))
#else
#define P_F(i) ((float*)position)[i]
#define P(i) position[i]
#define R(i) random[i]
#endif

// NOTE: the __shared__ buffers below must be named "position" and "random"
// so that the P/P_F/R macros above resolve to them; the kernel arguments
// therefore use different names (targetPosition, randomNumbers) instead of
// shadowing the shared arrays, which would not compile.
__global__ void resetOnCollisionKernel(VehicleData *vehicleData, float *randomNumbers, float3 targetPosition, float radius)
{
    int id = (blockIdx.x * blockDim.x + threadIdx.x);
    int blockOffset = (blockDim.x * blockIdx.x * 3);
    int numOfAgents = gridDim.x * blockDim.x; // currently unused

    // shared memory for position
    __shared__ float3 position[TPB];

    // shared memory for random numbers
    __shared__ float random[TPB];

    __shared__ int rindex[TPB];
    rindex[threadIdx.x] = threadIdx.x; // init random indices

    // copy position data from global memory (coalesced)
    P_F(threadIdx.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x];
    P_F(threadIdx.x + blockDim.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x + blockDim.x];
    P_F(threadIdx.x + 2*blockDim.x) = ((float*)(*vehicleData).position)[blockOffset + threadIdx.x + 2*blockDim.x];

    // copy random numbers from global memory (coalesced)
    R(threadIdx.x) = randomNumbers[id];

    float d = float3Distance(P(threadIdx.x), targetPosition);
    float r = radius + (*vehicleData).radius[id];
    if (d < r) {
        //
    }
}

#endif // _RESET_ON_COLLISION_KERNEL_CU_
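The kernel relies on a float3Distance helper from CUDAKernelOptions.cu, which is not included in this file. The sketch below shows the Euclidean distance it presumably computes; treat it as an assumption, not the real implementation.

// Hedged sketch of the distance helper assumed by resetOnCollisionKernel;
// the "Sketch" suffix marks it as hypothetical.
__device__ inline float float3DistanceSketch(float3 a, float3 b) {
    const float dx = a.x - b.x;
    const float dy = a.y - b.y;
    const float dz = a.z - b.z;
    return sqrtf(dx * dx + dy * dy + dz * dz);
}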
a22914e8c9ac7506dc5dd5e21dee7b4befa63292.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ex2.h" #include <cuda/atomic> #include <stdio.h> #include <vector> #include <queue> #define NSTREAMS 64 #define FREE -1 #define IMAGE_SIZE IMG_HEIGHT*IMG_WIDTH #define REGS_PER_THREAD 32 #define NEEDED_SHARED 256*(sizeof(int) + sizeof(uchar)) + sizeof(int) + 2*sizeof(uchar*); #define N_IMG_PAIRS 10000 #define QUEUE_SIZE 16 #define MIN_THREADS 256 __device__ void prefix_sum(int arr[], int arr_size) { int tid = threadIdx.x; int increment; for (int stride = 1; stride < min(blockDim.x, arr_size); stride *= 2) { if (tid >= stride && tid < arr_size) { increment = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { arr[tid] += increment; } __syncthreads(); } } // Example single-threadblock kernel for processing a single image. // Feel free to change it. __device__ void process_image_kernel(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ uchar map[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < IMG_HEIGHT * IMG_HEIGHT; i += blockDim.x) atomicAdd(&histogram[in[i]], 1); __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { float map_value = float(histogram[tid]) / (IMG_WIDTH * IMG_HEIGHT); map[tid] = ((uchar)(N_COLORS * map_value)) * (256 / N_COLORS); } __syncthreads(); for (int i = tid; i < IMG_WIDTH * IMG_HEIGHT; i += blockDim.x) { out[i] = map[in[i]]; } } __global__ void serial_process_image_kernel(uchar *in, uchar *out){ process_image_kernel(in, out); return; } class streams_server : public image_processing_server{ private: // TODO define stream server context (memory buffers, streams, etc...) hipStream_t streams[NSTREAMS]; int stream_to_img[NSTREAMS]; uchar *dimg_in; uchar *dimg_out; int last_img_id; int get_available_stream(){ for(int i = 0 ; i < NSTREAMS ; i++){ if(stream_to_img[i] == FREE){ return i; } } return -1; } public: streams_server(){ // TODO initialize context (memory buffers, streams, etc...) 
for (int i=0 ; i<NSTREAMS ; i++){ CUDA_CHECK(hipStreamCreate(&streams[i])); stream_to_img[i] = FREE; } CUDA_CHECK( hipMalloc(&dimg_in, NSTREAMS * IMG_WIDTH * IMG_HEIGHT) ); CUDA_CHECK( hipMalloc(&dimg_out, NSTREAMS * IMG_WIDTH * IMG_HEIGHT) ); } ~streams_server() override { /* free streams */ for (int i=0 ; i<NSTREAMS ; i++) CUDA_CHECK(hipStreamDestroy(streams[i])); CUDA_CHECK( hipFree(dimg_in) ); CUDA_CHECK( hipFree(dimg_out) ); } bool enqueue(int img_id, uchar *img_in, uchar *img_out) override{ int free = this->get_available_stream(); if(free == -1){ return false; } int image_size = IMG_WIDTH * IMG_HEIGHT; stream_to_img[free] = img_id; CUDA_CHECK( hipMemcpyAsync(&dimg_in[image_size * free], img_in, image_size , hipMemcpyHostToDevice, streams[free] )); hipLaunchKernelGGL(( serial_process_image_kernel), dim3(1), dim3(1024),0, streams[free], &dimg_in[image_size * free], &dimg_out[ image_size * free]); // process_image_kernel(&dimg_in[image_size * free], &dimg_out[ image_size * free]); CUDA_CHECK( hipMemcpyAsync(img_out, &dimg_out[image_size * free], image_size , hipMemcpyDeviceToHost, streams[free] )); return true; } bool dequeue(int *img_id) override{ for(int i = 0 ; i< NSTREAMS ; i++){ if( (hipStreamQuery(streams[i]) == hipSuccess) && (stream_to_img[i] != FREE)){ *img_id = stream_to_img[i]; stream_to_img[i] = FREE; return true; } } return false; } }; std::unique_ptr<image_processing_server> create_streams_server() { return std::make_unique<streams_server>(); } //----------------------------------------------------------------------------- // GENERAL STUFF START //----------------------------------------------------------------------------- int query_device(int thread_per_threadblock){ //calculate optimal parameters per block int desired_regs = REGS_PER_THREAD * thread_per_threadblock; //calculate limitations hipDeviceProp_t prop; CUDA_CHECK(hipGetDeviceProperties(&prop, 0)); int num_sm_properties = prop.multiProcessorCount; int threads_properties = prop.maxThreadsPerMultiProcessor; int registers_properties = prop.regsPerMultiprocessor; int shared_memory_properties = prop.sharedMemPerMultiprocessor; //calculate most strict limit of thread blocks per sm int res1 = threads_properties/thread_per_threadblock; int res2 = registers_properties/desired_regs; int res3 = shared_memory_properties/NEEDED_SHARED; int num_of_theadblock_res = min(res1, min(res2,res3 )) * num_sm_properties; return num_of_theadblock_res; } class producer_consumer_q { private: public: cuda::atomic<int> queue_tail; cuda::atomic<int> queue_head; uchar* images[QUEUE_SIZE]; int image_index[QUEUE_SIZE]; uchar* image_ptrs[QUEUE_SIZE]; __device__ bool is_queue_empty(){ return queue_tail.load(cuda::memory_order_acquire) == queue_head; } __device__ bool is_queue_full(){ return queue_tail - queue_head.load(cuda::memory_order_acquire) == QUEUE_SIZE; } producer_consumer_q(){ queue_head = 0; queue_tail = 0; for(int i = 0; i < QUEUE_SIZE; i++){ image_ptrs[i] = NULL; images[i] = NULL; image_index[i] = FREE; } } }PCQ; __device__ bool enqueue_gpu(producer_consumer_q* q,int img_id) { int tail = q->queue_tail.load(cuda::memory_order_relaxed); while(q->is_queue_full()); // busy wait - no returning false int index = q->queue_tail % QUEUE_SIZE; q->image_index[index] = img_id; q->queue_tail.store((tail + 1), cuda::memory_order_release); return true; } __device__ bool dequeue_gpu(producer_consumer_q* q,int* img_id, uchar* &image, uchar* &current_image) { int head = q->queue_head.load(cuda::memory_order_relaxed); int index = q->queue_head % 
QUEUE_SIZE; if(q->is_queue_empty()){ return false; } current_image = q->image_ptrs[index]; image = q->images[index]; *img_id = q->image_index[index]; q->queue_head.store((head + 1), cuda::memory_order_release); return true; } __global__ void worker(producer_consumer_q *cpu_gpu_q, producer_consumer_q *gpu_cpu_q, bool *stop) { int tid = threadIdx.x; int bid = blockIdx.x; __shared__ int img_id; __shared__ uchar* img_in; __shared__ uchar* img_out; while(*stop != true) { __shared__ bool d_res; if(tid == 0){ d_res = dequeue_gpu(&cpu_gpu_q[bid],&img_id, img_in, img_out); } __syncthreads(); __threadfence_system(); if(d_res == false){ continue; } process_image_kernel(img_in, img_out); __syncthreads(); __threadfence_system(); if(tid == 0){ enqueue_gpu(&gpu_cpu_q[bid],img_id); } } } class queue_server : public image_processing_server { private: int threadblock_number; bool *stop; producer_consumer_q *cpu_gpu_q; producer_consumer_q *gpu_cpu_q; __host__ bool enqueue_cpu(producer_consumer_q* q, int img_id, uchar* current_image, uchar* image_ptr) { int tail = q->queue_tail.load(cuda::memory_order_relaxed); if(((tail - q->queue_head.load(cuda::memory_order_acquire)) == QUEUE_SIZE)) { return false; } int index = q->queue_tail % QUEUE_SIZE; q->images[index] = current_image; q->image_ptrs[index] = image_ptr; q->image_index[index] = img_id; q->queue_tail.store((tail + 1), cuda::memory_order_release); return true; } __host__ bool dequeue_cpu(producer_consumer_q* q,int* img_id) { int head = q->queue_head.load(cuda::memory_order_relaxed); if(q->queue_tail.load(cuda::memory_order_acquire) == q->queue_head) { return false; } int index = q->queue_head % QUEUE_SIZE; *img_id = q->image_index[index]; q->queue_head.store((head + 1), cuda::memory_order_release); return true; } public: queue_server(int threads) { threadblock_number = query_device(threads); //allocation CUDA_CHECK( hipHostMalloc(&cpu_gpu_q, sizeof(producer_consumer_q) * threadblock_number) ); CUDA_CHECK( hipHostMalloc(&gpu_cpu_q, sizeof(producer_consumer_q) * threadblock_number) ); CUDA_CHECK( hipHostMalloc(&stop, sizeof(bool)) ); //init queues new (cpu_gpu_q) producer_consumer_q[threadblock_number]; new (gpu_cpu_q) producer_consumer_q[threadblock_number]; //stop flag *stop = false; //kernel invocation hipLaunchKernelGGL(( worker), dim3(threadblock_number), dim3(threads), 0, 0, cpu_gpu_q, gpu_cpu_q, stop); } ~queue_server() override { *stop = true; CUDA_CHECK( hipDeviceSynchronize() ); if(gpu_cpu_q != NULL) CUDA_CHECK( hipHostFree(gpu_cpu_q) ); if(cpu_gpu_q != NULL) CUDA_CHECK( hipHostFree(cpu_gpu_q) ); if(cpu_gpu_q != NULL) CUDA_CHECK( hipHostFree(stop) ); // if(stop != NULL) { // stop->~atomic<bool>(); // CUDA_CHECK(hipHostFree(stop)); // } } bool enqueue(int img_id, uchar *img_in, uchar *img_out) override { for(int i = 0; i < threadblock_number; i++) if(enqueue_cpu(&cpu_gpu_q[i],img_id, img_in, img_out)) { return true; } return false; } bool dequeue(int *img_id) override { for(int i = 0; i < threadblock_number; i++) if(dequeue_cpu(&gpu_cpu_q[i],img_id)) { return true; } return false; } }; std::unique_ptr<image_processing_server> create_queues_server(int threads) { return std::make_unique<queue_server>(threads); }
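To make the mapping in process_image_kernel easier to follow, here is a hedged single-threaded reference of the same histogram-equalization transform (the helper is hypothetical and not part of the assignment code). Note that the device code's histogram loop iterates IMG_HEIGHT * IMG_HEIGHT, which presumably means the full IMG_WIDTH * IMG_HEIGHT image; the full image is used here.

#include "ex2.h"   // assumed to provide uchar, IMG_WIDTH, IMG_HEIGHT, N_COLORS

// CPU reference: 256-bin histogram -> inclusive prefix sum (CDF) ->
// N_COLORS-level quantized mapping, applied per pixel.
void process_image_reference(const uchar *in, uchar *out) {
    int histogram[256] = {0};
    for (int i = 0; i < IMG_WIDTH * IMG_HEIGHT; i++)
        histogram[in[i]]++;
    for (int v = 1; v < 256; v++)            // inclusive prefix sum
        histogram[v] += histogram[v - 1];
    uchar map[256];
    for (int v = 0; v < 256; v++) {
        float map_value = float(histogram[v]) / (IMG_WIDTH * IMG_HEIGHT);
        map[v] = ((uchar)(N_COLORS * map_value)) * (256 / N_COLORS);
    }
    for (int i = 0; i < IMG_WIDTH * IMG_HEIGHT; i++)
        out[i] = map[in[i]];
}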
a22914e8c9ac7506dc5dd5e21dee7b4befa63292.cu
#include "ex2.h" #include <cuda/atomic> #include <stdio.h> #include <vector> #include <queue> #define NSTREAMS 64 #define FREE -1 #define IMAGE_SIZE IMG_HEIGHT*IMG_WIDTH #define REGS_PER_THREAD 32 #define NEEDED_SHARED 256*(sizeof(int) + sizeof(uchar)) + sizeof(int) + 2*sizeof(uchar*); #define N_IMG_PAIRS 10000 #define QUEUE_SIZE 16 #define MIN_THREADS 256 __device__ void prefix_sum(int arr[], int arr_size) { int tid = threadIdx.x; int increment; for (int stride = 1; stride < min(blockDim.x, arr_size); stride *= 2) { if (tid >= stride && tid < arr_size) { increment = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { arr[tid] += increment; } __syncthreads(); } } // Example single-threadblock kernel for processing a single image. // Feel free to change it. __device__ void process_image_kernel(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ uchar map[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < IMG_HEIGHT * IMG_HEIGHT; i += blockDim.x) atomicAdd(&histogram[in[i]], 1); __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { float map_value = float(histogram[tid]) / (IMG_WIDTH * IMG_HEIGHT); map[tid] = ((uchar)(N_COLORS * map_value)) * (256 / N_COLORS); } __syncthreads(); for (int i = tid; i < IMG_WIDTH * IMG_HEIGHT; i += blockDim.x) { out[i] = map[in[i]]; } } __global__ void serial_process_image_kernel(uchar *in, uchar *out){ process_image_kernel(in, out); return; } class streams_server : public image_processing_server{ private: // TODO define stream server context (memory buffers, streams, etc...) cudaStream_t streams[NSTREAMS]; int stream_to_img[NSTREAMS]; uchar *dimg_in; uchar *dimg_out; int last_img_id; int get_available_stream(){ for(int i = 0 ; i < NSTREAMS ; i++){ if(stream_to_img[i] == FREE){ return i; } } return -1; } public: streams_server(){ // TODO initialize context (memory buffers, streams, etc...) 
for (int i=0 ; i<NSTREAMS ; i++){ CUDA_CHECK(cudaStreamCreate(&streams[i])); stream_to_img[i] = FREE; } CUDA_CHECK( cudaMalloc(&dimg_in, NSTREAMS * IMG_WIDTH * IMG_HEIGHT) ); CUDA_CHECK( cudaMalloc(&dimg_out, NSTREAMS * IMG_WIDTH * IMG_HEIGHT) ); } ~streams_server() override { /* free streams */ for (int i=0 ; i<NSTREAMS ; i++) CUDA_CHECK(cudaStreamDestroy(streams[i])); CUDA_CHECK( cudaFree(dimg_in) ); CUDA_CHECK( cudaFree(dimg_out) ); } bool enqueue(int img_id, uchar *img_in, uchar *img_out) override{ int free = this->get_available_stream(); if(free == -1){ return false; } int image_size = IMG_WIDTH * IMG_HEIGHT; stream_to_img[free] = img_id; CUDA_CHECK( cudaMemcpyAsync(&dimg_in[image_size * free], img_in, image_size , cudaMemcpyHostToDevice, streams[free] )); serial_process_image_kernel<<<1, 1024,0, streams[free]>>>(&dimg_in[image_size * free], &dimg_out[ image_size * free]); // process_image_kernel(&dimg_in[image_size * free], &dimg_out[ image_size * free]); CUDA_CHECK( cudaMemcpyAsync(img_out, &dimg_out[image_size * free], image_size , cudaMemcpyDeviceToHost, streams[free] )); return true; } bool dequeue(int *img_id) override{ for(int i = 0 ; i< NSTREAMS ; i++){ if( (cudaStreamQuery(streams[i]) == cudaSuccess) && (stream_to_img[i] != FREE)){ *img_id = stream_to_img[i]; stream_to_img[i] = FREE; return true; } } return false; } }; std::unique_ptr<image_processing_server> create_streams_server() { return std::make_unique<streams_server>(); } //----------------------------------------------------------------------------- // GENERAL STUFF START //----------------------------------------------------------------------------- int query_device(int thread_per_threadblock){ //calculate optimal parameters per block int desired_regs = REGS_PER_THREAD * thread_per_threadblock; //calculate limitations cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, 0)); int num_sm_properties = prop.multiProcessorCount; int threads_properties = prop.maxThreadsPerMultiProcessor; int registers_properties = prop.regsPerMultiprocessor; int shared_memory_properties = prop.sharedMemPerMultiprocessor; //calculate most strict limit of thread blocks per sm int res1 = threads_properties/thread_per_threadblock; int res2 = registers_properties/desired_regs; int res3 = shared_memory_properties/NEEDED_SHARED; int num_of_theadblock_res = min(res1, min(res2,res3 )) * num_sm_properties; return num_of_theadblock_res; } class producer_consumer_q { private: public: cuda::atomic<int> queue_tail; cuda::atomic<int> queue_head; uchar* images[QUEUE_SIZE]; int image_index[QUEUE_SIZE]; uchar* image_ptrs[QUEUE_SIZE]; __device__ bool is_queue_empty(){ return queue_tail.load(cuda::memory_order_acquire) == queue_head; } __device__ bool is_queue_full(){ return queue_tail - queue_head.load(cuda::memory_order_acquire) == QUEUE_SIZE; } producer_consumer_q(){ queue_head = 0; queue_tail = 0; for(int i = 0; i < QUEUE_SIZE; i++){ image_ptrs[i] = NULL; images[i] = NULL; image_index[i] = FREE; } } }PCQ; __device__ bool enqueue_gpu(producer_consumer_q* q,int img_id) { int tail = q->queue_tail.load(cuda::memory_order_relaxed); while(q->is_queue_full()); // busy wait - no returning false int index = q->queue_tail % QUEUE_SIZE; q->image_index[index] = img_id; q->queue_tail.store((tail + 1), cuda::memory_order_release); return true; } __device__ bool dequeue_gpu(producer_consumer_q* q,int* img_id, uchar* &image, uchar* &current_image) { int head = q->queue_head.load(cuda::memory_order_relaxed); int index = q->queue_head % QUEUE_SIZE; 
if(q->is_queue_empty()){ return false; } current_image = q->image_ptrs[index]; image = q->images[index]; *img_id = q->image_index[index]; q->queue_head.store((head + 1), cuda::memory_order_release); return true; } __global__ void worker(producer_consumer_q *cpu_gpu_q, producer_consumer_q *gpu_cpu_q, bool *stop) { int tid = threadIdx.x; int bid = blockIdx.x; __shared__ int img_id; __shared__ uchar* img_in; __shared__ uchar* img_out; while(*stop != true) { __shared__ bool d_res; if(tid == 0){ d_res = dequeue_gpu(&cpu_gpu_q[bid],&img_id, img_in, img_out); } __syncthreads(); __threadfence_system(); if(d_res == false){ continue; } process_image_kernel(img_in, img_out); __syncthreads(); __threadfence_system(); if(tid == 0){ enqueue_gpu(&gpu_cpu_q[bid],img_id); } } } class queue_server : public image_processing_server { private: int threadblock_number; bool *stop; producer_consumer_q *cpu_gpu_q; producer_consumer_q *gpu_cpu_q; __host__ bool enqueue_cpu(producer_consumer_q* q, int img_id, uchar* current_image, uchar* image_ptr) { int tail = q->queue_tail.load(cuda::memory_order_relaxed); if(((tail - q->queue_head.load(cuda::memory_order_acquire)) == QUEUE_SIZE)) { return false; } int index = q->queue_tail % QUEUE_SIZE; q->images[index] = current_image; q->image_ptrs[index] = image_ptr; q->image_index[index] = img_id; q->queue_tail.store((tail + 1), cuda::memory_order_release); return true; } __host__ bool dequeue_cpu(producer_consumer_q* q,int* img_id) { int head = q->queue_head.load(cuda::memory_order_relaxed); if(q->queue_tail.load(cuda::memory_order_acquire) == q->queue_head) { return false; } int index = q->queue_head % QUEUE_SIZE; *img_id = q->image_index[index]; q->queue_head.store((head + 1), cuda::memory_order_release); return true; } public: queue_server(int threads) { threadblock_number = query_device(threads); //allocation CUDA_CHECK( cudaMallocHost(&cpu_gpu_q, sizeof(producer_consumer_q) * threadblock_number) ); CUDA_CHECK( cudaMallocHost(&gpu_cpu_q, sizeof(producer_consumer_q) * threadblock_number) ); CUDA_CHECK( cudaMallocHost(&stop, sizeof(bool)) ); //init queues new (cpu_gpu_q) producer_consumer_q[threadblock_number]; new (gpu_cpu_q) producer_consumer_q[threadblock_number]; //stop flag *stop = false; //kernel invocation worker<<<threadblock_number, threads>>>(cpu_gpu_q, gpu_cpu_q, stop); } ~queue_server() override { *stop = true; CUDA_CHECK( cudaDeviceSynchronize() ); if(gpu_cpu_q != NULL) CUDA_CHECK( cudaFreeHost(gpu_cpu_q) ); if(cpu_gpu_q != NULL) CUDA_CHECK( cudaFreeHost(cpu_gpu_q) ); if(cpu_gpu_q != NULL) CUDA_CHECK( cudaFreeHost(stop) ); // if(stop != NULL) { // stop->~atomic<bool>(); // CUDA_CHECK(cudaFreeHost(stop)); // } } bool enqueue(int img_id, uchar *img_in, uchar *img_out) override { for(int i = 0; i < threadblock_number; i++) if(enqueue_cpu(&cpu_gpu_q[i],img_id, img_in, img_out)) { return true; } return false; } bool dequeue(int *img_id) override { for(int i = 0; i < threadblock_number; i++) if(dequeue_cpu(&gpu_cpu_q[i],img_id)) { return true; } return false; } }; std::unique_ptr<image_processing_server> create_queues_server(int threads) { return std::make_unique<queue_server>(threads); }
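The queue_server pair above (the HIP and CUDA builds of the same file) exchanges work between the host and a persistent worker kernel through producer/consumer ring buffers placed in pinned host memory, using cuda::atomic head/tail counters with acquire/release ordering. The sketch below isolates that single-producer/single-consumer pattern in one self-contained struct; the struct name, the CAPACITY parameter, and the try_push/try_pop helpers are illustrative assumptions, not code from the files.

#include <cuda/atomic>

// Minimal single-producer/single-consumer ring buffer (illustrative sketch).
// One side calls try_push(), the other side calls try_pop(); the acquire/release
// pair on head/tail is what makes the data slots visible across CPU and GPU.
template <typename T, int CAPACITY>
struct spsc_ring {
    cuda::atomic<int, cuda::thread_scope_system> head{0};  // next slot to pop
    cuda::atomic<int, cuda::thread_scope_system> tail{0};  // next slot to push
    T slots[CAPACITY];

    __host__ __device__ bool try_push(const T &item) {
        int t = tail.load(cuda::memory_order_relaxed);
        if (t - head.load(cuda::memory_order_acquire) == CAPACITY)
            return false;                                   // full
        slots[t % CAPACITY] = item;
        tail.store(t + 1, cuda::memory_order_release);      // publish the slot
        return true;
    }

    __host__ __device__ bool try_pop(T *item) {
        int h = head.load(cuda::memory_order_relaxed);
        if (tail.load(cuda::memory_order_acquire) == h)
            return false;                                   // empty
        *item = slots[h % CAPACITY];
        head.store(h + 1, cuda::memory_order_release);      // release the slot
        return true;
    }
};

In the files themselves the same logic is split into enqueue_cpu/dequeue_cpu on the host side and enqueue_gpu/dequeue_gpu on the device side, with one queue pair per thread block allocated in pinned memory via cudaMallocHost/hipHostMalloc and constructed with placement new.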
8e427e67172ac9b74b0500f8a41e2bc2002f15c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/hip/Sort.h> #include <ATen/core/TensorBase.h> #include <ATen/core/Array.h> #include <ATen/Dispatch.h> #include <ATen/hip/cub.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/SortUtils.cuh> #include <ATen/native/hip/SortingCommon.cuh> #include <limits> #include <c10/core/DeviceArray.h> namespace at { namespace native { // In alignment with default sort on a c++ map, this function // will permute key and value tensors identically, and // in such a way that the 'key' tensor is ordered numerically void sortKeyValueInplace(const TensorBase& key, const TensorBase& value, int dim, bool dir) { TORCH_CHECK(key.sizes() == value.sizes(), "Key tensor must have same size as value tensor"); int dims = value.dim(); TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions"); // if key and value tensors have the same size, we do not need to check both ptrdiff_t inElements = key.numel(); if (inElements == 0) { return; } int64_t keySliceSize = key.size(dim); ptrdiff_t keySlices = inElements / keySliceSize; // The amount of shared memory and block size is based on // 2^ceil(lg(n)); we choose that sorting implementation for a given // size. int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize); // FIXME: We'd have to find some other trick with Thrust to perform a // vectorized (key, value) sort by slice segment TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present"); // The grid is based on the number of independent slices that we // have to sort; one block per slice dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort"); #define HANDLE_CASE(TYPE, A, SIZE) \ do { \ int blockSize = SIZE / 2; \ if (blockSize < 1) { \ blockSize = 1; \ } \ \ dim3 block(blockSize); \ \ if (dir) { \ hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \ GTOp<scalar_t, true>, TYPE, SIZE>) \ , dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ GTOp<scalar_t, true>()); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } else { \ hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \ LTOp<scalar_t, true>, TYPE, SIZE>) \ , dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ LTOp<scalar_t, true>()); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } \ } while (0) #define HANDLE_SORT_CASE(TYPE, A) \ { \ switch (ceilPowerOf2) { \ case 2048: \ HANDLE_CASE(TYPE, A, 2048); \ break; \ case 1024: \ case 512: \ case 256: \ HANDLE_CASE(TYPE, A, 1024); \ break; \ case 128: \ case 64: \ HANDLE_CASE(TYPE, A, 128); \ break; \ case 32: \ case 16: \ case 8: \ case 4: \ case 2: \ HANDLE_CASE(TYPE, A, 32); \ break; \ case 1: \ /* Nothing to do, data already sorted */ \ break; \ default: \ TORCH_INTERNAL_ASSERT(false); \ } \ } // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis 
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] { if (at::cuda::detail::canUse32BitIndexMath(key)) { at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key); at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo = at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value); auto strideKey = keyInfo.strides[dim]; keyInfo.sizes[dim] = 1; int collapseKeyDim = keyInfo.collapseDims(dim); keyInfo.strides[collapseKeyDim] = strideKey; auto strideValue = valueInfo.strides[dim]; valueInfo.sizes[dim]=1; int collapseValueDim = valueInfo.collapseDims(dim); valueInfo.strides[collapseValueDim] = strideValue; if (keyInfo.isContiguous()) { HANDLE_SORT_CASE(unsigned int, -2); } else { switch (keyInfo.dims) { case 2: HANDLE_SORT_CASE(unsigned int, 2); break; default: HANDLE_SORT_CASE(unsigned int, -1); break; } } } else { at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key); at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo = at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value); auto strideKey = keyInfo.strides[dim]; keyInfo.sizes[dim] = 1; int collapseKeyDim = keyInfo.collapseDims(dim); keyInfo.strides[collapseKeyDim] = strideKey; auto strideValue = valueInfo.strides[dim]; valueInfo.sizes[dim]=1; int collapseValueDim = valueInfo.collapseDims(dim); valueInfo.strides[collapseValueDim] = strideValue; // int64_t case is rare, just instantiate the generic version HANDLE_SORT_CASE(uint64_t, -1); } }); #undef HANDLE_CASE #undef HANDLE_SORT_CASE #undef HANDLE_A_CASE } namespace { struct offset_t { int stride; int begin; __device__ int operator[](int i) { return stride * (begin + i); } }; } namespace { // Segmented sort by full sort algorithm:. // Say we are sorting a (2, 3) tensor. We have in flattened form: // values 0.4 1.2 5.3 6.2 1.3 2.3 // indices 0 1 2 0 1 2 // segment_id 0 0 0 1 1 1 // First we sort by values, globally: // values 6.2 5.3 2.3 1.2 1.3 0.4 // indices 0 2 2 1 1 0 // segment_id 1 0 1 0 1 0 // Then we stable sort by segment id: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 2 1 0 0 2 1 // segment_id 0 0 0 1 1 1 // This method can only work if the slice we are sorting (`dim`) is // innermost, and both values and indices are contiguous. We do this // by re-arranging the input into this form as needed, which will // unfortunately allocate memory if the request is not in this form. // Vectorized sort is slower than iterated sort if the number of // slices is small (since we're sorting twice, instead of invoking a // smaller sort `numSlices` times), but the cub sort // implementation here is a catch-all, so we're not looking for // efficiency, but instead correctness. 
template<typename scalar_t> __global__ void sort_postprocess_kernel(const scalar_t *in, scalar_t *out, int64_t *index, const int2 *i_s_ptr, int nsegments, int nsort) { CUDA_KERNEL_LOOP(i, nsegments * nsort) { int segment = i / nsort; int j = i % nsort; int offset = segment * nsort; const scalar_t *in_ = in + offset; scalar_t *out_ = out + offset; int64_t *index_ = index + offset; const int2 *i_s_ptr_ = i_s_ptr + offset; int idx = i_s_ptr_[j].y; index_[j] = idx; out_[j] = in_[idx]; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_index_and_segment_kernel( int2 *data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { auto div_mod = nsort_divider.divmod(idx); auto segment = static_cast<int>(div_mod.div); auto sort = static_cast<int>(div_mod.mod); data[idx] = int2{segment, sort}; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_reverse_indices_kernel( int64_t *data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { data[idx] = nsort_divider.mod(idx); } } template<typename scalar_t> inline void segmented_sort_large_segments( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t * self_ptr, scalar_t * values_ptr, int64_t * indices_ptr ) { using namespace at::cuda::detail; auto allocator = at::cuda::getCUDADeviceAllocator(); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(nsort); c10::DeviceArray<int64_t> indices(*allocator, nsort); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); hipLaunchKernelGGL(( fill_reverse_indices_kernel), dim3(grid), dim3(block), 0, stream, indices.get(), nsort, nsort_divider); const int64_t *initial_indices = indices.get(); for (auto i: c10::irange(nsegments)){ at::cuda::cub::radix_sort_pairs<scalar_t, int64_t>( self_ptr, values_ptr, initial_indices, indices_ptr, nsort, descending); indices_ptr += nsort; self_ptr += nsort; values_ptr += nsort; } } template<typename scalar_t> inline void segmented_sort_pairs_by_full_sort( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t *const self_ptr, scalar_t *const values_ptr, int64_t *const indices_ptr ) { int64_t segment_bits = std::max<int64_t>(1L, static_cast<int64_t>(::ceil(std::log2(nsegments)))); const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto indices_and_segment = cuda_allocator->allocate(numel * sizeof(int2)); auto i_s_ptr = static_cast<int2 *>(indices_and_segment.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); hipLaunchKernelGGL(( fill_index_and_segment_kernel), dim3(grid), dim3(block), 0, stream, i_s_ptr, numel, nsort_divider); auto indices_and_segment2 = cuda_allocator->allocate(nsegments * nsort * sizeof(int2)); auto i_s_ptr2 = static_cast<int2 *>(indices_and_segment2.get()); at::cuda::cub::radix_sort_pairs<scalar_t, int2>( self_ptr, nullptr, i_s_ptr, i_s_ptr2, n, descending); TORCH_INTERNAL_ASSERT(segment_bits <= 32); // sort on lower 32bits, i.e. 
segment index at::cuda::cub::radix_sort_keys<int64_t>( reinterpret_cast<int64_t *>(i_s_ptr2), reinterpret_cast<int64_t *>(i_s_ptr), n, false, 0, segment_bits); hipLaunchKernelGGL(( sort_postprocess_kernel), dim3((n + 511) / 512), dim3(512), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_ptr, values_ptr, indices_ptr, i_s_ptr, nsegments, nsort); } template<typename scalar_t> void segmented_sort_pairs( int64_t nsegments, int64_t nsort, int64_t n, bool descending, const scalar_t *self_ptr, scalar_t *values_ptr, int64_t *indices_ptr) { const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto reverse_indices = cuda_allocator->allocate(numel * sizeof(int64_t)); int64_t *reverse_indices_ptr = static_cast<int64_t *>(reverse_indices.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); hipLaunchKernelGGL(( fill_reverse_indices_kernel), dim3(grid), dim3(block), 0, stream, reverse_indices_ptr, numel, nsort_divider); at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr, reverse_indices_ptr, indices_ptr, n, nsegments, offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending); } } // namespace void launch_stable_sort_kernel( const TensorBase &self, int64_t dim, bool descending, const TensorBase &values, const TensorBase &indices) { const auto numel = self.numel(); if (numel == 0) { return; } int64_t numel_or_intmax = ::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max())); int64_t nsort = self.size(dim); int64_t nbatch = (numel_or_intmax / nsort) * nsort; TORCH_CHECK(nbatch > 0, "Cannot sort dimension of length ", nsort); int64_t *indices_ptr = indices.data_ptr<int64_t>(); #if (defined(USE_ROCM) && ROCM_VERSION < 40500) constexpr bool is_rocm_bf16_sort_unsupported = true; #else constexpr bool is_rocm_bf16_sort_unsupported = false; #endif AT_DISPATCH_ALL_TYPES_AND3(kBool, kHalf, kBFloat16, self.scalar_type(), "sort", [&]{ c10::guts::if_constexpr<!(is_rocm_bf16_sort_unsupported && std::is_same<scalar_t, c10::BFloat16>::value)>([&](auto _){ const scalar_t *self_ptr = self.data_ptr<scalar_t>(); scalar_t *values_ptr = values.data_ptr<scalar_t>(); int64_t remaining = _(numel); while (remaining > 0) { int64_t n = ::min(remaining, nbatch); int64_t nsegments = n / nsort; if (nsegments == 1 || nsort >= 1000000) { //rough heuristics where even a single sort occupies GPU segmented_sort_large_segments( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else if (nsegments < 128) { segmented_sort_pairs_by_full_sort(nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else { segmented_sort_pairs(nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } remaining -= n; self_ptr += n; values_ptr += n; indices_ptr += n; } }, [&](auto _){ TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm < 4.5"); }); }); } }} // namespace at::native
8e427e67172ac9b74b0500f8a41e2bc2002f15c6.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/cuda/Sort.h> #include <ATen/core/TensorBase.h> #include <ATen/core/Array.h> #include <ATen/Dispatch.h> #include <ATen/cuda/cub.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/SortUtils.cuh> #include <ATen/native/cuda/SortingCommon.cuh> #include <limits> #include <c10/core/DeviceArray.h> namespace at { namespace native { // In alignment with default sort on a c++ map, this function // will permute key and value tensors identically, and // in such a way that the 'key' tensor is ordered numerically void sortKeyValueInplace(const TensorBase& key, const TensorBase& value, int dim, bool dir) { TORCH_CHECK(key.sizes() == value.sizes(), "Key tensor must have same size as value tensor"); int dims = value.dim(); TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions"); // if key and value tensors have the same size, we do not need to check both ptrdiff_t inElements = key.numel(); if (inElements == 0) { return; } int64_t keySliceSize = key.size(dim); ptrdiff_t keySlices = inElements / keySliceSize; // The amount of shared memory and block size is based on // 2^ceil(lg(n)); we choose that sorting implementation for a given // size. int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize); // FIXME: We'd have to find some other trick with Thrust to perform a // vectorized (key, value) sort by slice segment TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present"); // The grid is based on the number of independent slices that we // have to sort; one block per slice dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort"); #define HANDLE_CASE(TYPE, A, SIZE) \ do { \ int blockSize = SIZE / 2; \ if (blockSize < 1) { \ blockSize = 1; \ } \ \ dim3 block(blockSize); \ \ if (dir) { \ bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \ GTOp<scalar_t, true>, TYPE, SIZE> \ <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ GTOp<scalar_t, true>()); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } else { \ bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \ LTOp<scalar_t, true>, TYPE, SIZE> \ <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ LTOp<scalar_t, true>()); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } \ } while (0) #define HANDLE_SORT_CASE(TYPE, A) \ { \ switch (ceilPowerOf2) { \ case 2048: \ HANDLE_CASE(TYPE, A, 2048); \ break; \ case 1024: \ case 512: \ case 256: \ HANDLE_CASE(TYPE, A, 1024); \ break; \ case 128: \ case 64: \ HANDLE_CASE(TYPE, A, 128); \ break; \ case 32: \ case 16: \ case 8: \ case 4: \ case 2: \ HANDLE_CASE(TYPE, A, 32); \ break; \ case 1: \ /* Nothing to do, data already sorted */ \ break; \ default: \ TORCH_INTERNAL_ASSERT(false); \ } \ } // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] { if 
(at::cuda::detail::canUse32BitIndexMath(key)) { at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key); at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo = at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value); auto strideKey = keyInfo.strides[dim]; keyInfo.sizes[dim] = 1; int collapseKeyDim = keyInfo.collapseDims(dim); keyInfo.strides[collapseKeyDim] = strideKey; auto strideValue = valueInfo.strides[dim]; valueInfo.sizes[dim]=1; int collapseValueDim = valueInfo.collapseDims(dim); valueInfo.strides[collapseValueDim] = strideValue; if (keyInfo.isContiguous()) { HANDLE_SORT_CASE(unsigned int, -2); } else { switch (keyInfo.dims) { case 2: HANDLE_SORT_CASE(unsigned int, 2); break; default: HANDLE_SORT_CASE(unsigned int, -1); break; } } } else { at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key); at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo = at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value); auto strideKey = keyInfo.strides[dim]; keyInfo.sizes[dim] = 1; int collapseKeyDim = keyInfo.collapseDims(dim); keyInfo.strides[collapseKeyDim] = strideKey; auto strideValue = valueInfo.strides[dim]; valueInfo.sizes[dim]=1; int collapseValueDim = valueInfo.collapseDims(dim); valueInfo.strides[collapseValueDim] = strideValue; // int64_t case is rare, just instantiate the generic version HANDLE_SORT_CASE(uint64_t, -1); } }); #undef HANDLE_CASE #undef HANDLE_SORT_CASE #undef HANDLE_A_CASE } namespace { struct offset_t { int stride; int begin; __device__ int operator[](int i) { return stride * (begin + i); } }; } namespace { // Segmented sort by full sort algorithm:. // Say we are sorting a (2, 3) tensor. We have in flattened form: // values 0.4 1.2 5.3 6.2 1.3 2.3 // indices 0 1 2 0 1 2 // segment_id 0 0 0 1 1 1 // First we sort by values, globally: // values 6.2 5.3 2.3 1.2 1.3 0.4 // indices 0 2 2 1 1 0 // segment_id 1 0 1 0 1 0 // Then we stable sort by segment id: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 2 1 0 0 2 1 // segment_id 0 0 0 1 1 1 // This method can only work if the slice we are sorting (`dim`) is // innermost, and both values and indices are contiguous. We do this // by re-arranging the input into this form as needed, which will // unfortunately allocate memory if the request is not in this form. // Vectorized sort is slower than iterated sort if the number of // slices is small (since we're sorting twice, instead of invoking a // smaller sort `numSlices` times), but the cub sort // implementation here is a catch-all, so we're not looking for // efficiency, but instead correctness. 
template<typename scalar_t> __global__ void sort_postprocess_kernel(const scalar_t *in, scalar_t *out, int64_t *index, const int2 *i_s_ptr, int nsegments, int nsort) { CUDA_KERNEL_LOOP(i, nsegments * nsort) { int segment = i / nsort; int j = i % nsort; int offset = segment * nsort; const scalar_t *in_ = in + offset; scalar_t *out_ = out + offset; int64_t *index_ = index + offset; const int2 *i_s_ptr_ = i_s_ptr + offset; int idx = i_s_ptr_[j].y; index_[j] = idx; out_[j] = in_[idx]; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_index_and_segment_kernel( int2 *data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { auto div_mod = nsort_divider.divmod(idx); auto segment = static_cast<int>(div_mod.div); auto sort = static_cast<int>(div_mod.mod); data[idx] = int2{segment, sort}; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_reverse_indices_kernel( int64_t *data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { data[idx] = nsort_divider.mod(idx); } } template<typename scalar_t> inline void segmented_sort_large_segments( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t * self_ptr, scalar_t * values_ptr, int64_t * indices_ptr ) { using namespace at::cuda::detail; auto allocator = at::cuda::getCUDADeviceAllocator(); auto stream = at::cuda::getCurrentCUDAStream(); dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(nsort); c10::DeviceArray<int64_t> indices(*allocator, nsort); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); fill_reverse_indices_kernel<<<grid, block, 0, stream>>>( indices.get(), nsort, nsort_divider); const int64_t *initial_indices = indices.get(); for (auto i: c10::irange(nsegments)){ at::cuda::cub::radix_sort_pairs<scalar_t, int64_t>( self_ptr, values_ptr, initial_indices, indices_ptr, nsort, descending); indices_ptr += nsort; self_ptr += nsort; values_ptr += nsort; } } template<typename scalar_t> inline void segmented_sort_pairs_by_full_sort( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t *const self_ptr, scalar_t *const values_ptr, int64_t *const indices_ptr ) { int64_t segment_bits = std::max<int64_t>(1L, static_cast<int64_t>(std::ceil(std::log2(nsegments)))); const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto indices_and_segment = cuda_allocator->allocate(numel * sizeof(int2)); auto i_s_ptr = static_cast<int2 *>(indices_and_segment.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::cuda::getCurrentCUDAStream(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); fill_index_and_segment_kernel<<<grid, block, 0, stream>>>( i_s_ptr, numel, nsort_divider); auto indices_and_segment2 = cuda_allocator->allocate(nsegments * nsort * sizeof(int2)); auto i_s_ptr2 = static_cast<int2 *>(indices_and_segment2.get()); at::cuda::cub::radix_sort_pairs<scalar_t, int2>( self_ptr, nullptr, i_s_ptr, i_s_ptr2, n, descending); TORCH_INTERNAL_ASSERT(segment_bits <= 32); // sort on lower 32bits, i.e. 
segment index at::cuda::cub::radix_sort_keys<int64_t>( reinterpret_cast<int64_t *>(i_s_ptr2), reinterpret_cast<int64_t *>(i_s_ptr), n, false, 0, segment_bits); sort_postprocess_kernel<<<(n + 511) / 512, 512, 0, at::cuda::getCurrentCUDAStream()>>>( self_ptr, values_ptr, indices_ptr, i_s_ptr, nsegments, nsort); } template<typename scalar_t> void segmented_sort_pairs( int64_t nsegments, int64_t nsort, int64_t n, bool descending, const scalar_t *self_ptr, scalar_t *values_ptr, int64_t *indices_ptr) { const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto reverse_indices = cuda_allocator->allocate(numel * sizeof(int64_t)); int64_t *reverse_indices_ptr = static_cast<int64_t *>(reverse_indices.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::cuda::getCurrentCUDAStream(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); fill_reverse_indices_kernel<<<grid, block, 0, stream>>>( reverse_indices_ptr, numel, nsort_divider); at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr, reverse_indices_ptr, indices_ptr, n, nsegments, offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending); } } // namespace void launch_stable_sort_kernel( const TensorBase &self, int64_t dim, bool descending, const TensorBase &values, const TensorBase &indices) { const auto numel = self.numel(); if (numel == 0) { return; } int64_t numel_or_intmax = std::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max())); int64_t nsort = self.size(dim); int64_t nbatch = (numel_or_intmax / nsort) * nsort; TORCH_CHECK(nbatch > 0, "Cannot sort dimension of length ", nsort); int64_t *indices_ptr = indices.data_ptr<int64_t>(); #if (defined(USE_ROCM) && ROCM_VERSION < 40500) constexpr bool is_rocm_bf16_sort_unsupported = true; #else constexpr bool is_rocm_bf16_sort_unsupported = false; #endif AT_DISPATCH_ALL_TYPES_AND3(kBool, kHalf, kBFloat16, self.scalar_type(), "sort", [&]{ c10::guts::if_constexpr<!(is_rocm_bf16_sort_unsupported && std::is_same<scalar_t, c10::BFloat16>::value)>([&](auto _){ const scalar_t *self_ptr = self.data_ptr<scalar_t>(); scalar_t *values_ptr = values.data_ptr<scalar_t>(); int64_t remaining = _(numel); while (remaining > 0) { int64_t n = std::min(remaining, nbatch); int64_t nsegments = n / nsort; if (nsegments == 1 || nsort >= 1000000) { //rough heuristics where even a single sort occupies GPU segmented_sort_large_segments( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else if (nsegments < 128) { segmented_sort_pairs_by_full_sort(nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else { segmented_sort_pairs(nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } remaining -= n; self_ptr += n; values_ptr += n; indices_ptr += n; } }, [&](auto _){ TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm < 4.5"); }); }); } }} // namespace at::native
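The comment block in both versions of this file describes the "segmented sort by full sort" trick: tag every element with its segment id and within-segment index, sort everything once by value, then stable-sort by segment id so each segment ends up internally sorted. The file implements this with cub radix sorts over packed int2 keys; the sketch below is only a hedged illustration of the same two-pass idea using Thrust, and every name in it is an assumption rather than code from the file.

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

// Illustrative two-pass "segmented sort by full sort": nsegments rows of length
// nsort stored contiguously in `values`; on return each row is sorted descending
// and `indices` holds the within-row source position of every element.
void segmented_sort_by_full_sort_sketch(thrust::device_vector<float>& values,
                                        thrust::device_vector<int>& indices,
                                        int nsegments, int nsort)
{
    using namespace thrust::placeholders;
    const int n = nsegments * nsort;
    thrust::counting_iterator<int> first(0), last(n);

    indices.resize(n);
    thrust::device_vector<int> segment_ids(n);
    thrust::transform(first, last, segment_ids.begin(), _1 / nsort);  // row of each element
    thrust::transform(first, last, indices.begin(),     _1 % nsort);  // column of each element

    // Pass 1: one global sort by value, dragging (segment id, index) along.
    auto payload = thrust::make_zip_iterator(
        thrust::make_tuple(segment_ids.begin(), indices.begin()));
    thrust::sort_by_key(values.begin(), values.end(), payload, thrust::greater<float>());

    // Pass 2: stable sort by segment id regroups the rows while keeping the
    // within-row value order established by pass 1.
    auto payload2 = thrust::make_zip_iterator(
        thrust::make_tuple(values.begin(), indices.begin()));
    thrust::stable_sort_by_key(segment_ids.begin(), segment_ids.end(), payload2);
}

The second pass must be stable; an unstable sort by segment id would destroy the per-segment value ordering produced by the first pass.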
d2641dea6a8cbedd7e514b9ae7aad26cf8dd6469.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #include "THHDeviceTensor.cuh" #include "THHDeviceTensorUtils.cuh" const int WARP_SIZE = 32; typedef THCDeviceTensor<float, 3> DeviceTensor3; typedef THCDeviceTensor<float, 1> DeviceTensor1; // The maximum number of threads in a block const int MAX_BLOCK_SIZE = 512; // Number of threads in a block given an input size up to MAX_BLOCK_SIZE static int getNumThreads(int nElem) { int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; for (int i = 0; i != 5; ++i) { if (nElem <= threadSizes[i]) { return threadSizes[i]; } } return MAX_BLOCK_SIZE; } // Returns the index of the most significant 1 bit in `val`. __device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } struct Float2 { float v1, v2; __device__ Float2() {} __device__ Float2(float v1, float v2) : v1(v1), v2(v2) {} __device__ Float2(float v) : v1(v), v2(v) {} __device__ Float2& operator+=(const Float2& a) { v1 += a.v1; v2 += a.v2; return *this; } }; struct SumOp { __device__ SumOp(const DeviceTensor3 t) : tensor(t) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { return tensor[batch][plane][n]; } const DeviceTensor3 tensor; }; struct VarOp { __device__ VarOp(float m, const DeviceTensor3 t) : mean(m), tensor(t) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { float val = tensor[batch][plane][n]; return (val - mean) * (val - mean); } const float mean; const DeviceTensor3 tensor; }; struct GradOp { __device__ GradOp(float m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {} __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { float g = gradOutput[batch][plane][n]; float c = input[batch][plane][n] - mean; return Float2(g, g * c); } const float mean; const DeviceTensor3 input; const DeviceTensor3 gradOutput; }; // Sum across all threads within a warp static __device__ __forceinline__ float warpSum(float val) { #if __CUDA_ARCH__ >= 300 for (int i = 0; i < getMSB(WARP_SIZE); ++i) { val += __shfl_xor(val, 1 << i, WARP_SIZE); } #else __shared__ float values[MAX_BLOCK_SIZE]; values[threadIdx.x] = val; __threadfence_block(); const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; for (int i = 1; i < WARP_SIZE; i++) { val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; } #endif return val; } static __device__ __forceinline__ Float2 warpSum(Float2 value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <int Dim> static THCDeviceTensor<float, 
Dim> devicetensor(THCState *state, THCudaTensor *t) { if (!t) { return THCDeviceTensor<float, Dim>(); } int inDim = THCudaTensor_nDimension(state, t); if (inDim == Dim) { return toDeviceTensor<float, Dim>(state, t); } // View in which the last dimensions are collapsed or expanded as needed THAssert(THCudaTensor_isContiguous(state, t)); int size[Dim]; for (int i = 0; i < Dim || i < inDim; ++i) { if (i < Dim && i < inDim) { size[i] = t->size[i]; } else if (i < Dim) { size[i] = 1; } else { size[Dim - 1] *= t->size[i]; } } return THCDeviceTensor<float, Dim>(THCudaTensor_data(state, t), size); } __global__ void BatchNormalizationUpdateOutputInference_kernel( const DeviceTensor3 input, DeviceTensor3 output, DeviceTensor1 runningMean, DeviceTensor1 runningVar, const DeviceTensor1 weight, const DeviceTensor1 bias, float epsilon) { int plane = blockIdx.x; float invstd = 1.0f / sqrt(runningVar[plane].ldg() + epsilon); float mean = runningMean[plane].ldg(); float gamma = weight.numElements() > 0 ? weight[plane].ldg() : 1.0f; float beta = bias.numElements() > 0 ? bias[plane].ldg() : 0.0f; // Write normalized and update the output for (int batch = 0; batch < input.getSize(0); batch++) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { float inp = input[batch][plane][x].ldg(); output[batch][plane][x] = gamma * (inp - mean) * invstd + beta; } } } __global__ void BatchNormalizationUpdateOutput_kernel( const DeviceTensor3 input, DeviceTensor3 output, const DeviceTensor1 weight, const DeviceTensor1 bias, const float epsilon, const float momentum, DeviceTensor1 runningMean, DeviceTensor1 runningVar, DeviceTensor1 saveMean, DeviceTensor1 saveStd) { int plane = blockIdx.x; int N = input.getSize(0) * input.getSize(2); float norm = 1.0f / N; // Compute the mean and variance across (batch, x/y/z) float mean = reduce<float>(SumOp(input), input, plane) * norm; __syncthreads(); float varN = reduce<float>(VarOp(mean, input), input, plane); float invStd = 0.0f; if (varN != 0.0f || epsilon != 0.0f) { invStd = 1 / sqrt(varN * norm + epsilon); } // Save the mean, variance, and moving averages if (threadIdx.x == 0) { // Momentum based writeback float unbiasedVar = varN / (N - 1); saveMean[plane] = mean; saveStd[plane] = invStd; runningMean[plane] = (1 - momentum) * runningMean[plane] + momentum * mean; runningVar[plane] = (1 - momentum) * runningVar[plane] + momentum * unbiasedVar; } // Write normalized and update the output float gamma = weight.numElements() > 0 ? weight[plane] : 1.0f; float beta = bias.numElements() > 0 ? 
bias[plane] : 0.0f; for (int batch = 0; batch < input.getSize(0); ++batch) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { float inp = input[batch][plane][x].ldg(); output[batch][plane][x] = gamma * (inp - mean) * invStd + beta; } } } void THNN_CudaBatchNormalization_updateOutput( THCState *state, THCudaTensor *input_, THCudaTensor *output_, THCudaTensor *weight_, THCudaTensor *bias_, THCudaTensor *runningMean_, THCudaTensor *runningVar_, THCudaTensor *saveMean_, THCudaTensor *saveStd_, bool train, double momentum, double eps) { THCUNN_assertSameGPU(state, 8, input_, output_, weight_, bias_, runningMean_, runningVar_, saveMean_, saveStd_); DeviceTensor3 input = devicetensor<3>(state, input_); DeviceTensor3 output = devicetensor<3>(state, output_); DeviceTensor1 weight = devicetensor<1>(state, weight_); DeviceTensor1 bias = devicetensor<1>(state, bias_); DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_); DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_); DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_); DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_); hipStream_t s = THCState_getCurrentStream(state); hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state); if (!train) { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); hipLaunchKernelGGL(( BatchNormalizationUpdateOutputInference_kernel), dim3(blocks), dim3(threads), 0, s, input, output, runningMean, runningVar, weight, bias, eps); } else { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); hipLaunchKernelGGL(( BatchNormalizationUpdateOutput_kernel), dim3(blocks), dim3(threads), 0, s, input, output, weight, bias, eps, momentum, runningMean, runningVar, saveMean, saveStd); } THCudaCheck(hipGetLastError()); } __global__ void BatchNormalizationBackward_kernel( const DeviceTensor3 input, const DeviceTensor3 gradOutput, DeviceTensor3 gradInput, DeviceTensor1 gradWeight, DeviceTensor1 gradBias, const DeviceTensor1 weight, const DeviceTensor1 runningMean, const DeviceTensor1 runningVar, const DeviceTensor1 saveMean, const DeviceTensor1 saveStd, bool train, float scale, double eps) { int plane = blockIdx.x; int N = gradOutput.getSize(0) * gradOutput.getSize(2); float mean, stdVal; if (train) { mean = saveMean[plane]; stdVal = saveStd[plane]; } else { mean = runningMean[plane]; stdVal = 1 / sqrt(runningVar[plane] + eps); } float weightVal = weight.numElements() > 0 ? weight[plane] : 1.0f; float norm = 1.0f / N; // Compute two values across (batch, x/y/z) in one pass: // 1. Sum(gradOutput) // 2. 
DotProduct(input - mean, gradOutput) Float2 res = reduce<Float2>(GradOp(mean, input, gradOutput), gradOutput, plane); float gradOutputSum = res.v1; float dotP = res.v2; float gradMean = gradOutputSum * norm; float projScale = dotP * norm * stdVal * stdVal; float gradScale = stdVal * weightVal; if (gradInput.numElements() > 0) { for (int batch = 0; batch < gradOutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradOutput.getSize(2); x += blockDim.x) { float gradOut = gradOutput[batch][plane][x]; if (train) { float inp = input[batch][plane][x]; float proj = (inp - mean) * projScale; gradInput[batch][plane][x] = (gradOut - proj - gradMean) * gradScale; } else { gradInput[batch][plane][x] = gradOut * gradScale; } } } } if (gradWeight.numElements() > 0) { if (threadIdx.x == 0) { gradWeight[plane] += scale * dotP * stdVal; } } if (gradBias.numElements() > 0) { if (threadIdx.x == 0) { gradBias[plane] += scale * gradOutputSum; } } } void THNN_CudaBatchNormalization_backward( THCState *state, THCudaTensor *input_, THCudaTensor *gradOutput_, THCudaTensor *gradInput_, THCudaTensor *gradWeight_, THCudaTensor *gradBias_, THCudaTensor *weight_, THCudaTensor *runningMean_, THCudaTensor *runningVar_, THCudaTensor *saveMean_, THCudaTensor *saveStd_, bool train, float scale, double eps) { THCUNN_assertSameGPU(state, 10, input_, gradOutput_, gradInput_, gradWeight_, gradBias_, weight_, runningMean_, runningVar_, saveMean_, saveStd_); DeviceTensor3 input = devicetensor<3>(state, input_); DeviceTensor3 gradOutput = devicetensor<3>(state, gradOutput_); DeviceTensor3 gradInput = devicetensor<3>(state, gradInput_); DeviceTensor1 gradWeight = devicetensor<1>(state, gradWeight_); DeviceTensor1 gradBias = devicetensor<1>(state, gradBias_); DeviceTensor1 weight = devicetensor<1>(state, weight_); DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_); DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_); DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_); DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_); hipStream_t s = THCState_getCurrentStream(state); dim3 blocks(gradOutput.getSize(1)); dim3 threads(getNumThreads(gradOutput.getSize(2))); hipLaunchKernelGGL(( BatchNormalizationBackward_kernel), dim3(blocks), dim3(threads), 0, s, input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar, saveMean, saveStd, train, scale, eps); THCudaCheck(hipGetLastError()); }
d2641dea6a8cbedd7e514b9ae7aad26cf8dd6469.cu
#include "THCUNN.h" #include "common.h" #include "THCDeviceTensor.cuh" #include "THCDeviceTensorUtils.cuh" const int WARP_SIZE = 32; typedef THCDeviceTensor<float, 3> DeviceTensor3; typedef THCDeviceTensor<float, 1> DeviceTensor1; // The maximum number of threads in a block const int MAX_BLOCK_SIZE = 512; // Number of threads in a block given an input size up to MAX_BLOCK_SIZE static int getNumThreads(int nElem) { int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; for (int i = 0; i != 5; ++i) { if (nElem <= threadSizes[i]) { return threadSizes[i]; } } return MAX_BLOCK_SIZE; } // Returns the index of the most significant 1 bit in `val`. __device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } struct Float2 { float v1, v2; __device__ Float2() {} __device__ Float2(float v1, float v2) : v1(v1), v2(v2) {} __device__ Float2(float v) : v1(v), v2(v) {} __device__ Float2& operator+=(const Float2& a) { v1 += a.v1; v2 += a.v2; return *this; } }; struct SumOp { __device__ SumOp(const DeviceTensor3 t) : tensor(t) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { return tensor[batch][plane][n]; } const DeviceTensor3 tensor; }; struct VarOp { __device__ VarOp(float m, const DeviceTensor3 t) : mean(m), tensor(t) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { float val = tensor[batch][plane][n]; return (val - mean) * (val - mean); } const float mean; const DeviceTensor3 tensor; }; struct GradOp { __device__ GradOp(float m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {} __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { float g = gradOutput[batch][plane][n]; float c = input[batch][plane][n] - mean; return Float2(g, g * c); } const float mean; const DeviceTensor3 input; const DeviceTensor3 gradOutput; }; // Sum across all threads within a warp static __device__ __forceinline__ float warpSum(float val) { #if __CUDA_ARCH__ >= 300 for (int i = 0; i < getMSB(WARP_SIZE); ++i) { val += __shfl_xor(val, 1 << i, WARP_SIZE); } #else __shared__ float values[MAX_BLOCK_SIZE]; values[threadIdx.x] = val; __threadfence_block(); const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; for (int i = 1; i < WARP_SIZE; i++) { val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; } #endif return val; } static __device__ __forceinline__ Float2 warpSum(Float2 value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <int Dim> static THCDeviceTensor<float, Dim> devicetensor(THCState *state, THCudaTensor *t) { if (!t) { return 
THCDeviceTensor<float, Dim>(); } int inDim = THCudaTensor_nDimension(state, t); if (inDim == Dim) { return toDeviceTensor<float, Dim>(state, t); } // View in which the last dimensions are collapsed or expanded as needed THAssert(THCudaTensor_isContiguous(state, t)); int size[Dim]; for (int i = 0; i < Dim || i < inDim; ++i) { if (i < Dim && i < inDim) { size[i] = t->size[i]; } else if (i < Dim) { size[i] = 1; } else { size[Dim - 1] *= t->size[i]; } } return THCDeviceTensor<float, Dim>(THCudaTensor_data(state, t), size); } __global__ void BatchNormalizationUpdateOutputInference_kernel( const DeviceTensor3 input, DeviceTensor3 output, DeviceTensor1 runningMean, DeviceTensor1 runningVar, const DeviceTensor1 weight, const DeviceTensor1 bias, float epsilon) { int plane = blockIdx.x; float invstd = 1.0f / sqrt(runningVar[plane].ldg() + epsilon); float mean = runningMean[plane].ldg(); float gamma = weight.numElements() > 0 ? weight[plane].ldg() : 1.0f; float beta = bias.numElements() > 0 ? bias[plane].ldg() : 0.0f; // Write normalized and update the output for (int batch = 0; batch < input.getSize(0); batch++) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { float inp = input[batch][plane][x].ldg(); output[batch][plane][x] = gamma * (inp - mean) * invstd + beta; } } } __global__ void BatchNormalizationUpdateOutput_kernel( const DeviceTensor3 input, DeviceTensor3 output, const DeviceTensor1 weight, const DeviceTensor1 bias, const float epsilon, const float momentum, DeviceTensor1 runningMean, DeviceTensor1 runningVar, DeviceTensor1 saveMean, DeviceTensor1 saveStd) { int plane = blockIdx.x; int N = input.getSize(0) * input.getSize(2); float norm = 1.0f / N; // Compute the mean and variance across (batch, x/y/z) float mean = reduce<float>(SumOp(input), input, plane) * norm; __syncthreads(); float varN = reduce<float>(VarOp(mean, input), input, plane); float invStd = 0.0f; if (varN != 0.0f || epsilon != 0.0f) { invStd = 1 / sqrt(varN * norm + epsilon); } // Save the mean, variance, and moving averages if (threadIdx.x == 0) { // Momentum based writeback float unbiasedVar = varN / (N - 1); saveMean[plane] = mean; saveStd[plane] = invStd; runningMean[plane] = (1 - momentum) * runningMean[plane] + momentum * mean; runningVar[plane] = (1 - momentum) * runningVar[plane] + momentum * unbiasedVar; } // Write normalized and update the output float gamma = weight.numElements() > 0 ? weight[plane] : 1.0f; float beta = bias.numElements() > 0 ? 
bias[plane] : 0.0f; for (int batch = 0; batch < input.getSize(0); ++batch) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { float inp = input[batch][plane][x].ldg(); output[batch][plane][x] = gamma * (inp - mean) * invStd + beta; } } } void THNN_CudaBatchNormalization_updateOutput( THCState *state, THCudaTensor *input_, THCudaTensor *output_, THCudaTensor *weight_, THCudaTensor *bias_, THCudaTensor *runningMean_, THCudaTensor *runningVar_, THCudaTensor *saveMean_, THCudaTensor *saveStd_, bool train, double momentum, double eps) { THCUNN_assertSameGPU(state, 8, input_, output_, weight_, bias_, runningMean_, runningVar_, saveMean_, saveStd_); DeviceTensor3 input = devicetensor<3>(state, input_); DeviceTensor3 output = devicetensor<3>(state, output_); DeviceTensor1 weight = devicetensor<1>(state, weight_); DeviceTensor1 bias = devicetensor<1>(state, bias_); DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_); DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_); DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_); DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_); cudaStream_t s = THCState_getCurrentStream(state); cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state); if (!train) { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); BatchNormalizationUpdateOutputInference_kernel<<<blocks, threads, 0, s>>>( input, output, runningMean, runningVar, weight, bias, eps); } else { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); BatchNormalizationUpdateOutput_kernel<<<blocks, threads, 0, s>>>( input, output, weight, bias, eps, momentum, runningMean, runningVar, saveMean, saveStd); } THCudaCheck(cudaGetLastError()); } __global__ void BatchNormalizationBackward_kernel( const DeviceTensor3 input, const DeviceTensor3 gradOutput, DeviceTensor3 gradInput, DeviceTensor1 gradWeight, DeviceTensor1 gradBias, const DeviceTensor1 weight, const DeviceTensor1 runningMean, const DeviceTensor1 runningVar, const DeviceTensor1 saveMean, const DeviceTensor1 saveStd, bool train, float scale, double eps) { int plane = blockIdx.x; int N = gradOutput.getSize(0) * gradOutput.getSize(2); float mean, stdVal; if (train) { mean = saveMean[plane]; stdVal = saveStd[plane]; } else { mean = runningMean[plane]; stdVal = 1 / sqrt(runningVar[plane] + eps); } float weightVal = weight.numElements() > 0 ? weight[plane] : 1.0f; float norm = 1.0f / N; // Compute two values across (batch, x/y/z) in one pass: // 1. Sum(gradOutput) // 2. 
DotProduct(input - mean, gradOutput) Float2 res = reduce<Float2>(GradOp(mean, input, gradOutput), gradOutput, plane); float gradOutputSum = res.v1; float dotP = res.v2; float gradMean = gradOutputSum * norm; float projScale = dotP * norm * stdVal * stdVal; float gradScale = stdVal * weightVal; if (gradInput.numElements() > 0) { for (int batch = 0; batch < gradOutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradOutput.getSize(2); x += blockDim.x) { float gradOut = gradOutput[batch][plane][x]; if (train) { float inp = input[batch][plane][x]; float proj = (inp - mean) * projScale; gradInput[batch][plane][x] = (gradOut - proj - gradMean) * gradScale; } else { gradInput[batch][plane][x] = gradOut * gradScale; } } } } if (gradWeight.numElements() > 0) { if (threadIdx.x == 0) { gradWeight[plane] += scale * dotP * stdVal; } } if (gradBias.numElements() > 0) { if (threadIdx.x == 0) { gradBias[plane] += scale * gradOutputSum; } } } void THNN_CudaBatchNormalization_backward( THCState *state, THCudaTensor *input_, THCudaTensor *gradOutput_, THCudaTensor *gradInput_, THCudaTensor *gradWeight_, THCudaTensor *gradBias_, THCudaTensor *weight_, THCudaTensor *runningMean_, THCudaTensor *runningVar_, THCudaTensor *saveMean_, THCudaTensor *saveStd_, bool train, float scale, double eps) { THCUNN_assertSameGPU(state, 10, input_, gradOutput_, gradInput_, gradWeight_, gradBias_, weight_, runningMean_, runningVar_, saveMean_, saveStd_); DeviceTensor3 input = devicetensor<3>(state, input_); DeviceTensor3 gradOutput = devicetensor<3>(state, gradOutput_); DeviceTensor3 gradInput = devicetensor<3>(state, gradInput_); DeviceTensor1 gradWeight = devicetensor<1>(state, gradWeight_); DeviceTensor1 gradBias = devicetensor<1>(state, gradBias_); DeviceTensor1 weight = devicetensor<1>(state, weight_); DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_); DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_); DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_); DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_); cudaStream_t s = THCState_getCurrentStream(state); dim3 blocks(gradOutput.getSize(1)); dim3 threads(getNumThreads(gradOutput.getSize(2))); BatchNormalizationBackward_kernel<<<blocks, threads, 0, s>>>( input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar, saveMean, saveStd, train, scale, eps); THCudaCheck(cudaGetLastError()); }
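warpSum in both copies of this file performs a butterfly reduction with the pre-CUDA-9 __shfl_xor intrinsic, guarded by a shared-memory fallback for architectures below 3.0. On current toolkits the same reduction is normally written with the *_sync shuffle variants and an explicit lane mask; a minimal stand-alone sketch follows, where the function name and the full-warp 0xffffffff mask are assumptions, not taken from the file.

// Butterfly (XOR) warp reduction using the CUDA 9+ *_sync shuffle intrinsics.
// All 32 lanes of the warp must reach this call with the full mask active.
__device__ __forceinline__ float warp_sum_sync(float val)
{
    const unsigned full_mask = 0xffffffffu;
    for (int offset = 16; offset > 0; offset >>= 1) {
        val += __shfl_xor_sync(full_mask, val, offset);
    }
    return val;  // every lane now holds the warp-wide sum
}

The surrounding two-level scheme in the file stays the same: lane 0 of each warp writes its partial sum to shared memory, and the first warp then reduces those partials so the block-wide result can be broadcast from shared[0].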
ebdd846c997488abd55e1b89062feb372386f23a.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <math_functions.h>

#include "util.h"
#include "cudautil.h"

typedef struct {
  int rows;
  int cols;
  float* vals;
} cuda_matrix_t;

__global__ void matrix_test() {
  cuda_matrix_t A = {2, 3, {0,0,0,0,0}}, B = {2, 3}, C = {2, 3};
}

int main (int argc, char *argv[]) {
  hipLaunchKernelGGL(( matrix_test), dim3(1),dim3(10), 0, 0, );
}
ebdd846c997488abd55e1b89062feb372386f23a.cu
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <math_functions.h>

#include "util.h"
#include "cudautil.h"

typedef struct {
  int rows;
  int cols;
  float* vals;
} cuda_matrix_t;

__global__ void matrix_test() {
  cuda_matrix_t A = {2, 3, {0,0,0,0,0}}, B = {2, 3}, C = {2, 3};
}

int main (int argc, char *argv[]) {
  matrix_test<<<1,10>>>();
}
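One note on this last pair: both the HIP and CUDA versions initialize the float* member vals of cuda_matrix_t with the brace list {0,0,0,0,0}, which supplies five initializers for a single pointer and is rejected when compiled as C++, so the test kernel does not build as written. A compilable variant of the kernel is sketched below purely as an illustration; the backing array name, the _fixed suffix, and the use of NULL for the other two structs are assumptions.

// Same layout as the struct defined in the files above, repeated so the
// sketch is self-contained.
typedef struct {
  int rows;
  int cols;
  float* vals;
} cuda_matrix_t;

// Illustrative fix: back the pointer with real storage instead of a brace list.
__global__ void matrix_test_fixed() {
  float a_vals[6] = {0, 0, 0, 0, 0, 0};   // 2 x 3 elements
  cuda_matrix_t A = {2, 3, a_vals};
  cuda_matrix_t B = {2, 3, NULL};
  cuda_matrix_t C = {2, 3, NULL};
  (void)A; (void)B; (void)C;              // silence unused-variable warnings
}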
039acefde7714cc2c6aa38f834121de1c52a0815.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <cuml/fil/fil.h> #include <gtest/gtest.h> #include <test_utils.h> #include <treelite/c_api.h> #include <treelite/frontend.h> #include <treelite/tree.h> #include <cmath> #include <cstdio> #include <cuda_utils.cuh> #include <limits> #include <memory> #include <random/rng.cuh> #include <utility> #define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error") namespace ML { using namespace MLCommon; namespace tl = treelite; namespace tlf = treelite::frontend; struct FilTestParams { // input data parameters int num_rows; int num_cols; float nan_prob; // forest parameters int depth; int num_trees; float leaf_prob; // output parameters fil::output_t output; float threshold; float global_bias; // runtime parameters fil::algo_t algo; int seed; float tolerance; // treelite parameters, only used for treelite tests tl::Operator op; fil::leaf_algo_t leaf_algo; // when FLOAT_UNARY_BINARY == leaf_algo: // num_classes = 1 means it's regression // num_classes = 2 means it's binary classification // (complement probabilities, then use threshold) // when GROVE_PER_CLASS == leaf_algo: // it's multiclass classification (num_classes must be > 2), // done by splitting the forest in num_classes groups, // each of which computes one-vs-all probability for its class. // when CATEGORICAL_LEAF == leaf_algo: // num_classes must be > 1 and it's multiclass classification. // done by storing the class label in each leaf and voting. 
// it's used in treelite ModelBuilder initialization int num_classes; size_t num_proba_outputs() { return num_rows * ::max(num_classes, 2); } size_t num_preds_outputs() { return num_rows; } }; std::string output2str(fil::output_t output) { if (output == fil::RAW) return "RAW"; std::string s = ""; if (output & fil::AVG) s += "| AVG"; if (output & fil::CLASS) s += "| CLASS"; if (output & fil::SIGMOID) s += "| SIGMOID"; return s; } std::ostream& operator<<(std::ostream& os, const FilTestParams& ps) { os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols << ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth << ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob << ", output = " << output2str(ps.output) << ", threshold = " << ps.threshold << ", algo = " << ps.algo << ", seed = " << ps.seed << ", tolerance = " << ps.tolerance << ", op = " << tl::OpName(ps.op) << ", global_bias = " << ps.global_bias << ", leaf_algo = " << ps.leaf_algo << ", num_classes = " << ps.num_classes; return os; } __global__ void nan_kernel(float* data, const bool* mask, int len, float nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } class BaseFilTest : public testing::TestWithParam<FilTestParams> { protected: void setup_helper() { // setup ps = testing::TestWithParam<FilTestParams>::GetParam(); CUDA_CHECK(hipStreamCreate(&stream)); handle.set_stream(stream); generate_forest(); generate_data(); predict_on_cpu(); predict_on_gpu(); } void SetUp() override { setup_helper(); } void TearDown() override { CUDA_CHECK(hipFree(preds_d)); CUDA_CHECK(hipFree(want_preds_d)); CUDA_CHECK(hipFree(data_d)); CUDA_CHECK(hipFree(want_proba_d)); CUDA_CHECK(hipFree(proba_d)); } void generate_forest() { size_t num_nodes = forest_num_nodes(); // helper data /// weights, used as float* or int* int* weights_d = nullptr; float* thresholds_d = nullptr; int* fids_d = nullptr; bool* def_lefts_d = nullptr; bool* is_leafs_d = nullptr; bool* def_lefts_h = nullptr; bool* is_leafs_h = nullptr; // allocate GPU data raft::allocate(weights_d, num_nodes); // sizeof(float) == sizeof(int) raft::allocate(thresholds_d, num_nodes); raft::allocate(fids_d, num_nodes); raft::allocate(def_lefts_d, num_nodes); raft::allocate(is_leafs_d, num_nodes); // generate on-GPU random data raft::random::Rng r(ps.seed); if (ps.leaf_algo != fil::leaf_algo_t::CATEGORICAL_LEAF) { r.uniform((float*)weights_d, num_nodes, -1.0f, 1.0f, stream); } else { // [0..num_classes) r.uniformInt((int*)weights_d, num_nodes, 0, ps.num_classes, stream); } r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream); r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream); r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream); r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream); // copy data to host std::vector<float> thresholds_h(num_nodes); std::vector<int> weights_h(num_nodes), fids_h(num_nodes); def_lefts_h = new bool[num_nodes]; is_leafs_h = new bool[num_nodes]; raft::update_host(weights_h.data(), (int*)weights_d, num_nodes, stream); raft::update_host(thresholds_h.data(), thresholds_d, num_nodes, stream); raft::update_host(fids_h.data(), fids_d, num_nodes, stream); raft::update_host(def_lefts_h, def_lefts_d, num_nodes, stream); raft::update_host(is_leafs_h, is_leafs_d, num_nodes, stream); CUDA_CHECK(hipStreamSynchronize(stream)); // mark leaves for (size_t i = 0; i < ps.num_trees; ++i) { int num_tree_nodes = tree_num_nodes(); 
size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2; size_t leaf_end = num_tree_nodes * (i + 1); for (size_t j = leaf_start; j < leaf_end; ++j) { is_leafs_h[j] = true; } } // initialize nodes nodes.resize(num_nodes); for (size_t i = 0; i < num_nodes; ++i) { fil::val_t w; switch (ps.leaf_algo) { case fil::leaf_algo_t::CATEGORICAL_LEAF: w.idx = weights_h[i]; break; case fil::leaf_algo_t::FLOAT_UNARY_BINARY: case fil::leaf_algo_t::GROVE_PER_CLASS: // not relying on fil::val_t internals // merely that we copied floats into weights_h earlier std::memcpy(&w.f, &weights_h[i], sizeof w.f); break; default: ASSERT(false, "internal error: invalid ps.leaf_algo"); } fil::node_init(&nodes[i], w, thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]); } // clean up delete[] def_lefts_h; delete[] is_leafs_h; CUDA_CHECK(hipFree(is_leafs_d)); CUDA_CHECK(hipFree(def_lefts_d)); CUDA_CHECK(hipFree(fids_d)); CUDA_CHECK(hipFree(thresholds_d)); CUDA_CHECK(hipFree(weights_d)); } void generate_data() { // allocate arrays size_t num_data = ps.num_rows * ps.num_cols; raft::allocate(data_d, num_data); bool* mask_d = nullptr; raft::allocate(mask_d, num_data); // generate random data raft::random::Rng r(ps.seed); r.uniform(data_d, num_data, -1.0f, 1.0f, stream); r.bernoulli(mask_d, num_data, ps.nan_prob, stream); int tpb = 256; hipLaunchKernelGGL(( nan_kernel), dim3(raft::ceildiv(int(num_data), tpb)), dim3(tpb), 0, stream, data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN()); CUDA_CHECK(hipPeekAtLastError()); // copy to host data_h.resize(num_data); raft::update_host(data_h.data(), data_d, num_data, stream); CUDA_CHECK(hipStreamSynchronize(stream)); // clean up CUDA_CHECK(hipFree(mask_d)); } void transform(float f, float& proba, float& output) { if ((ps.output & fil::output_t::AVG) != 0) { f *= (1.0f / ps.num_trees); } f += ps.global_bias; if ((ps.output & fil::output_t::SIGMOID) != 0) { f = sigmoid(f); } proba = f; if ((ps.output & fil::output_t::CLASS) != 0) { f = f > ps.threshold ? 
1.0f : 0.0f; } output = f; } void complement(float* proba) { proba[0] = 1.0f - proba[1]; } void predict_on_cpu() { // predict on host std::vector<float> want_preds_h(ps.num_preds_outputs()); std::vector<float> want_proba_h(ps.num_proba_outputs()); int num_nodes = tree_num_nodes(); std::vector<float> class_scores(ps.num_classes); switch (ps.leaf_algo) { case fil::leaf_algo_t::FLOAT_UNARY_BINARY: for (int i = 0; i < ps.num_rows; ++i) { float pred = 0.0f; for (int j = 0; j < ps.num_trees; ++j) { pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]).f; } transform(pred, want_proba_h[i * 2 + 1], want_preds_h[i]); complement(&(want_proba_h[i * 2])); } break; case fil::leaf_algo_t::GROVE_PER_CLASS: for (int row = 0; row < ps.num_rows; ++row) { std::fill(class_scores.begin(), class_scores.end(), 0.0f); for (int tree = 0; tree < ps.num_trees; ++tree) { class_scores[tree % ps.num_classes] += infer_one_tree(&nodes[tree * num_nodes], &data_h[row * ps.num_cols]) .f; } // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models) want_preds_h[row] = std::max_element(class_scores.begin(), class_scores.end()) - class_scores.begin(); } break; case fil::leaf_algo_t::CATEGORICAL_LEAF: std::vector<int> class_votes(ps.num_classes); for (int r = 0; r < ps.num_rows; ++r) { std::fill(class_votes.begin(), class_votes.end(), 0); for (int j = 0; j < ps.num_trees; ++j) { int class_label = infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols]) .idx; ++class_votes[class_label]; } for (int c = 0; c < ps.num_classes; ++c) { float thresholded_proba; // not used; do argmax instead transform(class_votes[c], want_proba_h[r * ps.num_classes + c], thresholded_proba); } want_preds_h[r] = std::max_element(class_votes.begin(), class_votes.end()) - class_votes.begin(); } break; } // copy to GPU raft::allocate(want_preds_d, ps.num_preds_outputs()); raft::allocate(want_proba_d, ps.num_proba_outputs()); raft::update_device(want_preds_d, want_preds_h.data(), ps.num_preds_outputs(), stream); raft::update_device(want_proba_d, want_proba_h.data(), ps.num_proba_outputs(), stream); CUDA_CHECK(hipStreamSynchronize(stream)); } virtual void init_forest(fil::forest_t* pforest) = 0; void predict_on_gpu() { fil::forest_t forest = nullptr; init_forest(&forest); // predict raft::allocate(preds_d, ps.num_preds_outputs()); raft::allocate(proba_d, ps.num_proba_outputs()); fil::predict(handle, forest, preds_d, data_d, ps.num_rows); // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models) if (ps.leaf_algo != fil::leaf_algo_t::GROVE_PER_CLASS) fil::predict(handle, forest, proba_d, data_d, ps.num_rows, true); CUDA_CHECK(hipStreamSynchronize(stream)); // cleanup fil::free(handle, forest); } void compare() { // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models) if (ps.leaf_algo != fil::leaf_algo_t::GROVE_PER_CLASS) { ASSERT_TRUE( raft::devArrMatch(want_proba_d, proba_d, ps.num_proba_outputs(), raft::CompareApprox<float>(ps.tolerance), stream)); } float tolerance = ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY ? 
ps.tolerance : std::numeric_limits<float>::epsilon(); // in multi-class prediction, floats represent the most likely class // and would be generated by converting an int to float ASSERT_TRUE(raft::devArrMatch(want_preds_d, preds_d, ps.num_rows, raft::CompareApprox<float>(tolerance), stream)); } fil::val_t infer_one_tree(fil::dense_node_t* root, float* data) { int curr = 0; float threshold = 0.0f; fil::val_t output{.f = 0.0f}; int fid = 0; bool def_left = false, is_leaf = false; for (;;) { fil::node_decode(&root[curr], &output, &threshold, &fid, &def_left, &is_leaf); if (is_leaf) break; float val = data[fid]; bool cond = isnan(val) ? !def_left : val >= threshold; curr = (curr << 1) + 1 + (cond ? 1 : 0); } return output; } int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; } int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; } // predictions float* preds_d = nullptr; float* proba_d = nullptr; float* want_preds_d = nullptr; float* want_proba_d = nullptr; // input data float* data_d = nullptr; std::vector<float> data_h; // forest data std::vector<fil::dense_node_t> nodes; // parameters hipStream_t stream; raft::handle_t handle; FilTestParams ps; }; class PredictDenseFilTest : public BaseFilTest { protected: void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_ps; fil_ps.depth = ps.depth; fil_ps.num_trees = ps.num_trees; fil_ps.num_cols = ps.num_cols; fil_ps.algo = ps.algo; fil_ps.output = ps.output; fil_ps.threshold = ps.threshold; fil_ps.global_bias = ps.global_bias; fil_ps.leaf_algo = ps.leaf_algo; fil_ps.num_classes = ps.num_classes; fil::init_dense(handle, pforest, nodes.data(), &fil_ps); } }; template <typename fil_node_t> class BasePredictSparseFilTest : public BaseFilTest { protected: void dense2sparse_node(const fil::dense_node_t* dense_root, int i_dense, int i_sparse_root, int i_sparse) { float threshold; fil::val_t output; int feature; bool def_left, is_leaf; fil::node_decode(&dense_root[i_dense], &output, &threshold, &feature, &def_left, &is_leaf); if (is_leaf) { // leaf sparse node node_init(&sparse_nodes[i_sparse], output, threshold, feature, def_left, is_leaf, 0); return; } // inner sparse node // reserve space for children int left_index = sparse_nodes.size(); sparse_nodes.push_back(fil_node_t()); sparse_nodes.push_back(fil_node_t()); node_init(&sparse_nodes[i_sparse], output, threshold, feature, def_left, is_leaf, left_index - i_sparse_root); dense2sparse_node(dense_root, 2 * i_dense + 1, i_sparse_root, left_index); dense2sparse_node(dense_root, 2 * i_dense + 2, i_sparse_root, left_index + 1); } void dense2sparse_tree(const fil::dense_node_t* dense_root) { int i_sparse_root = sparse_nodes.size(); sparse_nodes.push_back(fil_node_t()); dense2sparse_node(dense_root, 0, i_sparse_root, i_sparse_root); trees.push_back(i_sparse_root); } void dense2sparse() { for (int tree = 0; tree < ps.num_trees; ++tree) { dense2sparse_tree(&nodes[tree * tree_num_nodes()]); } } void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_params; fil_params.num_trees = ps.num_trees; fil_params.num_cols = ps.num_cols; fil_params.algo = ps.algo; fil_params.output = ps.output; fil_params.threshold = ps.threshold; fil_params.global_bias = ps.global_bias; fil_params.leaf_algo = ps.leaf_algo; fil_params.num_classes = ps.num_classes; dense2sparse(); fil_params.num_nodes = sparse_nodes.size(); fil::init_sparse(handle, pforest, trees.data(), sparse_nodes.data(), &fil_params); } std::vector<fil_node_t> sparse_nodes; 
std::vector<int> trees; }; typedef BasePredictSparseFilTest<fil::sparse_node16_t> PredictSparse16FilTest; typedef BasePredictSparseFilTest<fil::sparse_node8_t> PredictSparse8FilTest; class TreeliteFilTest : public BaseFilTest { protected: /** adds nodes[node] of tree starting at index root to builder at index at *pkey, increments *pkey, and returns the treelite key of the node */ int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node) { int key = (*pkey)++; builder->CreateNode(key); int feature; float threshold; fil::val_t output; bool is_leaf, default_left; fil::node_decode(&nodes[node], &output, &threshold, &feature, &default_left, &is_leaf); if (is_leaf) { switch (ps.leaf_algo) { case fil::leaf_algo_t::FLOAT_UNARY_BINARY: case fil::leaf_algo_t::GROVE_PER_CLASS: // default is fil::FLOAT_UNARY_BINARY builder->SetLeafNode(key, output.f); break; case fil::leaf_algo_t::CATEGORICAL_LEAF: std::vector<tl::tl_float> vec(ps.num_classes); for (int i = 0; i < ps.num_classes; ++i) vec[i] = i == output.idx ? 1.0f : 0.0f; builder->SetLeafVectorNode(key, vec); } } else { int left = root + 2 * (node - root) + 1; int right = root + 2 * (node - root) + 2; switch (ps.op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // adjust the threshold threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // adjust the threshold; left and right still need to be swapped threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(left, right); default_left = !default_left; break; default: ASSERT(false, "comparison operator must be <, >, <= or >="); } int left_key = node_to_treelite(builder, pkey, root, left); int right_key = node_to_treelite(builder, pkey, root, right); builder->SetNumericalTestNode(key, feature, ps.op, threshold, default_left, left_key, right_key); } return key; } void init_forest_impl(fil::forest_t* pforest, fil::storage_type_t storage_type) { bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0; int treelite_num_classes = ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY ? 
1 : ps.num_classes; std::unique_ptr<tlf::ModelBuilder> model_builder(new tlf::ModelBuilder( ps.num_cols, treelite_num_classes, random_forest_flag)); // prediction transform if ((ps.output & fil::output_t::SIGMOID) != 0) { model_builder->SetModelParam("pred_transform", "sigmoid"); } else if (ps.leaf_algo != fil::leaf_algo_t::FLOAT_UNARY_BINARY) { model_builder->SetModelParam("pred_transform", "max_index"); ps.output = fil::output_t(ps.output | fil::output_t::CLASS); } else { model_builder->SetModelParam("pred_transform", "identity"); } // global bias char* global_bias_str = nullptr; ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0, "cannot convert global_bias into a string"); model_builder->SetModelParam("global_bias", global_bias_str); free(global_bias_str); // build the trees for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) { tlf::TreeBuilder* tree_builder = new tlf::TreeBuilder(); int key_counter = 0; int root = i_tree * tree_num_nodes(); int root_key = node_to_treelite(tree_builder, &key_counter, root, root); tree_builder->SetRootNode(root_key); // InsertTree() consumes tree_builder TL_CPP_CHECK(model_builder->InsertTree(tree_builder)); } // commit the model std::unique_ptr<tl::Model> model(new tl::Model); model_builder->CommitModel(model.get()); // init FIL forest with the model fil::treelite_params_t params; params.algo = ps.algo; params.threshold = ps.threshold; params.output_class = (ps.output & fil::output_t::CLASS) != 0; params.storage_type = storage_type; fil::from_treelite(handle, pforest, (ModelHandle)model.get(), &params); CUDA_CHECK(hipStreamSynchronize(stream)); } }; class TreeliteDenseFilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::DENSE); } }; class TreeliteSparse16FilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::SPARSE); } }; class TreeliteSparse8FilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::SPARSE8); } }; class TreeliteAutoFilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::AUTO); } }; // test for failures; currently only supported for sparse8 nodes class TreeliteThrowSparse8FilTest : public TreeliteSparse8FilTest { protected: // model import happens in check(), so this function is empty void SetUp() override {} void check() { ASSERT_THROW(setup_helper(), raft::exception); } }; // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> predict_dense_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, 
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 7}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 4}, {20000, 50, 0.05, 8, 50, 0.05, 
fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 4}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 49, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 7}, {20000, 50, 0.05, 8, 52, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 4}, {20000, 50, 0.05, 8, 52, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 4}, }; TEST_P(PredictDenseFilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest, testing::ValuesIn(predict_dense_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> predict_sparse_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 5000}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 6}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), 
fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 2, 5000, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 5000}, {20000, 50, 0.05, 8, 60, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 6}, {20000, 50, 0.05, 8, 51, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 3}, {20000, 50, 0.05, 8, 51, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 3}, }; TEST_P(PredictSparse16FilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse16FilTest, testing::ValuesIn(predict_sparse_inputs)); TEST_P(PredictSparse8FilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse8FilTest, testing::ValuesIn(predict_sparse_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> import_dense_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 
0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::CATEGORICAL_LEAF, 6}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, 
fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::CATEGORICAL_LEAF, 7}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 6}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 48, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::GROVE_PER_CLASS, 6}, {20000, 50, 0.05, 8, 51, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 3}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 49, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 7}, {20000, 50, 0.05, 8, 48, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::GROVE_PER_CLASS, 6}, }; TEST_P(TreeliteDenseFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteDenseFilTest, testing::ValuesIn(import_dense_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> import_sparse_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, 
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::CATEGORICAL_LEAF, 10}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 4}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::GROVE_PER_CLASS, 10}, {20000, 50, 0.05, 8, 52, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::GROVE_PER_CLASS, 4}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 51, 0.05, fil::output_t::CLASS, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::GROVE_PER_CLASS, 3}, }; TEST_P(TreeliteSparse16FilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse16FilTest, testing::ValuesIn(import_sparse_inputs)); TEST_P(TreeliteSparse8FilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse8FilTest, testing::ValuesIn(import_sparse_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> import_auto_inputs = { {20000, 50, 0.05, 10, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 15, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 19, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 19, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 10, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 10, 51, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::GROVE_PER_CLASS, 3}, #if 0 {20000, 50, 0.05, 19, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 6}, #endif }; TEST_P(TreeliteAutoFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteAutoFilTest, testing::ValuesIn(import_auto_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, // FIL implementation, number of classes // adjust test parameters if the sparse8 format changes std::vector<FilTestParams> import_throw_sparse8_inputs = { // to many features {100, 20000, 0.05, 10, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, // too many tree nodes {20000, 50, 0.05, 16, 5, 0, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, 
   fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1},
};

TEST_P(TreeliteThrowSparse8FilTest, Import) { check(); }

INSTANTIATE_TEST_CASE_P(FilTests, TreeliteThrowSparse8FilTest,
                        testing::ValuesIn(import_throw_sparse8_inputs));

}  // namespace ML
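Because the parameter tables in the test file above are brace-initialized positionally, it is easy to lose track of which literal maps to which field. Purely as a reading aid, the annotation below decodes one row; the entry is copied verbatim from predict_dense_inputs above and the field names come straight from the FilTestParams struct.

// {num_rows, num_cols, nan_prob, depth, num_trees, leaf_prob, output,
//  threshold, global_bias, algo, seed, tolerance, op, leaf_algo, num_classes}
// so the first predict_dense_inputs entry
//   {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
//    fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
//    fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}
// reads as: 20000 rows x 50 columns with 5% NaNs, 50 trees of depth 8 with
// leaf probability 0.05, raw (untransformed) output, threshold 0, no global
// bias, NAIVE algorithm, seed 42, tolerance 2e-3, tl::Operator(0) (the
// treelite comparison operator, unused outside the treelite import tests),
// and single-output regression leaves (num_classes = 1).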
039acefde7714cc2c6aa38f834121de1c52a0815.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <cuml/fil/fil.h> #include <gtest/gtest.h> #include <test_utils.h> #include <treelite/c_api.h> #include <treelite/frontend.h> #include <treelite/tree.h> #include <cmath> #include <cstdio> #include <cuda_utils.cuh> #include <limits> #include <memory> #include <random/rng.cuh> #include <utility> #define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error") namespace ML { using namespace MLCommon; namespace tl = treelite; namespace tlf = treelite::frontend; struct FilTestParams { // input data parameters int num_rows; int num_cols; float nan_prob; // forest parameters int depth; int num_trees; float leaf_prob; // output parameters fil::output_t output; float threshold; float global_bias; // runtime parameters fil::algo_t algo; int seed; float tolerance; // treelite parameters, only used for treelite tests tl::Operator op; fil::leaf_algo_t leaf_algo; // when FLOAT_UNARY_BINARY == leaf_algo: // num_classes = 1 means it's regression // num_classes = 2 means it's binary classification // (complement probabilities, then use threshold) // when GROVE_PER_CLASS == leaf_algo: // it's multiclass classification (num_classes must be > 2), // done by splitting the forest in num_classes groups, // each of which computes one-vs-all probability for its class. // when CATEGORICAL_LEAF == leaf_algo: // num_classes must be > 1 and it's multiclass classification. // done by storing the class label in each leaf and voting. 
// it's used in treelite ModelBuilder initialization int num_classes; size_t num_proba_outputs() { return num_rows * std::max(num_classes, 2); } size_t num_preds_outputs() { return num_rows; } }; std::string output2str(fil::output_t output) { if (output == fil::RAW) return "RAW"; std::string s = ""; if (output & fil::AVG) s += "| AVG"; if (output & fil::CLASS) s += "| CLASS"; if (output & fil::SIGMOID) s += "| SIGMOID"; return s; } std::ostream& operator<<(std::ostream& os, const FilTestParams& ps) { os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols << ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth << ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob << ", output = " << output2str(ps.output) << ", threshold = " << ps.threshold << ", algo = " << ps.algo << ", seed = " << ps.seed << ", tolerance = " << ps.tolerance << ", op = " << tl::OpName(ps.op) << ", global_bias = " << ps.global_bias << ", leaf_algo = " << ps.leaf_algo << ", num_classes = " << ps.num_classes; return os; } __global__ void nan_kernel(float* data, const bool* mask, int len, float nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } class BaseFilTest : public testing::TestWithParam<FilTestParams> { protected: void setup_helper() { // setup ps = testing::TestWithParam<FilTestParams>::GetParam(); CUDA_CHECK(cudaStreamCreate(&stream)); handle.set_stream(stream); generate_forest(); generate_data(); predict_on_cpu(); predict_on_gpu(); } void SetUp() override { setup_helper(); } void TearDown() override { CUDA_CHECK(cudaFree(preds_d)); CUDA_CHECK(cudaFree(want_preds_d)); CUDA_CHECK(cudaFree(data_d)); CUDA_CHECK(cudaFree(want_proba_d)); CUDA_CHECK(cudaFree(proba_d)); } void generate_forest() { size_t num_nodes = forest_num_nodes(); // helper data /// weights, used as float* or int* int* weights_d = nullptr; float* thresholds_d = nullptr; int* fids_d = nullptr; bool* def_lefts_d = nullptr; bool* is_leafs_d = nullptr; bool* def_lefts_h = nullptr; bool* is_leafs_h = nullptr; // allocate GPU data raft::allocate(weights_d, num_nodes); // sizeof(float) == sizeof(int) raft::allocate(thresholds_d, num_nodes); raft::allocate(fids_d, num_nodes); raft::allocate(def_lefts_d, num_nodes); raft::allocate(is_leafs_d, num_nodes); // generate on-GPU random data raft::random::Rng r(ps.seed); if (ps.leaf_algo != fil::leaf_algo_t::CATEGORICAL_LEAF) { r.uniform((float*)weights_d, num_nodes, -1.0f, 1.0f, stream); } else { // [0..num_classes) r.uniformInt((int*)weights_d, num_nodes, 0, ps.num_classes, stream); } r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream); r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream); r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream); r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream); // copy data to host std::vector<float> thresholds_h(num_nodes); std::vector<int> weights_h(num_nodes), fids_h(num_nodes); def_lefts_h = new bool[num_nodes]; is_leafs_h = new bool[num_nodes]; raft::update_host(weights_h.data(), (int*)weights_d, num_nodes, stream); raft::update_host(thresholds_h.data(), thresholds_d, num_nodes, stream); raft::update_host(fids_h.data(), fids_d, num_nodes, stream); raft::update_host(def_lefts_h, def_lefts_d, num_nodes, stream); raft::update_host(is_leafs_h, is_leafs_d, num_nodes, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); // mark leaves for (size_t i = 0; i < ps.num_trees; ++i) { int num_tree_nodes = 
tree_num_nodes(); size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2; size_t leaf_end = num_tree_nodes * (i + 1); for (size_t j = leaf_start; j < leaf_end; ++j) { is_leafs_h[j] = true; } } // initialize nodes nodes.resize(num_nodes); for (size_t i = 0; i < num_nodes; ++i) { fil::val_t w; switch (ps.leaf_algo) { case fil::leaf_algo_t::CATEGORICAL_LEAF: w.idx = weights_h[i]; break; case fil::leaf_algo_t::FLOAT_UNARY_BINARY: case fil::leaf_algo_t::GROVE_PER_CLASS: // not relying on fil::val_t internals // merely that we copied floats into weights_h earlier std::memcpy(&w.f, &weights_h[i], sizeof w.f); break; default: ASSERT(false, "internal error: invalid ps.leaf_algo"); } fil::node_init(&nodes[i], w, thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]); } // clean up delete[] def_lefts_h; delete[] is_leafs_h; CUDA_CHECK(cudaFree(is_leafs_d)); CUDA_CHECK(cudaFree(def_lefts_d)); CUDA_CHECK(cudaFree(fids_d)); CUDA_CHECK(cudaFree(thresholds_d)); CUDA_CHECK(cudaFree(weights_d)); } void generate_data() { // allocate arrays size_t num_data = ps.num_rows * ps.num_cols; raft::allocate(data_d, num_data); bool* mask_d = nullptr; raft::allocate(mask_d, num_data); // generate random data raft::random::Rng r(ps.seed); r.uniform(data_d, num_data, -1.0f, 1.0f, stream); r.bernoulli(mask_d, num_data, ps.nan_prob, stream); int tpb = 256; nan_kernel<<<raft::ceildiv(int(num_data), tpb), tpb, 0, stream>>>( data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN()); CUDA_CHECK(cudaPeekAtLastError()); // copy to host data_h.resize(num_data); raft::update_host(data_h.data(), data_d, num_data, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); // clean up CUDA_CHECK(cudaFree(mask_d)); } void transform(float f, float& proba, float& output) { if ((ps.output & fil::output_t::AVG) != 0) { f *= (1.0f / ps.num_trees); } f += ps.global_bias; if ((ps.output & fil::output_t::SIGMOID) != 0) { f = sigmoid(f); } proba = f; if ((ps.output & fil::output_t::CLASS) != 0) { f = f > ps.threshold ? 
1.0f : 0.0f; } output = f; } void complement(float* proba) { proba[0] = 1.0f - proba[1]; } void predict_on_cpu() { // predict on host std::vector<float> want_preds_h(ps.num_preds_outputs()); std::vector<float> want_proba_h(ps.num_proba_outputs()); int num_nodes = tree_num_nodes(); std::vector<float> class_scores(ps.num_classes); switch (ps.leaf_algo) { case fil::leaf_algo_t::FLOAT_UNARY_BINARY: for (int i = 0; i < ps.num_rows; ++i) { float pred = 0.0f; for (int j = 0; j < ps.num_trees; ++j) { pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]).f; } transform(pred, want_proba_h[i * 2 + 1], want_preds_h[i]); complement(&(want_proba_h[i * 2])); } break; case fil::leaf_algo_t::GROVE_PER_CLASS: for (int row = 0; row < ps.num_rows; ++row) { std::fill(class_scores.begin(), class_scores.end(), 0.0f); for (int tree = 0; tree < ps.num_trees; ++tree) { class_scores[tree % ps.num_classes] += infer_one_tree(&nodes[tree * num_nodes], &data_h[row * ps.num_cols]) .f; } // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models) want_preds_h[row] = std::max_element(class_scores.begin(), class_scores.end()) - class_scores.begin(); } break; case fil::leaf_algo_t::CATEGORICAL_LEAF: std::vector<int> class_votes(ps.num_classes); for (int r = 0; r < ps.num_rows; ++r) { std::fill(class_votes.begin(), class_votes.end(), 0); for (int j = 0; j < ps.num_trees; ++j) { int class_label = infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols]) .idx; ++class_votes[class_label]; } for (int c = 0; c < ps.num_classes; ++c) { float thresholded_proba; // not used; do argmax instead transform(class_votes[c], want_proba_h[r * ps.num_classes + c], thresholded_proba); } want_preds_h[r] = std::max_element(class_votes.begin(), class_votes.end()) - class_votes.begin(); } break; } // copy to GPU raft::allocate(want_preds_d, ps.num_preds_outputs()); raft::allocate(want_proba_d, ps.num_proba_outputs()); raft::update_device(want_preds_d, want_preds_h.data(), ps.num_preds_outputs(), stream); raft::update_device(want_proba_d, want_proba_h.data(), ps.num_proba_outputs(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); } virtual void init_forest(fil::forest_t* pforest) = 0; void predict_on_gpu() { fil::forest_t forest = nullptr; init_forest(&forest); // predict raft::allocate(preds_d, ps.num_preds_outputs()); raft::allocate(proba_d, ps.num_proba_outputs()); fil::predict(handle, forest, preds_d, data_d, ps.num_rows); // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models) if (ps.leaf_algo != fil::leaf_algo_t::GROVE_PER_CLASS) fil::predict(handle, forest, proba_d, data_d, ps.num_rows, true); CUDA_CHECK(cudaStreamSynchronize(stream)); // cleanup fil::free(handle, forest); } void compare() { // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models) if (ps.leaf_algo != fil::leaf_algo_t::GROVE_PER_CLASS) { ASSERT_TRUE( raft::devArrMatch(want_proba_d, proba_d, ps.num_proba_outputs(), raft::CompareApprox<float>(ps.tolerance), stream)); } float tolerance = ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY ? 
ps.tolerance : std::numeric_limits<float>::epsilon(); // in multi-class prediction, floats represent the most likely class // and would be generated by converting an int to float ASSERT_TRUE(raft::devArrMatch(want_preds_d, preds_d, ps.num_rows, raft::CompareApprox<float>(tolerance), stream)); } fil::val_t infer_one_tree(fil::dense_node_t* root, float* data) { int curr = 0; float threshold = 0.0f; fil::val_t output{.f = 0.0f}; int fid = 0; bool def_left = false, is_leaf = false; for (;;) { fil::node_decode(&root[curr], &output, &threshold, &fid, &def_left, &is_leaf); if (is_leaf) break; float val = data[fid]; bool cond = isnan(val) ? !def_left : val >= threshold; curr = (curr << 1) + 1 + (cond ? 1 : 0); } return output; } int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; } int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; } // predictions float* preds_d = nullptr; float* proba_d = nullptr; float* want_preds_d = nullptr; float* want_proba_d = nullptr; // input data float* data_d = nullptr; std::vector<float> data_h; // forest data std::vector<fil::dense_node_t> nodes; // parameters cudaStream_t stream; raft::handle_t handle; FilTestParams ps; }; class PredictDenseFilTest : public BaseFilTest { protected: void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_ps; fil_ps.depth = ps.depth; fil_ps.num_trees = ps.num_trees; fil_ps.num_cols = ps.num_cols; fil_ps.algo = ps.algo; fil_ps.output = ps.output; fil_ps.threshold = ps.threshold; fil_ps.global_bias = ps.global_bias; fil_ps.leaf_algo = ps.leaf_algo; fil_ps.num_classes = ps.num_classes; fil::init_dense(handle, pforest, nodes.data(), &fil_ps); } }; template <typename fil_node_t> class BasePredictSparseFilTest : public BaseFilTest { protected: void dense2sparse_node(const fil::dense_node_t* dense_root, int i_dense, int i_sparse_root, int i_sparse) { float threshold; fil::val_t output; int feature; bool def_left, is_leaf; fil::node_decode(&dense_root[i_dense], &output, &threshold, &feature, &def_left, &is_leaf); if (is_leaf) { // leaf sparse node node_init(&sparse_nodes[i_sparse], output, threshold, feature, def_left, is_leaf, 0); return; } // inner sparse node // reserve space for children int left_index = sparse_nodes.size(); sparse_nodes.push_back(fil_node_t()); sparse_nodes.push_back(fil_node_t()); node_init(&sparse_nodes[i_sparse], output, threshold, feature, def_left, is_leaf, left_index - i_sparse_root); dense2sparse_node(dense_root, 2 * i_dense + 1, i_sparse_root, left_index); dense2sparse_node(dense_root, 2 * i_dense + 2, i_sparse_root, left_index + 1); } void dense2sparse_tree(const fil::dense_node_t* dense_root) { int i_sparse_root = sparse_nodes.size(); sparse_nodes.push_back(fil_node_t()); dense2sparse_node(dense_root, 0, i_sparse_root, i_sparse_root); trees.push_back(i_sparse_root); } void dense2sparse() { for (int tree = 0; tree < ps.num_trees; ++tree) { dense2sparse_tree(&nodes[tree * tree_num_nodes()]); } } void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_params; fil_params.num_trees = ps.num_trees; fil_params.num_cols = ps.num_cols; fil_params.algo = ps.algo; fil_params.output = ps.output; fil_params.threshold = ps.threshold; fil_params.global_bias = ps.global_bias; fil_params.leaf_algo = ps.leaf_algo; fil_params.num_classes = ps.num_classes; dense2sparse(); fil_params.num_nodes = sparse_nodes.size(); fil::init_sparse(handle, pforest, trees.data(), sparse_nodes.data(), &fil_params); } std::vector<fil_node_t> sparse_nodes; 
std::vector<int> trees; }; typedef BasePredictSparseFilTest<fil::sparse_node16_t> PredictSparse16FilTest; typedef BasePredictSparseFilTest<fil::sparse_node8_t> PredictSparse8FilTest; class TreeliteFilTest : public BaseFilTest { protected: /** adds nodes[node] of tree starting at index root to builder at index at *pkey, increments *pkey, and returns the treelite key of the node */ int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node) { int key = (*pkey)++; builder->CreateNode(key); int feature; float threshold; fil::val_t output; bool is_leaf, default_left; fil::node_decode(&nodes[node], &output, &threshold, &feature, &default_left, &is_leaf); if (is_leaf) { switch (ps.leaf_algo) { case fil::leaf_algo_t::FLOAT_UNARY_BINARY: case fil::leaf_algo_t::GROVE_PER_CLASS: // default is fil::FLOAT_UNARY_BINARY builder->SetLeafNode(key, output.f); break; case fil::leaf_algo_t::CATEGORICAL_LEAF: std::vector<tl::tl_float> vec(ps.num_classes); for (int i = 0; i < ps.num_classes; ++i) vec[i] = i == output.idx ? 1.0f : 0.0f; builder->SetLeafVectorNode(key, vec); } } else { int left = root + 2 * (node - root) + 1; int right = root + 2 * (node - root) + 2; switch (ps.op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // adjust the threshold threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // adjust the threshold; left and right still need to be swapped threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(left, right); default_left = !default_left; break; default: ASSERT(false, "comparison operator must be <, >, <= or >="); } int left_key = node_to_treelite(builder, pkey, root, left); int right_key = node_to_treelite(builder, pkey, root, right); builder->SetNumericalTestNode(key, feature, ps.op, threshold, default_left, left_key, right_key); } return key; } void init_forest_impl(fil::forest_t* pforest, fil::storage_type_t storage_type) { bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0; int treelite_num_classes = ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY ? 
1 : ps.num_classes; std::unique_ptr<tlf::ModelBuilder> model_builder(new tlf::ModelBuilder( ps.num_cols, treelite_num_classes, random_forest_flag)); // prediction transform if ((ps.output & fil::output_t::SIGMOID) != 0) { model_builder->SetModelParam("pred_transform", "sigmoid"); } else if (ps.leaf_algo != fil::leaf_algo_t::FLOAT_UNARY_BINARY) { model_builder->SetModelParam("pred_transform", "max_index"); ps.output = fil::output_t(ps.output | fil::output_t::CLASS); } else { model_builder->SetModelParam("pred_transform", "identity"); } // global bias char* global_bias_str = nullptr; ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0, "cannot convert global_bias into a string"); model_builder->SetModelParam("global_bias", global_bias_str); free(global_bias_str); // build the trees for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) { tlf::TreeBuilder* tree_builder = new tlf::TreeBuilder(); int key_counter = 0; int root = i_tree * tree_num_nodes(); int root_key = node_to_treelite(tree_builder, &key_counter, root, root); tree_builder->SetRootNode(root_key); // InsertTree() consumes tree_builder TL_CPP_CHECK(model_builder->InsertTree(tree_builder)); } // commit the model std::unique_ptr<tl::Model> model(new tl::Model); model_builder->CommitModel(model.get()); // init FIL forest with the model fil::treelite_params_t params; params.algo = ps.algo; params.threshold = ps.threshold; params.output_class = (ps.output & fil::output_t::CLASS) != 0; params.storage_type = storage_type; fil::from_treelite(handle, pforest, (ModelHandle)model.get(), &params); CUDA_CHECK(cudaStreamSynchronize(stream)); } }; class TreeliteDenseFilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::DENSE); } }; class TreeliteSparse16FilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::SPARSE); } }; class TreeliteSparse8FilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::SPARSE8); } }; class TreeliteAutoFilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::AUTO); } }; // test for failures; currently only supported for sparse8 nodes class TreeliteThrowSparse8FilTest : public TreeliteSparse8FilTest { protected: // model import happens in check(), so this function is empty void SetUp() override {} void check() { ASSERT_THROW(setup_helper(), raft::exception); } }; // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> predict_dense_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, 
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 7}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 4}, {20000, 50, 0.05, 8, 50, 0.05, 
fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 4}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 49, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 7}, {20000, 50, 0.05, 8, 52, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 4}, {20000, 50, 0.05, 8, 52, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 4}, }; TEST_P(PredictDenseFilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest, testing::ValuesIn(predict_dense_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> predict_sparse_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 5000}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 6}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), 
fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 2, 5000, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 5000}, {20000, 50, 0.05, 8, 60, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 6}, {20000, 50, 0.05, 8, 51, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 3}, {20000, 50, 0.05, 8, 51, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0), fil::leaf_algo_t::GROVE_PER_CLASS, 3}, }; TEST_P(PredictSparse16FilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse16FilTest, testing::ValuesIn(predict_sparse_inputs)); TEST_P(PredictSparse8FilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse8FilTest, testing::ValuesIn(predict_sparse_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> import_dense_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 
0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::CATEGORICAL_LEAF, 6}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, 
fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::CATEGORICAL_LEAF, 7}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 6}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 48, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::GROVE_PER_CLASS, 6}, {20000, 50, 0.05, 8, 51, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 3}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 49, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 7}, {20000, 50, 0.05, 8, 48, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::GROVE_PER_CLASS, 6}, }; TEST_P(TreeliteDenseFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteDenseFilTest, testing::ValuesIn(import_dense_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> import_sparse_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 2}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5, 
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::CATEGORICAL_LEAF, 10}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 4}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::CATEGORICAL_LEAF, 5}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 1.0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE, fil::leaf_algo_t::GROVE_PER_CLASS, 10}, {20000, 50, 0.05, 8, 52, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::GROVE_PER_CLASS, 4}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE, fil::leaf_algo_t::GROVE_PER_CLASS, 5}, {20000, 50, 0.05, 8, 51, 0.05, fil::output_t::CLASS, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::GROVE_PER_CLASS, 3}, }; TEST_P(TreeliteSparse16FilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse16FilTest, testing::ValuesIn(import_sparse_inputs)); TEST_P(TreeliteSparse8FilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse8FilTest, testing::ValuesIn(import_sparse_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes std::vector<FilTestParams> import_auto_inputs = { {20000, 50, 0.05, 10, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 15, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 19, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 19, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, {20000, 50, 0.05, 10, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 3}, {20000, 50, 0.05, 10, 51, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::GROVE_PER_CLASS, 3}, #if 0 {20000, 50, 0.05, 19, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::CATEGORICAL_LEAF, 6}, #endif }; TEST_P(TreeliteAutoFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteAutoFilTest, testing::ValuesIn(import_auto_inputs)); // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance, branch comparison operator, // FIL implementation, number of classes // adjust test parameters if the sparse8 format changes std::vector<FilTestParams> import_throw_sparse8_inputs = { // to many features {100, 20000, 0.05, 10, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, // too many tree nodes {20000, 50, 0.05, 16, 5, 0, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT, 
fil::leaf_algo_t::FLOAT_UNARY_BINARY, 1}, }; TEST_P(TreeliteThrowSparse8FilTest, Import) { check(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteThrowSparse8FilTest, testing::ValuesIn(import_throw_sparse8_inputs)); } // namespace ML
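The FIL test fixtures above build Treelite models in code and hand them to FIL through fil::from_treelite. As a standalone illustration only (this sketch is not part of the test file), the import call they exercise looks roughly as follows; `handle` (a raft/cuML handle), `model_handle` (a Treelite ModelHandle), and the chosen parameter values are assumptions for the example, and only fields that appear in init_forest_impl above are used.

// Sketch of the FIL import path exercised by the tests above (illustrative, not from the source).
fil::treelite_params_t params;
params.algo = fil::algo_t::ALGO_AUTO;             // let FIL choose NAIVE / TREE_REORG / BATCH_TREE_REORG
params.threshold = 0.5f;                          // only consulted when output_class is true
params.output_class = true;                       // report class labels rather than raw scores
params.storage_type = fil::storage_type_t::AUTO;  // dense or sparse node storage, chosen by FIL

fil::forest_t forest = nullptr;
fil::from_treelite(handle, &forest, model_handle, &params);
// ... the forest can then be used for prediction and must be released when done ...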
22308faa8d1412868a3933d640f287439b7c99e9.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Date: 2018-12-7
 * Time: 15:15
 * Author: 杨丰拓
 */
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include "gaussBlur.cuh"
using namespace std;
using namespace cv;
/* @property kernel function
 * @func splits the input image into its three color channels
 * @param_in d_inputImageRGBA input image
 * @param_in rows number of image rows
 * @param_in cols number of image columns
 * @param_out d_red extracted red channel
 * @param_out d_green extracted green channel
 * @param_out d_blue extracted blue channel
 */
__global__ void separate_channels(const uchar4* const d_inputImageRGBA,unsigned char* const d_red,
                                  unsigned char* const d_green, unsigned char* const d_blue,
                                  int rows,int cols)
{
    const int r=blockIdx.y*blockDim.y+threadIdx.y;
    const int c=blockIdx.x*blockDim.x+threadIdx.x;
    int idx=r*cols+c;
    if(r>=rows||c>=cols)
    {
        return;
    }
    d_red[idx]=d_inputImageRGBA[idx].x;
    d_green[idx]=d_inputImageRGBA[idx].y;
    d_blue[idx]=d_inputImageRGBA[idx].z;
}
/* @property kernel function
 * @func applies Gaussian blur to a single color channel
 * @param_in d_channel single color channel
 * @param_in d_filter Gaussian kernel
 * @param_in d_filterWidth side length of the Gaussian kernel
 * @param_in rows number of image rows
 * @param_in cols number of image columns
 * @param_out d_channelBlurred color channel after Gaussian filtering
 */
__global__ void gaussblur(const unsigned char* const d_channel, unsigned char* const d_channelBlurred ,
                          float* d_filter,int rows,int cols,const int filterWidth)
{
    const int r=blockIdx.y*blockDim.y+threadIdx.y;
    const int c=blockIdx.x*blockDim.x+threadIdx.x;
    const int idx=r*cols+c;
    float color = 0.0f;
    for (int filter_y = 0; filter_y < filterWidth; filter_y++)
    {
        for (int filter_x = 0; filter_x < filterWidth; filter_x++)
        {
            int image_x = c + filter_x - filterWidth / 2;
            int image_y = r + filter_y - filterWidth / 2;
            image_x = min(max(image_x , 0), cols - 1);
            image_y = min(max(image_y , 0), rows - 1);
            float filter_value = d_filter[filter_y*filterWidth + filter_x];
            color += filter_value * static_cast<float>(d_channel[image_y *cols + image_x]);
        }
    }
    d_channelBlurred[idx] = color;
}
/* @property kernel function
 * @func recombines the three Gaussian-blurred color channels
 * @param_in d_redBlurred red channel after Gaussian filtering
 * @param_in d_greenBlurred green channel after Gaussian filtering
 * @param_in d_blueBlurred blue channel after Gaussian filtering
 * @param_in rows number of image rows
 * @param_in cols number of image columns
 * @param_out d_outputImageRGBA image with the channels recombined
 */
__global__ void recombineChannels(const unsigned char* const d_redBlurred,const unsigned char* const d_greenBlurred,
                                  const unsigned char* const d_blueBlurred, uchar4* const d_outputImageRGBA,
                                  int rows,int cols)
{
    const int r=blockIdx.y*blockDim.y+threadIdx.y;
    const int c=blockIdx.x*blockDim.x+threadIdx.x;
    const int idx=r*cols+c;
    if(r>=rows||c>=cols)
    {
        return;
    }
    unsigned char red = d_redBlurred[idx];
    unsigned char green = d_greenBlurred[idx];
    unsigned char blue = d_blueBlurred[idx];
    uchar4 outputPixel = make_uchar4(red, green, blue, 255);
    d_outputImageRGBA[idx] = outputPixel;
}
cv::Mat gauss_blur(cv::Mat inputImage,cv::Mat inputTMP,const float *h_filter, int filterWidth)
{
    uchar4 *h_inputImageRGBA,*h_outputImage;
    uchar4 *d_inputImageRGBA,*d_outputImageRGBA;
    unsigned char *d_red,*d_green,*d_blue,*d_redBlurred,*d_greenBlurred,*d_blueBlurred;
    float *d_filter;
    h_inputImageRGBA=(uchar4 *)inputImage.ptr<unsigned char>(0);
    h_outputImage=(uchar4 *)inputTMP.ptr<unsigned char>(0);
    const int rows=inputImage.rows;
    const int cols=inputImage.cols;
    const size_t numPixels=rows*cols;
    /*************************************************************************
     *~~~~~~~~~~~~~~~~~~~~~~~ allocate device memory ~~~~~~~~~~~~~~~~~~~~~~~~~*
     *************************************************************************/
    hipMalloc((void **)&d_inputImageRGBA,sizeof(uchar4)*numPixels);
    hipMalloc((void **)&d_outputImageRGBA,sizeof(uchar4)*numPixels);
    hipMalloc((void **)&d_red,sizeof(unsigned char)*numPixels);
    hipMalloc((void **)&d_green,sizeof(unsigned char)*numPixels);
    hipMalloc((void **)&d_blue,sizeof(unsigned char)*numPixels);
    hipMalloc((void **)&d_redBlurred,sizeof(unsigned char)*numPixels);
    hipMalloc((void **)&d_greenBlurred,sizeof(unsigned char)*numPixels);
    hipMalloc((void **)&d_blueBlurred,sizeof(unsigned char)*numPixels);
    hipMalloc((void **)&d_filter,sizeof(float)*filterWidth*filterWidth);
    // copy h_inputImageRGBA on the CPU to d_inputImageRGBA on the GPU
    hipMemcpy(d_inputImageRGBA,h_inputImageRGBA,sizeof(uchar4)*numPixels,hipMemcpyHostToDevice);
    // copy h_filter on the CPU to d_filter on the GPU
    hipMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,hipMemcpyHostToDevice);
    const dim3 blockSize(32,32,1);
    const dim3 gridSize((cols-1+32)/32,(rows-1+32)/32,1);
    // split the input image into three color channels
    hipLaunchKernelGGL(( separate_channels), dim3(gridSize),dim3(blockSize), 0, 0, d_inputImageRGBA,d_red,d_green,d_blue,rows,cols);
    hipDeviceSynchronize();
    // Gaussian-blur each color channel
    hipLaunchKernelGGL(( gaussblur), dim3(gridSize),dim3(blockSize), 0, 0, d_red,d_redBlurred,d_filter,rows,cols,filterWidth);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( gaussblur), dim3(gridSize),dim3(blockSize), 0, 0, d_green,d_greenBlurred,d_filter,rows,cols,filterWidth);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( gaussblur), dim3(gridSize),dim3(blockSize), 0, 0, d_blue,d_blueBlurred,d_filter,rows,cols,filterWidth);
    hipDeviceSynchronize();
    // recombine the three Gaussian-blurred color channels
    hipLaunchKernelGGL(( recombineChannels), dim3(gridSize),dim3(blockSize), 0, 0, d_redBlurred,d_greenBlurred,d_blueBlurred,d_outputImageRGBA,rows,cols);
    hipDeviceSynchronize();
    // copy d_outputImageRGBA on the GPU back to h_outputImage on the CPU
    hipMemcpy(h_outputImage,d_outputImageRGBA,sizeof(uchar4)*numPixels,hipMemcpyDeviceToHost);
    Mat output(rows,cols,CV_8UC4,h_outputImage);
    /***********************************************************************************
     *~~~~~~~~~~~~~~~~~~~~~~~~~~~~ free device memory ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*
     ***********************************************************************************/
    hipFree(d_red);
    hipFree(d_green);
    hipFree(d_blue);
    hipFree(d_redBlurred);
    hipFree(d_greenBlurred);
    hipFree(d_blueBlurred);
    hipFree(d_filter);
    hipFree(d_inputImageRGBA);
    hipFree(d_outputImageRGBA);
    return output;
}
22308faa8d1412868a3933d640f287439b7c99e9.cu
/*
 * Date: 2018-12-7
 * Time: 15:15
 * Author: 杨丰拓
 */
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include "gaussBlur.cuh"
using namespace std;
using namespace cv;
/* @property kernel function
 * @func splits the input image into its three color channels
 * @param_in d_inputImageRGBA input image
 * @param_in rows number of image rows
 * @param_in cols number of image columns
 * @param_out d_red extracted red channel
 * @param_out d_green extracted green channel
 * @param_out d_blue extracted blue channel
 */
__global__ void separate_channels(const uchar4* const d_inputImageRGBA,unsigned char* const d_red,
                                  unsigned char* const d_green, unsigned char* const d_blue,
                                  int rows,int cols)
{
    const int r=blockIdx.y*blockDim.y+threadIdx.y;
    const int c=blockIdx.x*blockDim.x+threadIdx.x;
    int idx=r*cols+c;
    if(r>=rows||c>=cols)
    {
        return;
    }
    d_red[idx]=d_inputImageRGBA[idx].x;
    d_green[idx]=d_inputImageRGBA[idx].y;
    d_blue[idx]=d_inputImageRGBA[idx].z;
}
/* @property kernel function
 * @func applies Gaussian blur to a single color channel
 * @param_in d_channel single color channel
 * @param_in d_filter Gaussian kernel
 * @param_in d_filterWidth side length of the Gaussian kernel
 * @param_in rows number of image rows
 * @param_in cols number of image columns
 * @param_out d_channelBlurred color channel after Gaussian filtering
 */
__global__ void gaussblur(const unsigned char* const d_channel, unsigned char* const d_channelBlurred ,
                          float* d_filter,int rows,int cols,const int filterWidth)
{
    const int r=blockIdx.y*blockDim.y+threadIdx.y;
    const int c=blockIdx.x*blockDim.x+threadIdx.x;
    const int idx=r*cols+c;
    float color = 0.0f;
    for (int filter_y = 0; filter_y < filterWidth; filter_y++)
    {
        for (int filter_x = 0; filter_x < filterWidth; filter_x++)
        {
            int image_x = c + filter_x - filterWidth / 2;
            int image_y = r + filter_y - filterWidth / 2;
            image_x = min(max(image_x , 0), cols - 1);
            image_y = min(max(image_y , 0), rows - 1);
            float filter_value = d_filter[filter_y*filterWidth + filter_x];
            color += filter_value * static_cast<float>(d_channel[image_y *cols + image_x]);
        }
    }
    d_channelBlurred[idx] = color;
}
/* @property kernel function
 * @func recombines the three Gaussian-blurred color channels
 * @param_in d_redBlurred red channel after Gaussian filtering
 * @param_in d_greenBlurred green channel after Gaussian filtering
 * @param_in d_blueBlurred blue channel after Gaussian filtering
 * @param_in rows number of image rows
 * @param_in cols number of image columns
 * @param_out d_outputImageRGBA image with the channels recombined
 */
__global__ void recombineChannels(const unsigned char* const d_redBlurred,const unsigned char* const d_greenBlurred,
                                  const unsigned char* const d_blueBlurred, uchar4* const d_outputImageRGBA,
                                  int rows,int cols)
{
    const int r=blockIdx.y*blockDim.y+threadIdx.y;
    const int c=blockIdx.x*blockDim.x+threadIdx.x;
    const int idx=r*cols+c;
    if(r>=rows||c>=cols)
    {
        return;
    }
    unsigned char red = d_redBlurred[idx];
    unsigned char green = d_greenBlurred[idx];
    unsigned char blue = d_blueBlurred[idx];
    uchar4 outputPixel = make_uchar4(red, green, blue, 255);
    d_outputImageRGBA[idx] = outputPixel;
}
cv::Mat gauss_blur(cv::Mat inputImage,cv::Mat inputTMP,const float *h_filter, int filterWidth)
{
    uchar4 *h_inputImageRGBA,*h_outputImage;
    uchar4 *d_inputImageRGBA,*d_outputImageRGBA;
    unsigned char *d_red,*d_green,*d_blue,*d_redBlurred,*d_greenBlurred,*d_blueBlurred;
    float *d_filter;
    h_inputImageRGBA=(uchar4 *)inputImage.ptr<unsigned char>(0);
    h_outputImage=(uchar4 *)inputTMP.ptr<unsigned char>(0);
    const int rows=inputImage.rows;
    const int cols=inputImage.cols;
    const size_t numPixels=rows*cols;
    /*************************************************************************
     *~~~~~~~~~~~~~~~~~~~~~~~ allocate device memory ~~~~~~~~~~~~~~~~~~~~~~~~~*
     *************************************************************************/
    cudaMalloc((void **)&d_inputImageRGBA,sizeof(uchar4)*numPixels);
    cudaMalloc((void **)&d_outputImageRGBA,sizeof(uchar4)*numPixels);
    cudaMalloc((void **)&d_red,sizeof(unsigned char)*numPixels);
    cudaMalloc((void **)&d_green,sizeof(unsigned char)*numPixels);
    cudaMalloc((void **)&d_blue,sizeof(unsigned char)*numPixels);
    cudaMalloc((void **)&d_redBlurred,sizeof(unsigned char)*numPixels);
    cudaMalloc((void **)&d_greenBlurred,sizeof(unsigned char)*numPixels);
    cudaMalloc((void **)&d_blueBlurred,sizeof(unsigned char)*numPixels);
    cudaMalloc((void **)&d_filter,sizeof(float)*filterWidth*filterWidth);
    // copy h_inputImageRGBA on the CPU to d_inputImageRGBA on the GPU
    cudaMemcpy(d_inputImageRGBA,h_inputImageRGBA,sizeof(uchar4)*numPixels,cudaMemcpyHostToDevice);
    // copy h_filter on the CPU to d_filter on the GPU
    cudaMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,cudaMemcpyHostToDevice);
    const dim3 blockSize(32,32,1);
    const dim3 gridSize((cols-1+32)/32,(rows-1+32)/32,1);
    // split the input image into three color channels
    separate_channels<<<gridSize,blockSize>>>(d_inputImageRGBA,d_red,d_green,d_blue,rows,cols);
    cudaDeviceSynchronize();
    // Gaussian-blur each color channel
    gaussblur<<<gridSize,blockSize>>>(d_red,d_redBlurred,d_filter,rows,cols,filterWidth);
    cudaDeviceSynchronize();
    gaussblur<<<gridSize,blockSize>>>(d_green,d_greenBlurred,d_filter,rows,cols,filterWidth);
    cudaDeviceSynchronize();
    gaussblur<<<gridSize,blockSize>>>(d_blue,d_blueBlurred,d_filter,rows,cols,filterWidth);
    cudaDeviceSynchronize();
    // recombine the three Gaussian-blurred color channels
    recombineChannels<<<gridSize,blockSize>>>(d_redBlurred,d_greenBlurred,d_blueBlurred,d_outputImageRGBA,rows,cols);
    cudaDeviceSynchronize();
    // copy d_outputImageRGBA on the GPU back to h_outputImage on the CPU
    cudaMemcpy(h_outputImage,d_outputImageRGBA,sizeof(uchar4)*numPixels,cudaMemcpyDeviceToHost);
    Mat output(rows,cols,CV_8UC4,h_outputImage);
    /***********************************************************************************
     *~~~~~~~~~~~~~~~~~~~~~~~~~~~~ free device memory ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*
     ***********************************************************************************/
    cudaFree(d_red);
    cudaFree(d_green);
    cudaFree(d_blue);
    cudaFree(d_redBlurred);
    cudaFree(d_greenBlurred);
    cudaFree(d_blueBlurred);
    cudaFree(d_filter);
    cudaFree(d_inputImageRGBA);
    cudaFree(d_outputImageRGBA);
    return output;
}
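For illustration, a host-side caller of the gauss_blur entry point defined in the pair above might look like the sketch below. This sketch is not part of either file: the main() wrapper, the file names, and the sigma/filterWidth values are invented for the example, and it assumes OpenCV and the gaussBlur.cuh header are available; only the gauss_blur signature itself comes from the code above.

// Illustrative host-side usage of gauss_blur (not part of the original files).
#include <cmath>
#include <vector>
#include <opencv2/opencv.hpp>
#include "gaussBlur.cuh"

int main() {
    // gauss_blur expects a 4-channel (uchar4-per-pixel) image plus a scratch Mat of the same size.
    cv::Mat bgr = cv::imread("input.jpg");   // hypothetical input image
    cv::Mat rgba, scratch;
    cv::cvtColor(bgr, rgba, cv::COLOR_BGR2RGBA);
    scratch = rgba.clone();                  // inputTMP: its buffer receives the blurred pixels

    // Build a normalized filterWidth x filterWidth Gaussian kernel on the host.
    const int filterWidth = 9;               // example value
    const float sigma = 2.0f;                // example value
    std::vector<float> h_filter(filterWidth * filterWidth);
    float sum = 0.0f;
    for (int y = 0; y < filterWidth; ++y) {
        for (int x = 0; x < filterWidth; ++x) {
            float dy = y - filterWidth / 2;
            float dx = x - filterWidth / 2;
            float v = std::exp(-(dx * dx + dy * dy) / (2.0f * sigma * sigma));
            h_filter[y * filterWidth + x] = v;
            sum += v;
        }
    }
    for (float& v : h_filter) v /= sum;      // weights sum to 1 so overall brightness is preserved

    // Run the per-channel GPU blur and write the result.
    cv::Mat blurred = gauss_blur(rgba, scratch, h_filter.data(), filterWidth);
    cv::imwrite("blurred.png", blurred);
    return 0;
}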
f7fabae2dbbb683c32675a5514fbf92b49a3665c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, c, batch_idx; if (!channel_last) { /*NCHW*/ pw = index % output_width; ph = (index / output_width) % output_height; c = (index / output_width / output_height) % channels; batch_idx = index / output_width / output_height / channels; } else { /*NHWC*/ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; batch_idx = index / channels / output_width / output_height; } int hstart, hend; int wstart, wend; if (adaptive) { hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { hstart = ph * stride_height - padding_height; hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); wstart = pw * stride_width - padding_width; wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); } if (!channel_last) { input_data += (batch_idx * channels + c) * input_height * input_width; } else { input_data += batch_idx * input_height * input_width * channels; } T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { auto input_idx = channel_last ? (h * input_width + w) * channels + c : h * input_width + w; pool_process.compute(input_data[input_idx], &ele); } } int pool_size = (exclusive || adaptive) ? 
(hend - hstart) * (wend - wstart) : ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset, h_offset, offsetC, batch_idx; if (!channel_last) { /* NCHW */ w_offset = index % input_width + padding_width; h_offset = (index / input_width) % input_height + padding_height; offsetC = (index / input_width / input_height) % channels; batch_idx = index / input_width / input_height / channels; } else { /* NHWC */ offsetC = index % channels; w_offset = (index / channels) % input_width + padding_width; h_offset = (index / channels / input_width) % input_height + padding_height; batch_idx = index / channels / input_width / input_height; } int phstart, phend; int pwstart, pwend; if (adaptive) { phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { phstart = (h_offset < ksize_height) ? 0 : (h_offset - ksize_height) / stride_height + 1; pwstart = (w_offset < ksize_width) ? 0 : (w_offset - ksize_width) / stride_width + 1; phend = min(h_offset / stride_height + 1, output_height); pwend = min(w_offset / stride_width + 1, output_width); } T gradient = 0; T input = input_data[index]; int output_stride; if (!channel_last) { output_stride = (batch_idx * channels + offsetC) * output_height * output_width; } else { output_stride = batch_idx * output_height * output_width * channels; } output_data += output_stride; output_grad += output_stride; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int pool_size; if (adaptive) { pool_size = static_cast<int>(ceil(static_cast<double>(input_height) / ksize_height)) * static_cast<int>( ceil(static_cast<double>(input_width) / ksize_width)); } else { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; } int output_sub_idx = channel_last ? 
(ph * output_width + pw) * channels + offsetC : ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, c, batch_idx; if (!channel_last) { /* NCHW */ pw = index % output_width; ph = (index / output_width) % output_height; c = (index / output_width / output_height) % channels; batch_idx = index / output_width / output_height / channels; } else { /* NHWC */ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; batch_idx = index / channels / output_width / output_height; } int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); int input_stride; if (!channel_last) { input_stride = (batch_idx * channels + c) * input_height * input_width; } else { input_stride = batch_idx * input_height * input_width * channels; } input_data += input_stride; input_grad += input_stride; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { int input_data_idx = channel_last ? 
(h * input_width + w) * channels + c : h * input_width + w; if (ele == input_data[input_data_idx]) { maxIndex = input_data_idx; stop = true; } } } if (maxIndex != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } template <typename PoolProcess, typename T> void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()( const T* input, const std::vector<int>& input_shape, const std::vector<int>& output_shape, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_compute, bool exclusive, bool adaptive, T* output, hipStream_t stream) { const int batch_size = input_shape[0]; const int input_channels = input_shape[1]; const int input_height = input_shape[2]; const int input_width = input_shape[3]; const int output_channels = output_shape[1]; const int output_height = output_shape[2]; const int output_width = output_shape[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, stream, nthreads, input, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_compute, exclusive, adaptive, output); } /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, output_data); } void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output->dims()[3] : output->dims()[1]; const int output_height = channel_last ? output->dims()[1] : output->dims()[2]; const int output_width = channel_last ? output->dims()[2] : output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, output_data, channel_last); } }; /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output.dims()[3] : output.dims()[1]; const int output_height = channel_last ? output.dims()[1] : output.dims()[2]; const int output_width = channel_last ? 
output.dims()[2] : output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data, channel_last); } }; /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. */ template <typename T> class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, framework::Tensor* input_grad) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? 
input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output.dims()[3] : output.dims()[1]; const int output_height = channel_last ? output.dims()[1] : output.dims()[2]; const int output_width = channel_last ? output.dims()[2] : output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data, channel_last); } }; template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>, float>; template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, pd, c, batch_idx; if (!channel_last) { pw = index % output_width; ph = (index / output_width) % output_height; pd = (index / output_width / output_height) % output_depth; c = (index / output_width / output_height / output_depth) % channels; batch_idx = index / output_width / output_height / output_depth / channels; } else { c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / 
output_width) % output_height; pd = (index / channels / output_width / output_height) % output_depth; batch_idx = index / channels / output_width / output_height / output_depth; } int dstart, dend; int hstart, hend; int wstart, wend; if (adaptive) { dstart = AdaptStartIndex(pd, input_depth, output_depth); dend = AdaptEndIndex(pd, input_depth, output_depth); hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { dstart = pd * stride_depth - padding_depth; hstart = ph * stride_height - padding_height; wstart = pw * stride_width - padding_width; dend = min(dstart + ksize_depth, input_depth); hend = min(hstart + ksize_height, input_height); wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); } int input_data_stride; if (!channel_last) { /* NCDHW */ input_data_stride = (batch_idx * channels + c) * input_depth * input_height * input_width; } else { /* NDHWC */ input_data_stride = batch_idx * input_depth * input_height * input_width * channels; } input_data += input_data_stride; T ele = pool_process.initial(); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { auto input_data_idx = channel_last ? ((d * input_height + h) * input_width + w) * channels + c : (d * input_height + h) * input_width + w; pool_process.compute(input_data[input_data_idx], &ele); } } } int pool_size = (exclusive || adaptive) ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset, h_offset, d_offset, offsetC, batch_idx; if (!channel_last) { /* "NCDHW" */ w_offset = index % input_width + padding_width; h_offset = (index / input_width) % input_height + padding_height; d_offset = (index / input_width / input_height) % input_depth + padding_depth; offsetC = (index / input_width / input_height / input_depth) % channels; batch_idx = index / input_width / input_height / input_depth / channels; } else { /* "NDHWC" */ offsetC = index % channels; w_offset = (index / channels) % input_width + padding_width; h_offset = (index / channels / input_width) % input_height + padding_height; d_offset = (index / channels / input_width / input_height) % input_depth + padding_depth; batch_idx = index / channels / input_width / input_height / input_depth; } int pdstart, pdend; int phstart, phend; int pwstart, pwend; if (adaptive) { pdstart = d_offset * output_depth / input_depth; pdend = min((d_offset + 1) * output_depth / input_depth + 1, output_depth); phstart = 
h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { pdstart = (d_offset < ksize_depth) ? 0 : (d_offset - ksize_depth) / stride_depth + 1; phstart = (h_offset < ksize_height) ? 0 : (h_offset - ksize_height) / stride_height + 1; pwstart = (w_offset < ksize_width) ? 0 : (w_offset - ksize_width) / stride_width + 1; pdend = min((d_offset) / stride_depth + 1, output_depth); phend = min((h_offset) / stride_height + 1, output_height); pwend = min((w_offset) / stride_width + 1, output_width); } T gradient = 0; T input = input_data[index]; int output_stride; if (!channel_last) { output_stride = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; } else { output_stride = batch_idx * output_depth * output_height * output_width * channels; } output_data += output_stride; output_grad += output_stride; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int pool_size; if (adaptive) { pool_size = static_cast<int>( ceil(static_cast<double>(input_depth) / ksize_depth)) * static_cast<int>( ceil(static_cast<double>(input_height) / ksize_height)) * static_cast<int>( ceil(static_cast<double>(input_width) / ksize_width)); } else { int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); pool_size = exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; } int output_sub_idx = channel_last ? 
((pd * output_height + ph) * output_width + pw) * channels + offsetC : (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, pd, c, batch_idx; if (!channel_last) { /*NCDHW*/ pw = index % output_width; ph = (index / output_width) % output_height; pd = (index / output_width / output_height) % output_depth; c = (index / output_width / output_height / output_depth) % channels; batch_idx = index / output_width / output_height / output_depth / channels; } else { /*NDHWC*/ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; pd = (index / channels / output_width / output_height) % output_depth; batch_idx = index / channels / output_width / output_height / output_depth; } int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool stop = false; int maxIdx = -1; int input_stride; if (!channel_last) { input_stride = (batch_idx * channels + c) * input_depth * input_height * input_width; } else { input_stride = batch_idx * input_depth * input_height * input_width * channels; } input_data += input_stride; input_grad += input_stride; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { int input_data_idx = channel_last ? ((d * input_height + h) * input_width + w) * channels + c : (d * input_height + h) * input_width + w; if (ele == input_data[input_data_idx]) { stop = true; maxIdx = input_data_idx; } } } } if (maxIdx != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } /* * Tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. 
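 * For instance, paddings = {1, 1, 0, 0, 2, 2} (an illustrative value, not taken
 * from this file) pads one plane front and back in depth, nothing in height,
 * and two columns on each side in width.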
*/ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, output_data); } void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output->dims()[4] : output->dims()[1]; const int output_depth = channel_last ? output->dims()[1] : output->dims()[2]; const int output_height = channel_last ? output->dims()[2] : output->dims()[3]; const int output_width = channel_last ? 
output->dims()[3] : output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, output_data, channel_last); } }; /* * Tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. */ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& 
output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output.dims()[4] : output.dims()[1]; const int output_depth = channel_last ? output.dims()[1] : output.dims()[2]; const int output_height = channel_last ? output.dims()[2] : output.dims()[3]; const int output_width = channel_last ? output.dims()[3] : output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data, channel_last); // add channel_last } }; /* * tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. 
*/ template <class T> class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, framework::Tensor* input_grad) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output.dims()[4] : output.dims()[1]; const int output_depth = channel_last ? output.dims()[1] : output.dims()[2]; const int output_height = channel_last ? output.dims()[2] : output.dims()[3]; const int output_width = channel_last ? 
output.dims()[3] : output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data, channel_last); // add channel_last } }; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename T1, typename T2> __global__ void KernelMaxPool2dWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, bool adaptive, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart, hend; int wstart, wend; if (adaptive) { hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { hstart = ph * stride_height - padding_height; hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); wstart = pw * stride_width - padding_width; wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); } input_data += (batch_idx * channels + c) * input_height * 
input_width; T1 ele = -FLT_MAX; int max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * input_width + w; if (ele < input_data[input_index]) { max_index = input_index; ele = input_data[input_index]; } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, bool adaptive, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart, phend; int pwstart, pwend; if (adaptive) { phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { phstart = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; pwstart = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; phend = min((h_offset + padding_height) / stride_height + 1, output_height); pwend = min((w_offset + padding_width) / stride_width + 1, output_width); } T1 gradient = 0; int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; mask_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } input_grad[index] = gradient; } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
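 * For example, ksize = {3, 3}, strides = {1, 1}, paddings = {1, 1}
 * (illustrative values, not from this file) leave the spatial size unchanged,
 * since (H + 2 * 1 - 3) / 1 + 1 == H.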
*/ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2dWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, adaptive, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, adaptive, input_grad_data); } }; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class 
MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, bool adaptive, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart, dend; int hstart, hend; int wstart, wend; if (adaptive) { dstart = AdaptStartIndex(pd, input_depth, output_depth); dend = AdaptEndIndex(pd, input_depth, output_depth); hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { dstart = pd * stride_depth - padding_depth; hstart = ph * stride_height - padding_height; wstart = pw * stride_width - padding_width; dend = min(dstart + ksize_depth, input_depth); hend = min(hstart + ksize_height, input_height); wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); } T1 ele = -FLT_MAX; int max_index = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input_data[(d * input_height + h) * input_width + w]) { max_index = (d * input_height + h) * input_width + w; ele = input_data[max_index]; } } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, bool adaptive, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int d_offset = (index / input_width / input_height) % input_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart, pdend; int phstart, phend; int pwstart, pwend; if (adaptive) { pdstart = d_offset * output_depth / input_depth; pdend = min((d_offset + 1) * output_depth / input_depth + 1, output_depth); phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); 
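      // Adaptive case: invert the forward AdaptStartIndex/AdaptEndIndex mapping,
      // i.e. enumerate the output cells whose adaptive windows may cover this
      // input element (the same pattern is used in KernelPool3DGrad above).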
pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { pdstart = (d_offset + padding_depth < ksize_depth) ? 0 : (d_offset + padding_depth - ksize_depth) / stride_depth + 1; phstart = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; pwstart = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth); phend = min((h_offset + padding_height) / stride_height + 1, output_height); pwend = min((w_offset + padding_width) / stride_width + 1, output_width); } T1 gradient = 0; int input_current_feature_map_idx = (d_offset * input_height + h_offset) * input_width + w_offset; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; mask += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask[(pd * output_height + ph) * output_width + pw] == input_current_feature_map_idx) gradient += output_grad[(pd * output_height + ph) * output_width + pw]; } } } input_grad[index] = gradient; } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. */ template <typename T1, typename T2> class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, adaptive, output_data, mask_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_depth = input_grad->dims()[2]; const int input_height = input_grad->dims()[3]; const int input_width = input_grad->dims()[4]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* output_grad_data = output_grad.data<T1>(); const T2* mask_data = mask.data<T2>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, adaptive, input_grad_data); } }; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; } // namespace math } // namespace operators } // namespace paddle
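/*
 * Editor's illustrative sketch (not part of the original pooling file above).
 * It isolates, in a standalone program, the two idioms every kernel in this
 * file relies on: the grid-stride loop over output elements and the flat-index
 * decomposition for NCHW data. All names here (DemoAvgPool2D and the sizes in
 * main) are invented for the illustration and do not exist in PaddlePaddle;
 * treat this as a minimal sketch, not as the library's API.
 */
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void DemoAvgPool2D(const int nthreads, const float* in, float* out,
                              const int channels, const int in_h, const int in_w,
                              const int out_h, const int out_w, const int k,
                              const int stride, const int pad) {
  // Grid-stride loop: each thread walks over output elements blockDim*gridDim
  // apart, so the kernel is correct for any launch configuration.
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    // Decompose the flat NCHW output index, innermost dimension first.
    int pw = index % out_w;
    int ph = (index / out_w) % out_h;
    int c = (index / out_w / out_h) % channels;
    int n = index / out_w / out_h / channels;
    // Window bounds: shift by stride minus padding, then clamp to the input.
    int hstart = max(ph * stride - pad, 0);
    int wstart = max(pw * stride - pad, 0);
    int hend = min(ph * stride - pad + k, in_h);
    int wend = min(pw * stride - pad + k, in_w);
    const float* slice = in + (n * channels + c) * in_h * in_w;
    float sum = 0.f;
    for (int h = hstart; h < hend; ++h)
      for (int w = wstart; w < wend; ++w) sum += slice[h * in_w + w];
    // "Exclusive" style: divide by the clamped window area, as KernelPool2D
    // does when exclusive or adaptive pooling is requested.
    out[index] = sum / ((hend - hstart) * (wend - wstart));
  }
}

int main() {
  const int N = 1, C = 1, H = 4, W = 4, K = 2, S = 2, P = 0;
  const int OH = (H + 2 * P - K) / S + 1, OW = (W + 2 * P - K) / S + 1;
  float h_in[N * C * H * W], h_out[N * C * OH * OW];
  for (int i = 0; i < N * C * H * W; ++i) h_in[i] = static_cast<float>(i);
  float *d_in = nullptr, *d_out = nullptr;
  hipMalloc(&d_in, sizeof(h_in));
  hipMalloc(&d_out, sizeof(h_out));
  hipMemcpy(d_in, h_in, sizeof(h_in), hipMemcpyHostToDevice);
  int nthreads = N * C * OH * OW;
  int blocks = (nthreads + 255) / 256;
  hipLaunchKernelGGL(DemoAvgPool2D, dim3(blocks), dim3(256), 0, 0, nthreads,
                     d_in, d_out, C, H, W, OH, OW, K, S, P);
  hipMemcpy(h_out, d_out, sizeof(h_out), hipMemcpyDeviceToHost);
  // Expected output for this 4x4 ramp input: 2.500 4.500 10.500 12.500
  for (int i = 0; i < nthreads; ++i) printf("%.3f\n", h_out[i]);
  hipFree(d_in);
  hipFree(d_out);
  return 0;
}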
f7fabae2dbbb683c32675a5514fbf92b49a3665c.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, c, batch_idx; if (!channel_last) { /*NCHW*/ pw = index % output_width; ph = (index / output_width) % output_height; c = (index / output_width / output_height) % channels; batch_idx = index / output_width / output_height / channels; } else { /*NHWC*/ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; batch_idx = index / channels / output_width / output_height; } int hstart, hend; int wstart, wend; if (adaptive) { hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { hstart = ph * stride_height - padding_height; hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); wstart = pw * stride_width - padding_width; wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); } if (!channel_last) { input_data += (batch_idx * channels + c) * input_height * input_width; } else { input_data += batch_idx * input_height * input_width * channels; } T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { auto input_idx = channel_last ? (h * input_width + w) * channels + c : h * input_width + w; pool_process.compute(input_data[input_idx], &ele); } } int pool_size = (exclusive || adaptive) ? 
(hend - hstart) * (wend - wstart) : ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset, h_offset, offsetC, batch_idx; if (!channel_last) { /* NCHW */ w_offset = index % input_width + padding_width; h_offset = (index / input_width) % input_height + padding_height; offsetC = (index / input_width / input_height) % channels; batch_idx = index / input_width / input_height / channels; } else { /* NHWC */ offsetC = index % channels; w_offset = (index / channels) % input_width + padding_width; h_offset = (index / channels / input_width) % input_height + padding_height; batch_idx = index / channels / input_width / input_height; } int phstart, phend; int pwstart, pwend; if (adaptive) { phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { phstart = (h_offset < ksize_height) ? 0 : (h_offset - ksize_height) / stride_height + 1; pwstart = (w_offset < ksize_width) ? 0 : (w_offset - ksize_width) / stride_width + 1; phend = min(h_offset / stride_height + 1, output_height); pwend = min(w_offset / stride_width + 1, output_width); } T gradient = 0; T input = input_data[index]; int output_stride; if (!channel_last) { output_stride = (batch_idx * channels + offsetC) * output_height * output_width; } else { output_stride = batch_idx * output_height * output_width * channels; } output_data += output_stride; output_grad += output_stride; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int pool_size; if (adaptive) { pool_size = static_cast<int>(ceil(static_cast<double>(input_height) / ksize_height)) * static_cast<int>( ceil(static_cast<double>(input_width) / ksize_width)); } else { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; } int output_sub_idx = channel_last ? 
(ph * output_width + pw) * channels + offsetC : ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, c, batch_idx; if (!channel_last) { /* NCHW */ pw = index % output_width; ph = (index / output_width) % output_height; c = (index / output_width / output_height) % channels; batch_idx = index / output_width / output_height / channels; } else { /* NHWC */ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; batch_idx = index / channels / output_width / output_height; } int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); int input_stride; if (!channel_last) { input_stride = (batch_idx * channels + c) * input_height * input_width; } else { input_stride = batch_idx * input_height * input_width * channels; } input_data += input_stride; input_grad += input_stride; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { int input_data_idx = channel_last ? 
(h * input_width + w) * channels + c : h * input_width + w; if (ele == input_data[input_data_idx]) { maxIndex = input_data_idx; stop = true; } } } if (maxIndex != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } template <typename PoolProcess, typename T> void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()( const T* input, const std::vector<int>& input_shape, const std::vector<int>& output_shape, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_compute, bool exclusive, bool adaptive, T* output, cudaStream_t stream) { const int batch_size = input_shape[0]; const int input_channels = input_shape[1]; const int input_height = input_shape[2]; const int input_width = input_shape[3]; const int output_channels = output_shape[1]; const int output_height = output_shape[2]; const int output_width = output_shape[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2D<PoolProcess, T><<<grid, threads, 0, stream>>>( nthreads, input, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_compute, exclusive, adaptive, output); } /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. 
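 * For example, paddings = {1, 1, 2, 2} (an illustrative value, not from this
 * file) pads one row above and below and two columns on the left and right.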
*/ template <typename PoolProcess, typename T> class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, output_data); } void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output->dims()[3] : output->dims()[1]; const int output_height = channel_last ? output->dims()[1] : output->dims()[2]; const int output_width = channel_last ? output->dims()[2] : output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, output_data, channel_last); } }; /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? output.dims()[3] : output.dims()[1]; const int output_height = channel_last ? output.dims()[1] : output.dims()[2]; const int output_width = channel_last ? 
output.dims()[2] : output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data, channel_last); } }; /* * Tensors are in NCHW or NHWC format. * Ksize, strides are two elements. These two elements represent height * and width, respectively. * Paddings are four elements. These four elements represent height_up, * height_down, width_left and width_right, respectively. */ template <typename T> class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, framework::Tensor* input_grad) { bool channel_last = (data_format == "NHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[3] : input.dims()[1]; const int input_height = channel_last ? input.dims()[1] : input.dims()[2]; const int input_width = channel_last ? input.dims()[2] : input.dims()[3]; const int output_channels = channel_last ? 
output.dims()[3] : output.dims()[1]; const int output_height = channel_last ? output.dims()[1] : output.dims()[2]; const int output_width = channel_last ? output.dims()[2] : output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data, channel_last); } }; template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>, float>; template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, pd, c, batch_idx; if (!channel_last) { pw = index % output_width; ph = (index / output_width) % output_height; pd = (index / output_width / output_height) % output_depth; c = (index / output_width / output_height / output_depth) % channels; batch_idx = index / output_width / output_height / output_depth / channels; } else { c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; pd = (index / channels / output_width / output_height) % output_depth; 
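      // NDHWC keeps channels innermost, so c was peeled off first; after
      // removing w, h and d as well, what remains of the flat index is the
      // batch index.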
batch_idx = index / channels / output_width / output_height / output_depth; } int dstart, dend; int hstart, hend; int wstart, wend; if (adaptive) { dstart = AdaptStartIndex(pd, input_depth, output_depth); dend = AdaptEndIndex(pd, input_depth, output_depth); hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { dstart = pd * stride_depth - padding_depth; hstart = ph * stride_height - padding_height; wstart = pw * stride_width - padding_width; dend = min(dstart + ksize_depth, input_depth); hend = min(hstart + ksize_height, input_height); wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); } int input_data_stride; if (!channel_last) { /* NCDHW */ input_data_stride = (batch_idx * channels + c) * input_depth * input_height * input_width; } else { /* NDHWC */ input_data_stride = batch_idx * input_depth * input_height * input_width * channels; } input_data += input_data_stride; T ele = pool_process.initial(); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { auto input_data_idx = channel_last ? ((d * input_height + h) * input_width + w) * channels + c : (d * input_height + h) * input_width + w; pool_process.compute(input_data[input_data_idx], &ele); } } } int pool_size = (exclusive || adaptive) ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, bool adaptive, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset, h_offset, d_offset, offsetC, batch_idx; if (!channel_last) { /* "NCDHW" */ w_offset = index % input_width + padding_width; h_offset = (index / input_width) % input_height + padding_height; d_offset = (index / input_width / input_height) % input_depth + padding_depth; offsetC = (index / input_width / input_height / input_depth) % channels; batch_idx = index / input_width / input_height / input_depth / channels; } else { /* "NDHWC" */ offsetC = index % channels; w_offset = (index / channels) % input_width + padding_width; h_offset = (index / channels / input_width) % input_height + padding_height; d_offset = (index / channels / input_width / input_height) % input_depth + padding_depth; batch_idx = index / channels / input_width / input_height / input_depth; } int pdstart, pdend; int phstart, phend; int pwstart, pwend; if (adaptive) { pdstart = d_offset * output_depth / input_depth; pdend = min((d_offset + 1) * output_depth / input_depth + 1, output_depth); phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 
1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { pdstart = (d_offset < ksize_depth) ? 0 : (d_offset - ksize_depth) / stride_depth + 1; phstart = (h_offset < ksize_height) ? 0 : (h_offset - ksize_height) / stride_height + 1; pwstart = (w_offset < ksize_width) ? 0 : (w_offset - ksize_width) / stride_width + 1; pdend = min((d_offset) / stride_depth + 1, output_depth); phend = min((h_offset) / stride_height + 1, output_height); pwend = min((w_offset) / stride_width + 1, output_width); } T gradient = 0; T input = input_data[index]; int output_stride; if (!channel_last) { output_stride = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; } else { output_stride = batch_idx * output_depth * output_height * output_width * channels; } output_data += output_stride; output_grad += output_stride; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int pool_size; if (adaptive) { pool_size = static_cast<int>( ceil(static_cast<double>(input_depth) / ksize_depth)) * static_cast<int>( ceil(static_cast<double>(input_height) / ksize_height)) * static_cast<int>( ceil(static_cast<double>(input_width) / ksize_width)); } else { int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); pool_size = exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; } int output_sub_idx = channel_last ? 
((pd * output_height + ph) * output_width + pw) * channels + offsetC : (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T* input_grad, bool channel_last = false) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw, ph, pd, c, batch_idx; if (!channel_last) { /*NCDHW*/ pw = index % output_width; ph = (index / output_width) % output_height; pd = (index / output_width / output_height) % output_depth; c = (index / output_width / output_height / output_depth) % channels; batch_idx = index / output_width / output_height / output_depth / channels; } else { /*NDHWC*/ c = index % channels; pw = (index / channels) % output_width; ph = (index / channels / output_width) % output_height; pd = (index / channels / output_width / output_height) % output_depth; batch_idx = index / channels / output_width / output_height / output_depth; } int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool stop = false; int maxIdx = -1; int input_stride; if (!channel_last) { input_stride = (batch_idx * channels + c) * input_depth * input_height * input_width; } else { input_stride = batch_idx * input_depth * input_height * input_width * channels; } input_data += input_stride; input_grad += input_stride; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { int input_data_idx = channel_last ? ((d * input_height + h) * input_width + w) * channels + c : (d * input_height + h) * input_width + w; if (ele == input_data[input_data_idx]) { stop = true; maxIdx = input_data_idx; } } } } if (maxIdx != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } /* * Tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, output_data); } void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* output) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output->dims()[4] : output->dims()[1]; const int output_depth = channel_last ? output->dims()[1] : output->dims()[2]; const int output_height = channel_last ? output->dims()[2] : output->dims()[3]; const int output_width = channel_last ? 
output->dims()[3] : output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, output_data, channel_last); } }; /* * Tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. */ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& 
strides, const std::vector<int>& paddings, const std::string data_format, PoolProcess pool_process, bool exclusive, bool adaptive, framework::Tensor* input_grad) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output.dims()[4] : output.dims()[1]; const int output_depth = channel_last ? output.dims()[1] : output.dims()[2]; const int output_height = channel_last ? output.dims()[2] : output.dims()[3]; const int output_width = channel_last ? output.dims()[3] : output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, adaptive, input_grad_data, channel_last); // add channel_last } }; /* * tensors are in NCDHW or NDHWC format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. * Paddings are six elements. These six elements represent depth_forth, * depth_back, * height_up, height_down, width_left and width_right, respectively. 
*/ template <class T> class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } void operator()( const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string data_format, framework::Tensor* input_grad) { bool channel_last = (data_format == "NDHWC"); const int batch_size = input.dims()[0]; const int input_channels = channel_last ? input.dims()[4] : input.dims()[1]; const int input_depth = channel_last ? input.dims()[1] : input.dims()[2]; const int input_height = channel_last ? input.dims()[2] : input.dims()[3]; const int input_width = channel_last ? input.dims()[3] : input.dims()[4]; const int output_channels = channel_last ? output.dims()[4] : output.dims()[1]; const int output_depth = channel_last ? output.dims()[1] : output.dims()[2]; const int output_height = channel_last ? output.dims()[2] : output.dims()[3]; const int output_width = channel_last ? 
output.dims()[3] : output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data, channel_last); // add channel_last } }; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename T1, typename T2> __global__ void KernelMaxPool2dWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, bool adaptive, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart, hend; int wstart, wend; if (adaptive) { hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { hstart = ph * stride_height - padding_height; hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); wstart = pw * stride_width - padding_width; wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); } input_data += (batch_idx * channels + c) * input_height * input_width; T1 ele = -FLT_MAX; int 
max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * input_width + w; if (ele < input_data[input_index]) { max_index = input_index; ele = input_data[input_index]; } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, bool adaptive, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart, phend; int pwstart, pwend; if (adaptive) { phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 1) * output_width / input_width + 1, output_width); } else { phstart = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; pwstart = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; phend = min((h_offset + padding_height) / stride_height + 1, output_height); pwend = min((w_offset + padding_width) / stride_width + 1, output_width); } T1 gradient = 0; int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; mask_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } input_grad[index] = gradient; } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, adaptive, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, adaptive, input_grad_data); } }; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; 
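// (Editorial sketch, not part of the original file.) The adaptive branches in the
// kernels above call AdaptStartIndex / AdaptEndIndex, which live in the pooling
// header rather than in this excerpt. The shape assumed here, reproduced purely
// for illustration under suffixed names, splits the input evenly across the output
// positions so that neighbouring output cells cover contiguous, near-equal ranges:
__device__ inline int AdaptStartIndexSketch(int ph, int input_size,
                                            int output_size) {
  // first input index covered by output position ph
  return static_cast<int>(
      floor(static_cast<double>(ph) * input_size / output_size));
}
__device__ inline int AdaptEndIndexSketch(int ph, int input_size,
                                          int output_size) {
  // one past the last input index covered by output position ph
  return static_cast<int>(
      ceil(static_cast<double>(ph + 1) * input_size / output_size));
}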
template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, bool adaptive, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart, dend; int hstart, hend; int wstart, wend; if (adaptive) { dstart = AdaptStartIndex(pd, input_depth, output_depth); dend = AdaptEndIndex(pd, input_depth, output_depth); hstart = AdaptStartIndex(ph, input_height, output_height); hend = AdaptEndIndex(ph, input_height, output_height); wstart = AdaptStartIndex(pw, input_width, output_width); wend = AdaptEndIndex(pw, input_width, output_width); } else { dstart = pd * stride_depth - padding_depth; hstart = ph * stride_height - padding_height; wstart = pw * stride_width - padding_width; dend = min(dstart + ksize_depth, input_depth); hend = min(hstart + ksize_height, input_height); wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); } T1 ele = -FLT_MAX; int max_index = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input_data[(d * input_height + h) * input_width + w]) { max_index = (d * input_height + h) * input_width + w; ele = input_data[max_index]; } } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, bool adaptive, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int d_offset = (index / input_width / input_height) % input_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart, pdend; int phstart, phend; int pwstart, pwend; if (adaptive) { pdstart = d_offset * output_depth / input_depth; pdend = min((d_offset + 1) * output_depth / input_depth + 1, output_depth); phstart = h_offset * output_height / input_height; phend = min((h_offset + 1) * output_height / input_height + 1, output_height); pwstart = w_offset * output_width / input_width; pwend = min((w_offset + 
1) * output_width / input_width + 1, output_width); } else { pdstart = (d_offset + padding_depth < ksize_depth) ? 0 : (d_offset + padding_depth - ksize_depth) / stride_depth + 1; phstart = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; pwstart = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth); phend = min((h_offset + padding_height) / stride_height + 1, output_height); pwend = min((w_offset + padding_width) / stride_width + 1, output_width); } T1 gradient = 0; int input_current_feature_map_idx = (d_offset * input_height + h_offset) * input_width + w_offset; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; mask += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask[(pd * output_height + ph) * output_width + pw] == input_current_feature_map_idx) gradient += output_grad[(pd * output_height + ph) * output_width + pw]; } } } input_grad[index] = gradient; } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. */ template <typename T1, typename T2> class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, adaptive, output_data, mask_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool adaptive, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_depth = input_grad->dims()[2]; const int input_height = input_grad->dims()[3]; const int input_width = input_grad->dims()[4]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* output_grad_data = output_grad.data<T1>(); const T2* mask_data = mask.data<T2>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, adaptive, input_grad_data); } }; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; } // namespace math } // namespace operators } // namespace paddle
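The forward pooling kernels in the file above all follow one launch pattern: a 1-D grid-stride loop in which each thread owns a single output element, decodes its (n, c, ph, pw) coordinates from the flat index, clamps the pooling window against the padded input, and reduces over that window. The stripped-down NCHW average-pooling kernel below is a minimal sketch of just that mapping; the kernel name, the float-only signature and the exclusive-count simplification are mine, not Paddle's.

__global__ void AvgPool2dForwardSketch(const float* in, float* out, int nthreads,
                                       int C, int H, int W, int OH, int OW,
                                       int kh, int kw, int sh, int sw,
                                       int pad_h, int pad_w) {
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nthreads;
       idx += blockDim.x * gridDim.x) {
    // decode (n, c, ph, pw) from the flat output index
    int pw = idx % OW;
    int ph = (idx / OW) % OH;
    int c = (idx / OW / OH) % C;
    int n = idx / OW / OH / C;
    // clamp the pooling window against the padded borders
    int hstart = max(ph * sh - pad_h, 0);
    int wstart = max(pw * sw - pad_w, 0);
    int hend = min(ph * sh - pad_h + kh, H);
    int wend = min(pw * sw - pad_w + kw, W);
    const float* slice = in + (n * C + c) * H * W;
    float sum = 0.f;
    for (int h = hstart; h < hend; ++h)
      for (int w = wstart; w < wend; ++w) sum += slice[h * W + w];
    out[idx] = sum / ((hend - hstart) * (wend - wstart));  // "exclusive" count
  }
}

A launch in the style of the file would use nthreads = N * C * OH * OW with 1024-thread blocks, e.g. AvgPool2dForwardSketch<<<(nthreads + 1023) / 1024, 1024>>>(...).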
b5c015207285661501f89b6902fd2e94ecccf5a2.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * Histogram */ #include <stdio.h> #include <iostream> #include <iomanip> #include <chrono> #include <string> #include <fstream> #include <assert.h> #include <limits.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> using namespace std::chrono; // # operator doesn't work :( #define MAP "32" #define BLOCK_SIZE 1024 #define VALIDATE 0 #define MANAGED 1 #define DEVICE_HOST 0 #define SPEC "" #if MANAGED + DEVICE_HOST == 1 #if MANAGED #undef SPEC #define SPEC "managed_only" #else #undef SPEC #define SPEC "dev_host_only" #endif #endif #define CPU "CPU" #define GPU "GPU" #define ENABLE_CPU 1 #define ENABLE_GPU 1 #define nBins 2048 #define STRIDE 512 #define MAX_INPUT_VALUE 4096 //#define VEC_SIZE 1073741824 // 0x40000000 //#define VEC_SIZE 268435456 // 0x10000000 //#define VEC_SIZE 268435455 // 0xFFFFFFF //#define VEC_SIZE 33554431 // 0x1FFFFFF //#define VEC_SIZE 1048575 // 0xFFFFF #define VEC_SIZE 16777215 // 0xFFFFFF // GPU Histogrm generation __global__ void histogrammizeVector(const float * const Input, int * Output, const float maxInputValue, const int InputSize, const int OutputSize){ const int colIdx = blockIdx.x * blockDim.x + threadIdx.x; const int strideX = blockDim.x*gridDim.x; const float BinSize = maxInputValue/(float)OutputSize; if(colIdx == 0) printf("GPU BinSize: %f\n", BinSize); if(colIdx == 0) printf("GPU strideX: %d\n", strideX); for(int i = colIdx; i < InputSize; i+=strideX) { int binNr = (int)floorf(Input[i]/BinSize); if(i == 0) printf("GPU binNr: %d\n", binNr); if( Output[binNr] <= USHRT_MAX ) // ommit if max value of bin is already reached { (void)atomicAdd( &Output[binNr], 1); //increment } } } // GPU Histogrm generation __global__ void histogrammizeVector2(const float * const Input, int * Output, const float maxInputValue, const int InputSize, const int OutputSize){ const int colIdx = blockIdx.x * blockDim.x + threadIdx.x; const int chunkIdx = InputSize/blockDim.x*gridDim.x; const float BinSize = maxInputValue/(float)OutputSize; if(colIdx == 0) printf("GPU BinSize: %f\n", BinSize); for(int i = colIdx*chunkIdx; i < (colIdx+1)*chunkIdx; i++) { if(i > InputSize) return; int binNr = (int)floorf(Input[i]/BinSize); if(i == 0) printf("GPU binNr: %d\n", binNr); if( Output[binNr] <= USHRT_MAX ) // ommit if max value of bin is already reached { (void)atomicAdd( &Output[binNr], 1); //increment } } } // GPU Histogrm generation __global__ void histogrammizeVector_kernelPerBin(const float * const Input, int * Output, const float maxInputValue, const int InputSize, const int OutputSize){ const int colIdx = blockIdx.x * blockDim.x + threadIdx.x; const float BinSize = maxInputValue/(float)OutputSize; int count = 0; if(colIdx == 0) printf("GPU BinSize: %f\n", BinSize); for(int i = 0; i < InputSize; i++) { if(floorf(Input[i]) >= colIdx*BinSize && floorf(Input[i]) < (colIdx+1)*BinSize) count++; } (void)atomicAdd( &Output[colIdx], count & USHRT_MAX); //add } // CPU Histogram generation void histogrammizeVectorCPU(const float * const Input, int* Output, const float maxInputValue, const int InputSize, const int OutputSize){ const float BinSize = maxInputValue/(float)OutputSize; printf("CPU BinSize: %f\n", BinSize); for(int i = 0; i < InputSize; i++) { int binNr = static_cast<int>(Input[i]/BinSize); // assign value to corresponding bin if( Output[binNr] < USHRT_MAX ) // ommit if max value of bin already reached { Output[binNr]++; } } } void 
writeHistogramToFile(std::ofstream & save, int* const Output, const float maxInputValue, const int OutputSize) { const float BinSize = maxInputValue/(float)OutputSize; save << "Size of bin" <<";" << "Value" <<";" <<std::endl; int i = 0; for(; i < OutputSize; i++) save << BinSize*i <<";" << Output[i] <<";" <<std::endl; save << BinSize*(i+1) <<";" <<std::endl; //for gnuplot } void writeHistogramToFile2(std::ofstream & save, float* Input, const float maxInputValue, const int InputSize) { int i = 0; for(; i < InputSize; i++) save << Input[i] <<";" <<std::endl; } template<class T> void initializeVector(T *A, const unsigned long numberOfAllElements){ for(int i = 0; i < numberOfAllElements; i++){ A[i] = rand()%MAX_INPUT_VALUE; } } inline hipError_t checkCuda(hipError_t result, int line = -1) { if (result != hipSuccess) { std::cerr << "CUDA Runtime Error: " << hipGetErrorString(result) << " " << line <<std::endl; assert(result == hipSuccess); } return result; } template<class T> void initWith(const T num, T * const a, const int N) { for(int i = 0; i < N; ++i) { a[i] = num; } } int main() { float * host_vectorInput, *dev_vectorInput; int * outputCPU, * host_outputGPU, *dev_outputGPU, *host_outputGPUgpk, *dev_outputGPUgpk, *host_outputGPU2, *dev_outputGPU2; const char * const filename_gpu = "out" MAP GPU ".csv"; const char * const filename_cpu = "out" MAP CPU ".csv"; const char * const filename_gpu_bpk = "out" MAP GPU "_bpk.csv"; const char * const filename_gpu2 = "out" MAP GPU "2.csv"; int MaxInputValue = MAX_INPUT_VALUE; int time; std::ofstream save; int numberOfElements = VEC_SIZE; size_t size = numberOfElements * sizeof(float); size_t sizeHist = nBins * sizeof(int); dim3 threads_per_block (BLOCK_SIZE, 1, 1); dim3 number_of_blocks ((VEC_SIZE / (BLOCK_SIZE * nBins))-1, 1, 1); std::chrono::system_clock::time_point start; std::chrono::system_clock::time_point stop; std::cout<<"start"<<std::endl; std::cout <<"Alloc vectors"<<std::endl; outputCPU = static_cast<int*>(malloc(sizeHist)); host_outputGPU = static_cast<int*>(malloc(sizeHist)); host_vectorInput = static_cast<float*>(malloc(size)); host_outputGPU2 = static_cast<int*>(malloc(sizeHist)); host_outputGPUgpk = static_cast<int*>(malloc(sizeHist)); checkCuda(hipMalloc((void**)&dev_outputGPU, sizeHist),__LINE__); checkCuda(hipMalloc((void**)&dev_vectorInput, size), __LINE__); checkCuda(hipMalloc((void**)&dev_outputGPU2, sizeHist), __LINE__); checkCuda(hipMalloc((void**)&dev_outputGPUgpk, sizeHist), __LINE__); // Initialize memory std::cout <<"Init vectors"<<std::endl; initWith((int)0, outputCPU, nBins); initWith((int)0, host_outputGPU, nBins); initWith((int)0, host_outputGPU2, nBins); initWith((int)0, host_outputGPUgpk, nBins); initializeVector(host_vectorInput, numberOfElements); checkCuda(hipMemcpy(dev_outputGPU, host_outputGPU, sizeHist, hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dev_vectorInput, host_vectorInput, size, hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dev_outputGPU2, host_outputGPU2, sizeHist, hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dev_outputGPUgpk, host_outputGPUgpk, sizeHist, hipMemcpyHostToDevice), __LINE__); #if ENABLE_GPU { std::cout <<"GPU"<<std::endl; start = std::chrono::high_resolution_clock::now(); std::cout <<"MaxInputValue: "<< MaxInputValue << ", numberOfElements: " << numberOfElements << ", nBins: " << nBins <<std::endl; hipLaunchKernelGGL(( histogrammizeVector), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, dev_vectorInput, dev_outputGPU, MaxInputValue, numberOfElements, 
nBins); std::cout <<"numberOfElements/MaxInputValue: "<< numberOfElements/MaxInputValue <<std::endl; hipDeviceSynchronize(); stop = std::chrono::high_resolution_clock::now(); auto elapsed_time = duration_cast<microseconds>(stop - start); time = elapsed_time.count(); std::cout << "Time: " << time << std::endl; checkCuda(hipMemcpy(host_outputGPU, dev_outputGPU, sizeHist, hipMemcpyDeviceToHost), __LINE__); checkCuda(hipFree(dev_outputGPU), __LINE__); #if VALIDATE try{ // validateMatrix(host_result_CPU, result_stride, numberOfElements); } catch(const char* const e) { std::cout<<e<<std::endl; } #endif std::cout <<"\twriting to file..."<<std::endl; save.open(filename_gpu); writeHistogramToFile(save, host_outputGPU, MaxInputValue, nBins); save.close(); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::cout <<"GPU"<<std::endl; start = std::chrono::high_resolution_clock::now(); std::cout <<"MaxInputValue: "<< MaxInputValue << ", numberOfElements: " << numberOfElements << ", nBins: " << nBins <<std::endl; hipLaunchKernelGGL(( histogrammizeVector_kernelPerBin), dim3(nBins/threads_per_block.x), dim3(threads_per_block), 0, 0, dev_vectorInput, dev_outputGPUgpk, MaxInputValue, numberOfElements, nBins); std::cout <<"numberOfElements/MaxInputValue: "<< numberOfElements/MaxInputValue <<std::endl; hipDeviceSynchronize(); stop = std::chrono::high_resolution_clock::now(); elapsed_time = duration_cast<microseconds>(stop - start); time = elapsed_time.count(); std::cout << "Time: " << time << std::endl; checkCuda(hipMemcpy(host_outputGPUgpk, dev_outputGPUgpk, sizeHist, hipMemcpyDeviceToHost), __LINE__); checkCuda(hipFree(dev_outputGPUgpk), __LINE__); #if VALIDATE try{ // validateMatrix(host_result_CPU, result_stride, numberOfElements); } catch(const char* const e) { std::cout<<e<<std::endl; } #endif std::cout <<"\twriting to file..."<<std::endl; save.open(filename_gpu_bpk); writeHistogramToFile(save, host_outputGPUgpk, MaxInputValue, nBins); save.close(); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::cout <<"GPU"<<std::endl; start = std::chrono::high_resolution_clock::now(); std::cout <<"MaxInputValue: "<< MaxInputValue << ", numberOfElements: " << numberOfElements << ", nBins: " << nBins <<std::endl; hipLaunchKernelGGL(( histogrammizeVector2), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, dev_vectorInput, dev_outputGPU2, MaxInputValue, numberOfElements, nBins); std::cout <<"numberOfElements/MaxInputValue: "<< numberOfElements/MaxInputValue <<std::endl; hipDeviceSynchronize(); stop = std::chrono::high_resolution_clock::now(); elapsed_time = duration_cast<microseconds>(stop - start); time = elapsed_time.count(); std::cout << "Time: " << time << std::endl; checkCuda(hipMemcpy(host_outputGPU2, dev_outputGPU2, sizeHist, hipMemcpyDeviceToHost), __LINE__); checkCuda(hipFree(dev_outputGPU2), __LINE__); #if VALIDATE try{ // validateMatrix(host_result_CPU, result_stride, numberOfElements); } catch(const char* const e) { std::cout<<e<<std::endl; } #endif std::cout <<"\twriting to file..."<<std::endl; save.open(filename_gpu2); writeHistogramToFile(save, host_outputGPU2, MaxInputValue, nBins); save.close(); } #endif #if ENABLE_CPU { std::cout <<"CPU"<<std::endl; start = std::chrono::high_resolution_clock::now(); std::cout <<"MaxInputValue: "<< MaxInputValue << ", numberOfElements: " << numberOfElements << ", nBins: " << nBins <<std::endl; 
histogrammizeVectorCPU( host_vectorInput, outputCPU, MaxInputValue, numberOfElements, nBins); stop = std::chrono::high_resolution_clock::now(); auto elapsed_time = duration_cast<microseconds>(stop - start); time = elapsed_time.count(); std::cout << "Time: " << time << std::endl; #if VALIDATE try{ // validateMatrix(host_result_CPU, result_stride, numberOfElements); } catch(const char* const e) { std::cout<<e<<std::endl; } #endif std::cout <<"\twriting to file..."<<std::endl; save.open(filename_cpu); writeHistogramToFile(save, outputCPU, MaxInputValue, nBins); save.close(); } #endif checkCuda(hipFree(dev_vectorInput), __LINE__); free(outputCPU); free(host_outputGPU); free(host_outputGPU2); free(host_outputGPUgpk); free(host_vectorInput); }
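The histogram kernels in the HIP file above resolve every bin collision with an atomicAdd straight into global memory. A refinement that is commonly applied to this pattern, and is not present in the file, is to privatize the histogram per block in shared memory and flush it once at the end, which removes most of the global atomic contention. A minimal sketch under that assumption (the USHRT_MAX clamp of the original is omitted for brevity, and the kernel name is hypothetical):

__global__ void histogramSharedSketch(const float* in, int* out, float maxVal,
                                      int n, int nbins) {
  extern __shared__ int bins_sh[];                     // nbins ints per block
  for (int b = threadIdx.x; b < nbins; b += blockDim.x) bins_sh[b] = 0;
  __syncthreads();
  const float binSize = maxVal / (float)nbins;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    int bin = min((int)(in[i] / binSize), nbins - 1);  // clamp the top edge
    atomicAdd(&bins_sh[bin], 1);                       // block-local, cheap
  }
  __syncthreads();
  for (int b = threadIdx.x; b < nbins; b += blockDim.x)
    atomicAdd(&out[b], bins_sh[b]);                    // one global add per bin per block
}

With nBins = 2048 this needs 8 KB of dynamic shared memory, passed as the third launch argument: histogramSharedSketch<<<blocks, 1024, nbins * sizeof(int)>>>(dev_in, dev_out, maxVal, n, nbins); a HIP build would route the same launch through hipLaunchKernelGGL as in the file above.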
b5c015207285661501f89b6902fd2e94ecccf5a2.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * Histogram */ #include <stdio.h> #include <iostream> #include <iomanip> #include <chrono> #include <string> #include <fstream> #include <assert.h> #include <limits.h> #include <cuda_runtime.h> #include <helper_cuda.h> using namespace std::chrono; // # operator doesn't work :( #define MAP "32" #define BLOCK_SIZE 1024 #define VALIDATE 0 #define MANAGED 1 #define DEVICE_HOST 0 #define SPEC "" #if MANAGED + DEVICE_HOST == 1 #if MANAGED #undef SPEC #define SPEC "managed_only" #else #undef SPEC #define SPEC "dev_host_only" #endif #endif #define CPU "CPU" #define GPU "GPU" #define ENABLE_CPU 1 #define ENABLE_GPU 1 #define nBins 2048 #define STRIDE 512 #define MAX_INPUT_VALUE 4096 //#define VEC_SIZE 1073741824 // 0x40000000 //#define VEC_SIZE 268435456 // 0x10000000 //#define VEC_SIZE 268435455 // 0xFFFFFFF //#define VEC_SIZE 33554431 // 0x1FFFFFF //#define VEC_SIZE 1048575 // 0xFFFFF #define VEC_SIZE 16777215 // 0xFFFFFF // GPU Histogrm generation __global__ void histogrammizeVector(const float * const Input, int * Output, const float maxInputValue, const int InputSize, const int OutputSize){ const int colIdx = blockIdx.x * blockDim.x + threadIdx.x; const int strideX = blockDim.x*gridDim.x; const float BinSize = maxInputValue/(float)OutputSize; if(colIdx == 0) printf("GPU BinSize: %f\n", BinSize); if(colIdx == 0) printf("GPU strideX: %d\n", strideX); for(int i = colIdx; i < InputSize; i+=strideX) { int binNr = (int)floorf(Input[i]/BinSize); if(i == 0) printf("GPU binNr: %d\n", binNr); if( Output[binNr] <= USHRT_MAX ) // ommit if max value of bin is already reached { (void)atomicAdd( &Output[binNr], 1); //increment } } } // GPU Histogrm generation __global__ void histogrammizeVector2(const float * const Input, int * Output, const float maxInputValue, const int InputSize, const int OutputSize){ const int colIdx = blockIdx.x * blockDim.x + threadIdx.x; const int chunkIdx = InputSize/blockDim.x*gridDim.x; const float BinSize = maxInputValue/(float)OutputSize; if(colIdx == 0) printf("GPU BinSize: %f\n", BinSize); for(int i = colIdx*chunkIdx; i < (colIdx+1)*chunkIdx; i++) { if(i > InputSize) return; int binNr = (int)floorf(Input[i]/BinSize); if(i == 0) printf("GPU binNr: %d\n", binNr); if( Output[binNr] <= USHRT_MAX ) // ommit if max value of bin is already reached { (void)atomicAdd( &Output[binNr], 1); //increment } } } // GPU Histogrm generation __global__ void histogrammizeVector_kernelPerBin(const float * const Input, int * Output, const float maxInputValue, const int InputSize, const int OutputSize){ const int colIdx = blockIdx.x * blockDim.x + threadIdx.x; const float BinSize = maxInputValue/(float)OutputSize; int count = 0; if(colIdx == 0) printf("GPU BinSize: %f\n", BinSize); for(int i = 0; i < InputSize; i++) { if(floorf(Input[i]) >= colIdx*BinSize && floorf(Input[i]) < (colIdx+1)*BinSize) count++; } (void)atomicAdd( &Output[colIdx], count & USHRT_MAX); //add } // CPU Histogram generation void histogrammizeVectorCPU(const float * const Input, int* Output, const float maxInputValue, const int InputSize, const int OutputSize){ const float BinSize = maxInputValue/(float)OutputSize; printf("CPU BinSize: %f\n", BinSize); for(int i = 0; i < InputSize; i++) { int binNr = static_cast<int>(Input[i]/BinSize); // assign value to corresponding bin if( Output[binNr] < USHRT_MAX ) // ommit if max value of bin already reached { Output[binNr]++; } } } void writeHistogramToFile(std::ofstream & save, int* const Output, const float maxInputValue, 
const int OutputSize) { const float BinSize = maxInputValue/(float)OutputSize; save << "Size of bin" <<";" << "Value" <<";" <<std::endl; int i = 0; for(; i < OutputSize; i++) save << BinSize*i <<";" << Output[i] <<";" <<std::endl; save << BinSize*(i+1) <<";" <<std::endl; //for gnuplot } void writeHistogramToFile2(std::ofstream & save, float* Input, const float maxInputValue, const int InputSize) { int i = 0; for(; i < InputSize; i++) save << Input[i] <<";" <<std::endl; } template<class T> void initializeVector(T *A, const unsigned long numberOfAllElements){ for(int i = 0; i < numberOfAllElements; i++){ A[i] = rand()%MAX_INPUT_VALUE; } } inline cudaError_t checkCuda(cudaError_t result, int line = -1) { if (result != cudaSuccess) { std::cerr << "CUDA Runtime Error: " << cudaGetErrorString(result) << " " << line <<std::endl; assert(result == cudaSuccess); } return result; } template<class T> void initWith(const T num, T * const a, const int N) { for(int i = 0; i < N; ++i) { a[i] = num; } } int main() { float * host_vectorInput, *dev_vectorInput; int * outputCPU, * host_outputGPU, *dev_outputGPU, *host_outputGPUgpk, *dev_outputGPUgpk, *host_outputGPU2, *dev_outputGPU2; const char * const filename_gpu = "out" MAP GPU ".csv"; const char * const filename_cpu = "out" MAP CPU ".csv"; const char * const filename_gpu_bpk = "out" MAP GPU "_bpk.csv"; const char * const filename_gpu2 = "out" MAP GPU "2.csv"; int MaxInputValue = MAX_INPUT_VALUE; int time; std::ofstream save; int numberOfElements = VEC_SIZE; size_t size = numberOfElements * sizeof(float); size_t sizeHist = nBins * sizeof(int); dim3 threads_per_block (BLOCK_SIZE, 1, 1); dim3 number_of_blocks ((VEC_SIZE / (BLOCK_SIZE * nBins))-1, 1, 1); std::chrono::system_clock::time_point start; std::chrono::system_clock::time_point stop; std::cout<<"start"<<std::endl; std::cout <<"Alloc vectors"<<std::endl; outputCPU = static_cast<int*>(malloc(sizeHist)); host_outputGPU = static_cast<int*>(malloc(sizeHist)); host_vectorInput = static_cast<float*>(malloc(size)); host_outputGPU2 = static_cast<int*>(malloc(sizeHist)); host_outputGPUgpk = static_cast<int*>(malloc(sizeHist)); checkCuda(cudaMalloc((void**)&dev_outputGPU, sizeHist),__LINE__); checkCuda(cudaMalloc((void**)&dev_vectorInput, size), __LINE__); checkCuda(cudaMalloc((void**)&dev_outputGPU2, sizeHist), __LINE__); checkCuda(cudaMalloc((void**)&dev_outputGPUgpk, sizeHist), __LINE__); // Initialize memory std::cout <<"Init vectors"<<std::endl; initWith((int)0, outputCPU, nBins); initWith((int)0, host_outputGPU, nBins); initWith((int)0, host_outputGPU2, nBins); initWith((int)0, host_outputGPUgpk, nBins); initializeVector(host_vectorInput, numberOfElements); checkCuda(cudaMemcpy(dev_outputGPU, host_outputGPU, sizeHist, cudaMemcpyHostToDevice), __LINE__); checkCuda(cudaMemcpy(dev_vectorInput, host_vectorInput, size, cudaMemcpyHostToDevice), __LINE__); checkCuda(cudaMemcpy(dev_outputGPU2, host_outputGPU2, sizeHist, cudaMemcpyHostToDevice), __LINE__); checkCuda(cudaMemcpy(dev_outputGPUgpk, host_outputGPUgpk, sizeHist, cudaMemcpyHostToDevice), __LINE__); #if ENABLE_GPU { std::cout <<"GPU"<<std::endl; start = std::chrono::high_resolution_clock::now(); std::cout <<"MaxInputValue: "<< MaxInputValue << ", numberOfElements: " << numberOfElements << ", nBins: " << nBins <<std::endl; histogrammizeVector<<<number_of_blocks, threads_per_block>>>( dev_vectorInput, dev_outputGPU, MaxInputValue, numberOfElements, nBins); std::cout <<"numberOfElements/MaxInputValue: "<< numberOfElements/MaxInputValue <<std::endl; 
cudaDeviceSynchronize(); stop = std::chrono::high_resolution_clock::now(); auto elapsed_time = duration_cast<microseconds>(stop - start); time = elapsed_time.count(); std::cout << "Time: " << time << std::endl; checkCuda(cudaMemcpy(host_outputGPU, dev_outputGPU, sizeHist, cudaMemcpyDeviceToHost), __LINE__); checkCuda(cudaFree(dev_outputGPU), __LINE__); #if VALIDATE try{ // validateMatrix(host_result_CPU, result_stride, numberOfElements); } catch(const char* const e) { std::cout<<e<<std::endl; } #endif std::cout <<"\twriting to file..."<<std::endl; save.open(filename_gpu); writeHistogramToFile(save, host_outputGPU, MaxInputValue, nBins); save.close(); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::cout <<"GPU"<<std::endl; start = std::chrono::high_resolution_clock::now(); std::cout <<"MaxInputValue: "<< MaxInputValue << ", numberOfElements: " << numberOfElements << ", nBins: " << nBins <<std::endl; histogrammizeVector_kernelPerBin<<<nBins/threads_per_block.x, threads_per_block>>>( dev_vectorInput, dev_outputGPUgpk, MaxInputValue, numberOfElements, nBins); std::cout <<"numberOfElements/MaxInputValue: "<< numberOfElements/MaxInputValue <<std::endl; cudaDeviceSynchronize(); stop = std::chrono::high_resolution_clock::now(); elapsed_time = duration_cast<microseconds>(stop - start); time = elapsed_time.count(); std::cout << "Time: " << time << std::endl; checkCuda(cudaMemcpy(host_outputGPUgpk, dev_outputGPUgpk, sizeHist, cudaMemcpyDeviceToHost), __LINE__); checkCuda(cudaFree(dev_outputGPUgpk), __LINE__); #if VALIDATE try{ // validateMatrix(host_result_CPU, result_stride, numberOfElements); } catch(const char* const e) { std::cout<<e<<std::endl; } #endif std::cout <<"\twriting to file..."<<std::endl; save.open(filename_gpu_bpk); writeHistogramToFile(save, host_outputGPUgpk, MaxInputValue, nBins); save.close(); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::cout <<"GPU"<<std::endl; start = std::chrono::high_resolution_clock::now(); std::cout <<"MaxInputValue: "<< MaxInputValue << ", numberOfElements: " << numberOfElements << ", nBins: " << nBins <<std::endl; histogrammizeVector2<<<number_of_blocks, threads_per_block>>>( dev_vectorInput, dev_outputGPU2, MaxInputValue, numberOfElements, nBins); std::cout <<"numberOfElements/MaxInputValue: "<< numberOfElements/MaxInputValue <<std::endl; cudaDeviceSynchronize(); stop = std::chrono::high_resolution_clock::now(); elapsed_time = duration_cast<microseconds>(stop - start); time = elapsed_time.count(); std::cout << "Time: " << time << std::endl; checkCuda(cudaMemcpy(host_outputGPU2, dev_outputGPU2, sizeHist, cudaMemcpyDeviceToHost), __LINE__); checkCuda(cudaFree(dev_outputGPU2), __LINE__); #if VALIDATE try{ // validateMatrix(host_result_CPU, result_stride, numberOfElements); } catch(const char* const e) { std::cout<<e<<std::endl; } #endif std::cout <<"\twriting to file..."<<std::endl; save.open(filename_gpu2); writeHistogramToFile(save, host_outputGPU2, MaxInputValue, nBins); save.close(); } #endif #if ENABLE_CPU { std::cout <<"CPU"<<std::endl; start = std::chrono::high_resolution_clock::now(); std::cout <<"MaxInputValue: "<< MaxInputValue << ", numberOfElements: " << numberOfElements << ", nBins: " << nBins <<std::endl; histogrammizeVectorCPU( host_vectorInput, outputCPU, MaxInputValue, numberOfElements, nBins); stop = std::chrono::high_resolution_clock::now(); auto elapsed_time = 
duration_cast<microseconds>(stop - start); time = elapsed_time.count(); std::cout << "Time: " << time << std::endl; #if VALIDATE try{ // validateMatrix(host_result_CPU, result_stride, numberOfElements); } catch(const char* const e) { std::cout<<e<<std::endl; } #endif std::cout <<"\twriting to file..."<<std::endl; save.open(filename_cpu); writeHistogramToFile(save, outputCPU, MaxInputValue, nBins); save.close(); } #endif checkCuda(cudaFree(dev_vectorInput), __LINE__); free(outputCPU); free(host_outputGPU); free(host_outputGPU2); free(host_outputGPUgpk); free(host_vectorInput); }
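Note on the histogram kernels above: every input element issues a global atomicAdd, so all blocks contend on the same 2048 bins. A common refinement is to privatize the histogram in shared memory per block and flush it once at the end. The sketch below is illustrative only (it is not part of the record above); it assumes non-negative inputs and a dynamic shared-memory launch of nBins * sizeof(int) bytes.

__global__ void histogramSharedSketch(const float* in, int* out,
                                      float maxInputValue, int inputSize, int nBins)
{
    extern __shared__ int localBins[];                  // one privatized copy per block
    for (int b = threadIdx.x; b < nBins; b += blockDim.x)
        localBins[b] = 0;
    __syncthreads();

    const float binSize = maxInputValue / (float)nBins;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < inputSize;
         i += blockDim.x * gridDim.x)
    {
        int bin = (int)floorf(in[i] / binSize);
        if (bin >= nBins) bin = nBins - 1;              // clamp the upper boundary value
        atomicAdd(&localBins[bin], 1);                  // cheap shared-memory atomic
    }
    __syncthreads();

    for (int b = threadIdx.x; b < nBins; b += blockDim.x)
        atomicAdd(&out[b], localBins[b]);               // one global atomic per bin per block
}
// launched e.g. as histogramSharedSketch<<<blocks, 256, nBins * sizeof(int)>>>(...)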
853e2fde58f8c28ff764ca4f98f071b860337912.hip
// !!! This is a file automatically generated by hipify!!!
/**
 *
 * @file common.cpp
 *
 *  MAGMA (version 1.3.0) --
 *  Univ. of Tennessee, Knoxville
 *  Univ. of California, Berkeley
 *  Univ. of Colorado, Denver
 *  November 2012
 *
 **/

#include "common_magma.h"

hipStream_t magma_stream = 0;

hipblasStatus_t magmablasSetKernelStream( hipStream_t stream )
{
/*  -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

    Purpose
    =======
    magmablasSetKernelStream sets the CUDA stream that all MAGMA BLAS and
    CUBLAS routines use.

    Arguments
    =========
    stream  (input) hipStream_t
            The CUDA stream.
    =====================================================================   */
    magma_stream = stream;
    return hipblasSetKernelStream( stream );
}

hipblasStatus_t magmablasGetKernelStream( hipStream_t *stream )
{
/*  -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

    Purpose
    =======
    magmablasSetKernelStream gets the CUDA stream that all MAGMA BLAS
    routines use.

    Arguments
    =========
    stream  (output) hipStream_t
            The CUDA stream.
    =====================================================================   */
    *stream = magma_stream;
    return HIPBLAS_STATUS_SUCCESS;
}
853e2fde58f8c28ff764ca4f98f071b860337912.cu
/**
 *
 * @file common.cpp
 *
 *  MAGMA (version 1.3.0) --
 *  Univ. of Tennessee, Knoxville
 *  Univ. of California, Berkeley
 *  Univ. of Colorado, Denver
 *  November 2012
 *
 **/

#include "common_magma.h"

cudaStream_t magma_stream = 0;

cublasStatus_t magmablasSetKernelStream( cudaStream_t stream )
{
/*  -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

    Purpose
    =======
    magmablasSetKernelStream sets the CUDA stream that all MAGMA BLAS and
    CUBLAS routines use.

    Arguments
    =========
    stream  (input) cudaStream_t
            The CUDA stream.
    =====================================================================   */
    magma_stream = stream;
    return cublasSetKernelStream( stream );
}

cublasStatus_t magmablasGetKernelStream( cudaStream_t *stream )
{
/*  -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

    Purpose
    =======
    magmablasSetKernelStream gets the CUDA stream that all MAGMA BLAS
    routines use.

    Arguments
    =========
    stream  (output) cudaStream_t
            The CUDA stream.
    =====================================================================   */
    *stream = magma_stream;
    return CUBLAS_STATUS_SUCCESS;
}
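For context, a minimal caller of the stream setter/getter pair above might look like the following sketch, assuming the CUDA build of the file and that its declarations are visible; the function name example_set_stream is illustrative only.

void example_set_stream()
{
    cudaStream_t s;
    cudaStreamCreate(&s);

    magmablasSetKernelStream(s);        // subsequent MAGMA BLAS calls are queued on s

    cudaStream_t check = 0;
    magmablasGetKernelStream(&check);   // check now equals s

    magmablasSetKernelStream(0);        // restore the default (NULL) stream
    cudaStreamDestroy(s);
}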
5e8810b4d33a8fa53d3ba838aef0d2d64bcd991c.hip
// !!! This is a file automatically generated by hipify!!! #ifndef GRB_BACKEND_APSPIE_KERNELS_SPMM_CU #define GRB_BACKEND_APSPIE_KERNELS_SPMM_CU #include <hip/hip_runtime.h> #include <cstdio> //#include <helper_math.h> //#define TA 32 //#define TB 32 //#define NT 64 namespace graphblas { namespace backend { typedef magma_index_t Index; // In paper "Design Principles for Sparse Matrix Multiplication" /*template<typename c, int TB> __global__ void spmmRowKernel2( const Index A_nrows, const Index B_ncols, const Index A_ncols, const Index A_nvals, const Index* A_csrRowPtr, const Index* A_csrColInd, const c* A_csrVal, const c* B_denseVal, c* C_denseVal ) { float vals[TB]; int col_all[TB]; float val_all[TB]; int thread_id = blockDim.x*blockIdx.x+threadIdx.x; // global thrd idx int warp_id = thread_id>>5; // global warp idx int lane_id = thread_id & (32 - 1); int row = warp_id; const c* B_offset = B_denseVal+lane_id+((blockIdx.y&1)<<5); int C_offset = (row<<6)+lane_id+((blockIdx.y&1)<<5); //if( threadIdx.x==0 ) // printf("row:%d\n", row); if( row < A_nrows ) { int row_start = __ldg(A_csrRowPtr+row); int row_end = __ldg(A_csrRowPtr+row+1); int col = -1; float val = 0.f; float sum = 0.f; int jj = row_start+lane_id; //TODO: add popc() and ballot to query which to shfl for( int jj_start=row_start; jj_start<row_end; jj_start+=32 ) { //#pragma unroll //for( int ii=0; ii<TB; ii++ ) // vals[ii] = 0.f; if( jj<row_end ) { col = __ldg(A_csrColInd+jj)<<6; val = __ldg(A_csrVal+jj); } else { col = 0; val = 0.f; } jj+=32; //if( warp_id==0 ) printf("tid:%d,col:%d,val:%f\n", threadIdx.x, col, val); for( int kk=0; kk<32; kk+=TB ) { #pragma unroll for( int ii=0; ii<TB; ii++ ) { col_all[ii] = __shfl(col, ii+kk); val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*__ldg(B_offset+col_all[ii]); vals[ ii] = val_all[ii]*__ldg(B_offset+col_all[ii]); //vals[ ii] = __ldg(B_offset+col_all[ii]); } //if( warp_id==0 && blockIdx.y==0 ) // printf("row:%d,tid:%d,col_all:%d,ii:%d,load_id:%d,val:%f\n",row,thread_id,col_all>>6, ii, col_all+lane_id+((blockIdx.y&1)<<5), vals[ii]); #pragma unroll for( int ii=0; ii<TB; ii++ ) { //val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*vals[ii]; sum += vals[ii]; // if( threadIdx.x==1 && warp_id==0 && blockIdx.y==0 ) printf("tid:%d,ii:%d,val:%f\n", threadIdx.x, ii, vals[ii]); } //if( warp_id==0 && blockIdx.y==0 ) printf("tid:%d,val:%f\n", threadIdx.x, vals[0]); } } C_denseVal[C_offset] = sum; } } // spmmRowKernel2*/ // Varies by B_ncols template<typename c, int TB> __global__ void spmmRowKernel3( const Index A_nrows, const Index B_ncols, const Index A_ncols, const Index A_nvals, const Index* A_csrRowPtr, const Index* A_csrColInd, const c* A_csrVal, const c* B_denseVal, c* C_denseVal ) { float vals[TB]; int col_all[TB]; float val_all[TB]; int thread_id = blockDim.x*blockIdx.x+threadIdx.x; // global thrd idx int warp_id = thread_id>>5; // global warp idx int lane_id = thread_id & (32 - 1); int row = warp_id; const c* B_offset = B_denseVal+lane_id+(blockIdx.y<<5); //int C_offset = (row*B_ncols)+lane_id+(blockIdx.y<<5); int C_offset = (lane_id+(blockIdx.y<<5))*A_nrows+row; //if( threadIdx.x==0 ) // printf("row:%d\n", row); if( row < A_nrows ) { int row_start = __ldg(A_csrRowPtr+row); int row_end = __ldg(A_csrRowPtr+row+1); int col = -1; float val = 0.f; float sum = 0.f; int jj = row_start+lane_id; //TODO: add popc() and ballot to query which to shfl if( blockIdx.y!=gridDim.y-1 ) { for( int jj_start=row_start; jj_start<row_end; jj_start+=32 ) { //#pragma unroll //for( int ii=0; ii<TB; ii++ ) 
// vals[ii] = 0.f; if( jj<row_end ) { col = __ldg(A_csrColInd+jj)*B_ncols; val = __ldg(A_csrVal+jj); } else { col = 0; val = 0.f; } jj+=32; //if( warp_id==0 ) printf("tid:%d,col:%d,val:%f\n", threadIdx.x, col, val); for( int kk=0; kk<32; kk+=TB ) { #pragma unroll for( int ii=0; ii<TB; ii++ ) { col_all[ii] = __shfl(col, ii+kk); val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*__ldg(B_offset+col_all[ii]); vals[ ii] = val_all[ii]*__ldg(B_offset+col_all[ii]); //vals[ ii] = __ldg(B_offset+col_all[ii]); } //if( warp_id==0 && blockIdx.y==0 ) // printf("row:%d,tid:%d,col_all:%d,ii:%d,load_id:%d,val:%f\n",row,thread_id,col_all>>6, ii, col_all+lane_id+((blockIdx.y&1)<<5), vals[ii]); #pragma unroll for( int ii=0; ii<TB; ii++ ) { //val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*vals[ii]; sum += vals[ii]; // if( threadIdx.x==1 && warp_id==0 && blockIdx.y==0 ) printf("tid:%d,ii:%d,val:%f\n", threadIdx.x, ii, vals[ii]); } //if( warp_id==0 && blockIdx.y==0 ) printf("tid:%d,val:%f\n", threadIdx.x, vals[0]); } } C_denseVal[C_offset] = sum; } else { int leftover = B_ncols - (blockIdx.y<<5); for( int jj_start=row_start; jj_start<row_end; jj_start+=32 ) { //#pragma unroll //for( int ii=0; ii<TB; ii++ ) // vals[ii] = 0.f; if( jj<row_end ) { col = __ldg(A_csrColInd+jj)*B_ncols; val = __ldg(A_csrVal+jj); } else { col = 0; val = 0.f; } jj+=32; //if( jj_start<row_start+32*5 && warp_id==0 ) printf("tid:%d,col:%d,val:%f\n", threadIdx.x, col, val); for( int kk=0; kk<32; kk+=TB ) { #pragma unroll for( int ii=0; ii<TB; ii++ ) { col_all[ii] = __shfl(col, ii+kk); val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*__ldg(B_offset+col_all[ii]); if( lane_id<leftover ) vals[ii] = val_all[ii]*__ldg(B_offset+col_all[ii]); else vals[ii] = 0.f; //vals[ ii] = __ldg(B_offset+col_all[ii]); //if( jj_start<row_start+32*5 && thread_id<2 && warp_id==0 && blockIdx.y==0 ) //printf("row:%d,tid:%d,ii:%d,val:%f\n",row,thread_id, ii, vals[ii]); } #pragma unroll for( int ii=0; ii<TB; ii++ ) { //val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*vals[ii]; sum += vals[ii]; // if( threadIdx.x==1 && warp_id==0 && blockIdx.y==0 ) printf("tid:%d,ii:%d,val:%f\n", threadIdx.x, ii, vals[ii]); } //if( jj_start<row_start+32*5 && warp_id==0 && blockIdx.y==0 ) printf("str tid:%d,val:%f\n", threadIdx.x, sum); //if( jj_start>row_end-32*5 && warp_id==0 && blockIdx.y==0 ) printf("end tid:%d,val:%f\n", threadIdx.x, sum); } } if( lane_id<leftover ) C_denseVal[C_offset] = sum; } } } // spmmRowKernel3 } // backend } // graphblas #endif // GRB_BACKEND_APSPIE_KERNELS_SPMM_CU
5e8810b4d33a8fa53d3ba838aef0d2d64bcd991c.cu
#ifndef GRB_BACKEND_APSPIE_KERNELS_SPMM_CU #define GRB_BACKEND_APSPIE_KERNELS_SPMM_CU #include <cuda.h> #include <cstdio> //#include <helper_math.h> //#define TA 32 //#define TB 32 //#define NT 64 namespace graphblas { namespace backend { typedef magma_index_t Index; // In paper "Design Principles for Sparse Matrix Multiplication" /*template<typename c, int TB> __global__ void spmmRowKernel2( const Index A_nrows, const Index B_ncols, const Index A_ncols, const Index A_nvals, const Index* A_csrRowPtr, const Index* A_csrColInd, const c* A_csrVal, const c* B_denseVal, c* C_denseVal ) { float vals[TB]; int col_all[TB]; float val_all[TB]; int thread_id = blockDim.x*blockIdx.x+threadIdx.x; // global thrd idx int warp_id = thread_id>>5; // global warp idx int lane_id = thread_id & (32 - 1); int row = warp_id; const c* B_offset = B_denseVal+lane_id+((blockIdx.y&1)<<5); int C_offset = (row<<6)+lane_id+((blockIdx.y&1)<<5); //if( threadIdx.x==0 ) // printf("row:%d\n", row); if( row < A_nrows ) { int row_start = __ldg(A_csrRowPtr+row); int row_end = __ldg(A_csrRowPtr+row+1); int col = -1; float val = 0.f; float sum = 0.f; int jj = row_start+lane_id; //TODO: add popc() and ballot to query which to shfl for( int jj_start=row_start; jj_start<row_end; jj_start+=32 ) { //#pragma unroll //for( int ii=0; ii<TB; ii++ ) // vals[ii] = 0.f; if( jj<row_end ) { col = __ldg(A_csrColInd+jj)<<6; val = __ldg(A_csrVal+jj); } else { col = 0; val = 0.f; } jj+=32; //if( warp_id==0 ) printf("tid:%d,col:%d,val:%f\n", threadIdx.x, col, val); for( int kk=0; kk<32; kk+=TB ) { #pragma unroll for( int ii=0; ii<TB; ii++ ) { col_all[ii] = __shfl(col, ii+kk); val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*__ldg(B_offset+col_all[ii]); vals[ ii] = val_all[ii]*__ldg(B_offset+col_all[ii]); //vals[ ii] = __ldg(B_offset+col_all[ii]); } //if( warp_id==0 && blockIdx.y==0 ) // printf("row:%d,tid:%d,col_all:%d,ii:%d,load_id:%d,val:%f\n",row,thread_id,col_all>>6, ii, col_all+lane_id+((blockIdx.y&1)<<5), vals[ii]); #pragma unroll for( int ii=0; ii<TB; ii++ ) { //val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*vals[ii]; sum += vals[ii]; // if( threadIdx.x==1 && warp_id==0 && blockIdx.y==0 ) printf("tid:%d,ii:%d,val:%f\n", threadIdx.x, ii, vals[ii]); } //if( warp_id==0 && blockIdx.y==0 ) printf("tid:%d,val:%f\n", threadIdx.x, vals[0]); } } C_denseVal[C_offset] = sum; } } // spmmRowKernel2*/ // Varies by B_ncols template<typename c, int TB> __global__ void spmmRowKernel3( const Index A_nrows, const Index B_ncols, const Index A_ncols, const Index A_nvals, const Index* A_csrRowPtr, const Index* A_csrColInd, const c* A_csrVal, const c* B_denseVal, c* C_denseVal ) { float vals[TB]; int col_all[TB]; float val_all[TB]; int thread_id = blockDim.x*blockIdx.x+threadIdx.x; // global thrd idx int warp_id = thread_id>>5; // global warp idx int lane_id = thread_id & (32 - 1); int row = warp_id; const c* B_offset = B_denseVal+lane_id+(blockIdx.y<<5); //int C_offset = (row*B_ncols)+lane_id+(blockIdx.y<<5); int C_offset = (lane_id+(blockIdx.y<<5))*A_nrows+row; //if( threadIdx.x==0 ) // printf("row:%d\n", row); if( row < A_nrows ) { int row_start = __ldg(A_csrRowPtr+row); int row_end = __ldg(A_csrRowPtr+row+1); int col = -1; float val = 0.f; float sum = 0.f; int jj = row_start+lane_id; //TODO: add popc() and ballot to query which to shfl if( blockIdx.y!=gridDim.y-1 ) { for( int jj_start=row_start; jj_start<row_end; jj_start+=32 ) { //#pragma unroll //for( int ii=0; ii<TB; ii++ ) // vals[ii] = 0.f; if( jj<row_end ) { col = 
__ldg(A_csrColInd+jj)*B_ncols; val = __ldg(A_csrVal+jj); } else { col = 0; val = 0.f; } jj+=32; //if( warp_id==0 ) printf("tid:%d,col:%d,val:%f\n", threadIdx.x, col, val); for( int kk=0; kk<32; kk+=TB ) { #pragma unroll for( int ii=0; ii<TB; ii++ ) { col_all[ii] = __shfl(col, ii+kk); val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*__ldg(B_offset+col_all[ii]); vals[ ii] = val_all[ii]*__ldg(B_offset+col_all[ii]); //vals[ ii] = __ldg(B_offset+col_all[ii]); } //if( warp_id==0 && blockIdx.y==0 ) // printf("row:%d,tid:%d,col_all:%d,ii:%d,load_id:%d,val:%f\n",row,thread_id,col_all>>6, ii, col_all+lane_id+((blockIdx.y&1)<<5), vals[ii]); #pragma unroll for( int ii=0; ii<TB; ii++ ) { //val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*vals[ii]; sum += vals[ii]; // if( threadIdx.x==1 && warp_id==0 && blockIdx.y==0 ) printf("tid:%d,ii:%d,val:%f\n", threadIdx.x, ii, vals[ii]); } //if( warp_id==0 && blockIdx.y==0 ) printf("tid:%d,val:%f\n", threadIdx.x, vals[0]); } } C_denseVal[C_offset] = sum; } else { int leftover = B_ncols - (blockIdx.y<<5); for( int jj_start=row_start; jj_start<row_end; jj_start+=32 ) { //#pragma unroll //for( int ii=0; ii<TB; ii++ ) // vals[ii] = 0.f; if( jj<row_end ) { col = __ldg(A_csrColInd+jj)*B_ncols; val = __ldg(A_csrVal+jj); } else { col = 0; val = 0.f; } jj+=32; //if( jj_start<row_start+32*5 && warp_id==0 ) printf("tid:%d,col:%d,val:%f\n", threadIdx.x, col, val); for( int kk=0; kk<32; kk+=TB ) { #pragma unroll for( int ii=0; ii<TB; ii++ ) { col_all[ii] = __shfl(col, ii+kk); val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*__ldg(B_offset+col_all[ii]); if( lane_id<leftover ) vals[ii] = val_all[ii]*__ldg(B_offset+col_all[ii]); else vals[ii] = 0.f; //vals[ ii] = __ldg(B_offset+col_all[ii]); //if( jj_start<row_start+32*5 && thread_id<2 && warp_id==0 && blockIdx.y==0 ) //printf("row:%d,tid:%d,ii:%d,val:%f\n",row,thread_id, ii, vals[ii]); } #pragma unroll for( int ii=0; ii<TB; ii++ ) { //val_all[ii] = __shfl(val, ii+kk); //sum += val_all[ii]*vals[ii]; sum += vals[ii]; // if( threadIdx.x==1 && warp_id==0 && blockIdx.y==0 ) printf("tid:%d,ii:%d,val:%f\n", threadIdx.x, ii, vals[ii]); } //if( jj_start<row_start+32*5 && warp_id==0 && blockIdx.y==0 ) printf("str tid:%d,val:%f\n", threadIdx.x, sum); //if( jj_start>row_end-32*5 && warp_id==0 && blockIdx.y==0 ) printf("end tid:%d,val:%f\n", threadIdx.x, sum); } } if( lane_id<leftover ) C_denseVal[C_offset] = sum; } } } // spmmRowKernel3 } // backend } // graphblas #endif // GRB_BACKEND_APSPIE_KERNELS_SPMM_CU
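spmmRowKernel3 above assigns one warp per CSR row and tiles the dense matrix B in 32-column slabs indexed by blockIdx.y. A hypothetical host-side launch, not part of the record and assuming magma_index_t is int, float values, and TB = 4, could look like this:

void launch_spmm_row3(int A_nrows, int B_ncols, int A_ncols, int A_nvals,
                      const int* d_csrRowPtr, const int* d_csrColInd,
                      const float* d_csrVal, const float* d_B, float* d_C)
{
    const int NT = 128;                                  // 4 warps per block
    dim3 block(NT, 1, 1);
    dim3 grid((A_nrows * 32 + NT - 1) / NT,              // one warp per row of A
              (B_ncols + 31) / 32,                       // one 32-column slab of B per y-block
              1);
    graphblas::backend::spmmRowKernel3<float, 4><<<grid, block>>>(
        A_nrows, B_ncols, A_ncols, A_nvals,
        d_csrRowPtr, d_csrColInd, d_csrVal, d_B, d_C);
}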
41e7cebea4721358ba1b6560de4db0bcd02f30f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////// to move 2 arrays and fill the tail __global__ void move( unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t size, const size_t num) { int F = threadIdx.x * size; for(int i=0; i != size; i++) { if(F+i < num) { d_outputPos[F+i] = d_inputPos[F+i]; d_outputVals[F+i] = d_inputVals[F+i]; } else { d_outputPos[F+i] = F+i ; d_outputVals[F+i] = 0; } } } //////// to mark big and small elements for partitioning __global__ void check( unsigned int* const val, int* lo, int* hi, unsigned int limit, int num ) { int tid = threadIdx.x; int gid = (2 * blockDim.x * blockIdx.x) + tid; if( gid < num ) { if( val[gid] > limit ) { hi[gid] = 1; lo[gid] = 0; } else { hi[gid] = 0; lo[gid] = 1; } } else { hi[gid] = 0; lo[gid] = 0; } if( gid+blockDim.x < num ) { if( val[gid+blockDim.x] > limit ) { hi[gid+blockDim.x] = 1; lo[gid+blockDim.x] = 0; } else { hi[gid+blockDim.x] = 0; lo[gid+blockDim.x] = 1; } } else { hi[gid+blockDim.x] = 0; lo[gid+blockDim.x] = 0; } } //////// to partition elements by indices __global__ void place( unsigned int* const val, unsigned int* const pos, unsigned int* d_Ov, unsigned int* d_Op, int* lo, int* hi, unsigned int limit, int size ) { int tid = threadIdx.x; int gid = (blockDim.x * blockIdx.x) + tid; if( gid < size ) if( val[gid] > limit ) { // H int iDst = size-1 - hi[gid]; d_Ov[iDst] = val[gid]; d_Op[iDst] = pos[gid]; } else { // L int i = lo[gid]; d_Ov[i] = val[gid]; d_Op[i] = pos[gid]; } } //////// to show the result __global__ void test( unsigned int* const val, unsigned int* const pos, const size_t start, const size_t end, float* d_x) { int I = threadIdx.x; float sum = val[start]; unsigned int min = val[start]; unsigned int max = val[start]; const size_t size = end - start; int zero = 0; int n = 1; int lt = 0; int eq = 0; int gt = 0; int z = -1; int zp = -1; int b = 0; int e = 0; for(int i=start+1; i < end; i++) { n = n + 1; sum = sum + val[i]; if( val[i] > max ) max = val[i]; if( val[i] < min ) min = val[i]; if( val[i] == 0 ) { zero = zero + 1; z = i; zp = pos[i]; } if( val[i-1] < val[i] ) lt = lt + 1; if( val[i-1] == val[i] ) eq = eq + 1; if( val[i-1] > val[i] ) gt = gt + 1; if( val[i] > 1044000000 ) b = b + 1; if( val[i] == 1 ) e = e + 1; } if( val[0] == 0 ) zero = zero + 1; printf(" size = %d, \n", n ); printf(" min = %d, \n", min ); printf(" max = %d, \n", max ); // printf(" avr = %f, \n", sum/n ); // printf(" n = %d, \n", n ); // printf(" z = %d, \n", z ); // printf(" zp = %d, \n", zp ); printf(" zeros = %d, \n", zero ); printf(" big = %d, \n", b ); // 923 // printf(" # 1 = %d, \n", e ); // 1165 for(int i=start; i != start+16; i++) { if( val[i] < 100 ) printf(" "); if( val[i] < 10 ) printf(" "); printf(" %d", val[i]); } printf("\n...\n"); for(int i=end-16; i != end; i++) { if( val[i] < 100 ) printf(" "); if( val[i] < 10 ) printf(" "); printf(" %d", val[i]); } printf("\n"); } ////////
41e7cebea4721358ba1b6560de4db0bcd02f30f3.cu
//////// to move 2 arrays and fill the tail __global__ void move( unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t size, const size_t num) { int F = threadIdx.x * size; for(int i=0; i != size; i++) { if(F+i < num) { d_outputPos[F+i] = d_inputPos[F+i]; d_outputVals[F+i] = d_inputVals[F+i]; } else { d_outputPos[F+i] = F+i ; d_outputVals[F+i] = 0; } } } //////// to mark big and small elements for partitioning __global__ void check( unsigned int* const val, int* lo, int* hi, unsigned int limit, int num ) { int tid = threadIdx.x; int gid = (2 * blockDim.x * blockIdx.x) + tid; if( gid < num ) { if( val[gid] > limit ) { hi[gid] = 1; lo[gid] = 0; } else { hi[gid] = 0; lo[gid] = 1; } } else { hi[gid] = 0; lo[gid] = 0; } if( gid+blockDim.x < num ) { if( val[gid+blockDim.x] > limit ) { hi[gid+blockDim.x] = 1; lo[gid+blockDim.x] = 0; } else { hi[gid+blockDim.x] = 0; lo[gid+blockDim.x] = 1; } } else { hi[gid+blockDim.x] = 0; lo[gid+blockDim.x] = 0; } } //////// to partition elements by indices __global__ void place( unsigned int* const val, unsigned int* const pos, unsigned int* d_Ov, unsigned int* d_Op, int* lo, int* hi, unsigned int limit, int size ) { int tid = threadIdx.x; int gid = (blockDim.x * blockIdx.x) + tid; if( gid < size ) if( val[gid] > limit ) { // H int iDst = size-1 - hi[gid]; d_Ov[iDst] = val[gid]; d_Op[iDst] = pos[gid]; } else { // L int i = lo[gid]; d_Ov[i] = val[gid]; d_Op[i] = pos[gid]; } } //////// to show the result __global__ void test( unsigned int* const val, unsigned int* const pos, const size_t start, const size_t end, float* d_x) { int I = threadIdx.x; float sum = val[start]; unsigned int min = val[start]; unsigned int max = val[start]; const size_t size = end - start; int zero = 0; int n = 1; int lt = 0; int eq = 0; int gt = 0; int z = -1; int zp = -1; int b = 0; int e = 0; for(int i=start+1; i < end; i++) { n = n + 1; sum = sum + val[i]; if( val[i] > max ) max = val[i]; if( val[i] < min ) min = val[i]; if( val[i] == 0 ) { zero = zero + 1; z = i; zp = pos[i]; } if( val[i-1] < val[i] ) lt = lt + 1; if( val[i-1] == val[i] ) eq = eq + 1; if( val[i-1] > val[i] ) gt = gt + 1; if( val[i] > 1044000000 ) b = b + 1; if( val[i] == 1 ) e = e + 1; } if( val[0] == 0 ) zero = zero + 1; printf(" size = %d, \n", n ); printf(" min = %d, \n", min ); printf(" max = %d, \n", max ); // printf(" avr = %f, \n", sum/n ); // printf(" n = %d, \n", n ); // printf(" z = %d, \n", z ); // printf(" zp = %d, \n", zp ); printf(" zeros = %d, \n", zero ); printf(" big = %d, \n", b ); // 923 // printf(" # 1 = %d, \n", e ); // 1165 for(int i=start; i != start+16; i++) { if( val[i] < 100 ) printf(" "); if( val[i] < 10 ) printf(" "); printf(" %d", val[i]); } printf("\n...\n"); for(int i=end-16; i != end; i++) { if( val[i] < 100 ) printf(" "); if( val[i] < 10 ) printf(" "); printf(" %d", val[i]); } printf("\n"); } ////////
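The check/place pair above implements one partition pass: check writes 0/1 flags, and place consumes per-element destination offsets, so the flags need to be exclusive-scanned in between. The driver below is a hypothetical sketch; Thrust is used only to supply the scan, and d_lo/d_hi are assumed to be allocated with at least checkBlocks * 2 * threads entries because check zero-pads past num.

#include <thrust/scan.h>
#include <thrust/device_ptr.h>

void partition_pass(unsigned int* d_vals, unsigned int* d_pos,
                    unsigned int* d_outVals, unsigned int* d_outPos,
                    int* d_lo, int* d_hi, unsigned int pivot, int n)
{
    const int threads = 256;
    const int checkBlocks = (n + 2 * threads - 1) / (2 * threads);  // check covers 2 elements per thread
    check<<<checkBlocks, threads>>>(d_vals, d_lo, d_hi, pivot, n);

    thrust::device_ptr<int> lo(d_lo), hi(d_hi);
    thrust::exclusive_scan(lo, lo + n, lo);   // lo[i]: how many "small" elements precede i
    thrust::exclusive_scan(hi, hi + n, hi);   // hi[i]: how many "large" elements precede i

    const int placeBlocks = (n + threads - 1) / threads;
    place<<<placeBlocks, threads>>>(d_vals, d_pos, d_outVals, d_outPos,
                                    d_lo, d_hi, pivot, n);
}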
221f3d706564af8ebecf4f7c352dfbee0ba41efb.hip
// !!! This is a file automatically generated by hipify!!! // // CasAES128_CUDA.c // CasAES128_CUDA // Created by Carter McCardwell on 11/11/14. // #include <stdint.h> #include <stdio.h> #include <time.h> #include <string.h> #include <hip/hip_runtime.h> const int Nb_h = 4; const int Nr_h = 10; const int Nk_h = 4; const uint8_t s_h[256]= { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; uint8_t Rcon_h[256] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d }; __constant__ uint8_t s[256]; __constant__ int Nb; __constant__ int Nr; __constant__ int Nk; __constant__ uint32_t 
ek[44]; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void cudaDevAssist(hipError_t code, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"cudaDevAssistant: %s %d\n", hipGetErrorString(code), line); if (abort) exit(code); } } uint32_t sw(uint32_t word) { union { uint32_t word; uint8_t bytes[4]; } subWord __attribute__ ((aligned)); subWord.word = word; subWord.bytes[3] = s_h[subWord.bytes[3]]; subWord.bytes[2] = s_h[subWord.bytes[2]]; subWord.bytes[1] = s_h[subWord.bytes[1]]; subWord.bytes[0] = s_h[subWord.bytes[0]]; return subWord.word; } __device__ void sb(uint8_t* in) { for (int i = 0; i < 16; i++) { in[i] = s[in[i]]; } } __device__ void mc(uint8_t* arr) { for (int i = 0; i < 4; i++) { uint8_t a[4]; uint8_t b[4]; uint8_t c; uint8_t h; for(c=0;c<4;c++) { a[c] = arr[(4*c+i)]; h = (uint8_t)((signed char)arr[(4*c+i)] >> 7); b[c] = arr[(4*c+i)] << 1; b[c] ^= 0x1B & h; } arr[(i)] = b[0] ^ a[3] ^ a[2] ^ b[1] ^ a[1]; arr[(4+i)] = b[1] ^ a[0] ^ a[3] ^ b[2] ^ a[2]; arr[(8+i)] = b[2] ^ a[1] ^ a[0] ^ b[3] ^ a[3]; arr[(12+i)] = b[3] ^ a[2] ^ a[1] ^ b[0] ^ a[0]; } } __device__ void sr(uint8_t* arr) { uint8_t out[16]; //On per-row basis (+1 shift ea row) //Row 1 out[0] = arr[0]; out[1] = arr[1]; out[2] = arr[2]; out[3] = arr[3]; //Row 2 out[4] = arr[5]; out[5] = arr[6]; out[6] = arr[7]; out[7] = arr[4]; //Row 3 out[8] = arr[10]; out[9] = arr[11]; out[10] = arr[8]; out[11] = arr[9]; //Row 4 out[12] = arr[15]; out[13] = arr[12]; out[14] = arr[13]; out[15] = arr[14]; for (int i = 0; i < 16; i++) { arr[i] = out[i]; } } uint32_t rw(uint32_t word) { union { uint8_t bytes[4]; uint32_t word; } subWord __attribute__ ((aligned)); subWord.word = word; uint8_t B0 = subWord.bytes[3], B1 = subWord.bytes[2], B2 = subWord.bytes[1], B3 = subWord.bytes[0]; subWord.bytes[3] = B1; //0 subWord.bytes[2] = B2; //1 subWord.bytes[1] = B3; //2 subWord.bytes[0] = B0; //3 return subWord.word; } void K_Exp(uint8_t* pk, uint32_t* out) { int i = 0; union { uint8_t bytes[4]; uint32_t word; } temp __attribute__ ((aligned)); union { uint8_t bytes[4]; uint32_t word; } univar[44] __attribute__ ((aligned)); for (i = 0; i < Nk_h; i++) { univar[i].bytes[3] = pk[i*4]; univar[i].bytes[2] = pk[i*4+1]; univar[i].bytes[1] = pk[i*4+2]; univar[i].bytes[0] = pk[i*4+3]; } for (i = Nk_h; i < Nb_h*(Nr_h+1); i++) { temp.word = univar[i-1].word; if (i % Nk_h == 0) { temp.word = (sw(rw(temp.word))); temp.bytes[3] = temp.bytes[3] ^ (Rcon_h[i/Nk_h]); } else if (Nk_h > 6 && i % Nk_h == 4) { temp.word = sw(temp.word); } if (i-4 % Nk_h == 0) { temp.word = sw(temp.word); } univar[i].word = univar[i-Nk_h].word ^ temp.word; } for (i = 0; i < 44; i++) { out[i] = univar[i].word; } } __device__ void ark(uint8_t* state, int strD) { union { uint32_t word; uint8_t bytes[4]; } zero __attribute__ ((aligned)); union { uint32_t word; uint8_t bytes[4]; } one __attribute__ ((aligned)); union { uint32_t word; uint8_t bytes[4]; } two __attribute__ ((aligned)); union { uint32_t word; uint8_t bytes[4]; } three __attribute__ ((aligned)); zero.word = ek[strD]; one.word = ek[strD+1]; two.word = ek[strD+2]; three.word = ek[strD+3]; state[0] = state[0] ^ zero.bytes[3]; state[4] = state[4] ^ zero.bytes[2]; state[8] = state[8] ^ zero.bytes[1]; state[12] = state[12] ^ zero.bytes[0]; state[1] = state[1] ^ one.bytes[3]; state[5] = state[5] ^ one.bytes[2]; state[9] = state[9] ^ one.bytes[1]; state[13] = state[13] ^ one.bytes[0]; state[2] = state[2] ^ two.bytes[3]; state[6] = state[6] ^ two.bytes[2]; state[10] = state[10] ^ two.bytes[1]; 
state[14] = state[14] ^ two.bytes[0]; state[3] = state[3] ^ three.bytes[3]; state[7] = state[7] ^ three.bytes[2]; state[11] = state[11] ^ three.bytes[1]; state[15] = state[15] ^ three.bytes[0]; } __global__ void cudaRunner(uint8_t *in) { uint8_t state[16]; int localid = blockDim.x * blockIdx.x + threadIdx.x; //Data is shifted by 16 * ID of worker for (int i = 0; i < 16; i++) { state[i] = in[(localid*16)+i]; } ark(state, 0); for (int i = 1; i < Nr; i++) { sb(state); sr(state); mc(state); ark(state, i*Nb); } sb(state); sr(state); ark(state, Nr*Nb); for (int i = 0; i < 16; i++) { in[(localid*16)+i] = state[i]; } } int main(int argc, const char * argv[]) { printf("CasAES_CUDA Hyperthreaded AES-128 Encryption for CUDA processors - compiled 3/25/2015 Rev. 4\nCarter McCardwell, Northeastern University NUCAR - http://coe.neu.edu/~cmccardw - mccardwell.net\nPlease Wait...\n"); clock_t c_start, c_stop; c_start = clock(); FILE *infile; FILE *keyfile; FILE *outfile; infile = fopen(argv[2], "r"); if (infile == NULL) { printf("error (infile)\n"); return(1); } keyfile = fopen(argv[3], "rb"); if (keyfile == NULL) { printf("error (keyfile)\n"); return(1); } outfile = fopen(argv[4], "w"); if (outfile == NULL) { printf("error (outfile permission error, run with sudo)\n"); return(1); } //Hex info, or ASCII bool hexMode = false; if (strcmp(argv[1], "h") == 0) { hexMode = true; } else if (strcmp(argv[1], "a") == 0) { hexMode = false; } else { printf("error: first argument must be \'a\' for ASCII interpretation or \'h\' for hex interpretation\n"); return(1); } uint8_t key[16]; uint32_t ek_h[44]; for (int i = 0; i < 16; i++) { fscanf(keyfile, "%x", &key[i]); } K_Exp(key, ek_h); //send constants to GPU hipSetDevice(0); cudaDevAssist(hipMemcpyToSymbol(Nk, &Nk_h, sizeof(int), 0, hipMemcpyHostToDevice), 535, true); cudaDevAssist(hipMemcpyToSymbol(Nr, &Nr_h, sizeof(int), 0, hipMemcpyHostToDevice), 543, true); cudaDevAssist(hipMemcpyToSymbol(Nb, &Nb_h, sizeof(int), 0, hipMemcpyHostToDevice), 903, true); cudaDevAssist(hipMemcpyToSymbol(s, &s_h, 256*sizeof(uint8_t), 0, hipMemcpyHostToDevice), 920, true); cudaDevAssist(hipMemcpyToSymbol(ek, &ek_h, 44*sizeof(uint32_t), 0, hipMemcpyHostToDevice), 823, true); hipDeviceSynchronize(); const int BLOCKS = -1; //Not used const int RUNNING_THREADS = 512; uint8_t *devState = NULL; cudaDevAssist(hipMalloc((void**)&devState, RUNNING_THREADS*16*sizeof(uint8_t)), 425, true); uint8_t states[RUNNING_THREADS][16] = { 0x00 }; int ch = 0; int spawn = 0; int end = 1; while (end) { spawn = 0; for (int i = 0; i < RUNNING_THREADS; i++) //Dispatch many control threads that will report back to main (for now 5x) - 1 worker per state { spawn++; for (int ix = 0; ix < 16; ix++) { if (hexMode) { if (fscanf(infile, "%x", &states[i][ix]) != EOF) { ; } else { if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } } else { spawn--; } i = RUNNING_THREADS + 1; end = 0; break; } } else { ch = getc(infile); if (ch != EOF) { states[i][ix] = ch; } else { if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } } else { spawn--; } i = RUNNING_THREADS + 1; end = 0; break; } } } } //arrange data correctly for (int i = 0; i < spawn; i++) { uint8_t temp[16]; memcpy(&temp[0], &states[i][0], sizeof(uint8_t)); memcpy(&temp[4], &states[i][1], sizeof(uint8_t)); memcpy(&temp[8], &states[i][2], sizeof(uint8_t)); memcpy(&temp[12], &states[i][3], sizeof(uint8_t)); memcpy(&temp[1], &states[i][4], sizeof(uint8_t)); memcpy(&temp[5], &states[i][5], sizeof(uint8_t)); memcpy(&temp[9], 
&states[i][6], sizeof(uint8_t)); memcpy(&temp[13], &states[i][7], sizeof(uint8_t)); memcpy(&temp[2], &states[i][8], sizeof(uint8_t)); memcpy(&temp[6], &states[i][9], sizeof(uint8_t)); memcpy(&temp[10], &states[i][10], sizeof(uint8_t)); memcpy(&temp[14], &states[i][11], sizeof(uint8_t)); memcpy(&temp[3], &states[i][12], sizeof(uint8_t)); memcpy(&temp[7], &states[i][13], sizeof(uint8_t)); memcpy(&temp[11], &states[i][14], sizeof(uint8_t)); memcpy(&temp[15], &states[i][15], sizeof(uint8_t)); for (int c = 0; c < 16; c++) { memcpy(&states[i][c], &temp[c], sizeof(uint8_t)); } } //printf("\nCycle!: Spawn = %i", spawn); cudaDevAssist(hipMemcpy(devState, *states, spawn*16*sizeof(uint8_t), hipMemcpyHostToDevice), 426, true); cudaDevAssist(hipDeviceSynchronize(), 268, true); hipLaunchKernelGGL(( cudaRunner), dim3(1),dim3(spawn), 0, 0, devState); cudaDevAssist(hipDeviceSynchronize(), 270, true); cudaDevAssist(hipMemcpy(*states, devState, spawn*16*sizeof(uint8_t), hipMemcpyDeviceToHost), 431, true); //Write results to out for (int i = 0; i < spawn; i++) { for (int ix = 0; ix < 4; ix++) { char hex[3]; sprintf(hex, "%02x", states[i][ix]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+4]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+8]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+12]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } } } } c_stop = clock(); float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000; printf("Done - Time taken: %f ms\n", diff); hipFree(devState); hipDeviceReset(); fclose(infile); fclose(outfile); fclose(keyfile); return 0; }
221f3d706564af8ebecf4f7c352dfbee0ba41efb.cu
// // CasAES128_CUDA.c // CasAES128_CUDA // Created by Carter McCardwell on 11/11/14. // #include <stdint.h> #include <stdio.h> #include <time.h> #include <string.h> #include <cuda_runtime.h> const int Nb_h = 4; const int Nr_h = 10; const int Nk_h = 4; const uint8_t s_h[256]= { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; uint8_t Rcon_h[256] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d }; __constant__ uint8_t s[256]; __constant__ int Nb; __constant__ int Nr; __constant__ int Nk; __constant__ uint32_t ek[44]; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, 
__LINE__); } inline void cudaDevAssist(cudaError_t code, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"cudaDevAssistant: %s %d\n", cudaGetErrorString(code), line); if (abort) exit(code); } } uint32_t sw(uint32_t word) { union { uint32_t word; uint8_t bytes[4]; } subWord __attribute__ ((aligned)); subWord.word = word; subWord.bytes[3] = s_h[subWord.bytes[3]]; subWord.bytes[2] = s_h[subWord.bytes[2]]; subWord.bytes[1] = s_h[subWord.bytes[1]]; subWord.bytes[0] = s_h[subWord.bytes[0]]; return subWord.word; } __device__ void sb(uint8_t* in) { for (int i = 0; i < 16; i++) { in[i] = s[in[i]]; } } __device__ void mc(uint8_t* arr) { for (int i = 0; i < 4; i++) { uint8_t a[4]; uint8_t b[4]; uint8_t c; uint8_t h; for(c=0;c<4;c++) { a[c] = arr[(4*c+i)]; h = (uint8_t)((signed char)arr[(4*c+i)] >> 7); b[c] = arr[(4*c+i)] << 1; b[c] ^= 0x1B & h; } arr[(i)] = b[0] ^ a[3] ^ a[2] ^ b[1] ^ a[1]; arr[(4+i)] = b[1] ^ a[0] ^ a[3] ^ b[2] ^ a[2]; arr[(8+i)] = b[2] ^ a[1] ^ a[0] ^ b[3] ^ a[3]; arr[(12+i)] = b[3] ^ a[2] ^ a[1] ^ b[0] ^ a[0]; } } __device__ void sr(uint8_t* arr) { uint8_t out[16]; //On per-row basis (+1 shift ea row) //Row 1 out[0] = arr[0]; out[1] = arr[1]; out[2] = arr[2]; out[3] = arr[3]; //Row 2 out[4] = arr[5]; out[5] = arr[6]; out[6] = arr[7]; out[7] = arr[4]; //Row 3 out[8] = arr[10]; out[9] = arr[11]; out[10] = arr[8]; out[11] = arr[9]; //Row 4 out[12] = arr[15]; out[13] = arr[12]; out[14] = arr[13]; out[15] = arr[14]; for (int i = 0; i < 16; i++) { arr[i] = out[i]; } } uint32_t rw(uint32_t word) { union { uint8_t bytes[4]; uint32_t word; } subWord __attribute__ ((aligned)); subWord.word = word; uint8_t B0 = subWord.bytes[3], B1 = subWord.bytes[2], B2 = subWord.bytes[1], B3 = subWord.bytes[0]; subWord.bytes[3] = B1; //0 subWord.bytes[2] = B2; //1 subWord.bytes[1] = B3; //2 subWord.bytes[0] = B0; //3 return subWord.word; } void K_Exp(uint8_t* pk, uint32_t* out) { int i = 0; union { uint8_t bytes[4]; uint32_t word; } temp __attribute__ ((aligned)); union { uint8_t bytes[4]; uint32_t word; } univar[44] __attribute__ ((aligned)); for (i = 0; i < Nk_h; i++) { univar[i].bytes[3] = pk[i*4]; univar[i].bytes[2] = pk[i*4+1]; univar[i].bytes[1] = pk[i*4+2]; univar[i].bytes[0] = pk[i*4+3]; } for (i = Nk_h; i < Nb_h*(Nr_h+1); i++) { temp.word = univar[i-1].word; if (i % Nk_h == 0) { temp.word = (sw(rw(temp.word))); temp.bytes[3] = temp.bytes[3] ^ (Rcon_h[i/Nk_h]); } else if (Nk_h > 6 && i % Nk_h == 4) { temp.word = sw(temp.word); } if (i-4 % Nk_h == 0) { temp.word = sw(temp.word); } univar[i].word = univar[i-Nk_h].word ^ temp.word; } for (i = 0; i < 44; i++) { out[i] = univar[i].word; } } __device__ void ark(uint8_t* state, int strD) { union { uint32_t word; uint8_t bytes[4]; } zero __attribute__ ((aligned)); union { uint32_t word; uint8_t bytes[4]; } one __attribute__ ((aligned)); union { uint32_t word; uint8_t bytes[4]; } two __attribute__ ((aligned)); union { uint32_t word; uint8_t bytes[4]; } three __attribute__ ((aligned)); zero.word = ek[strD]; one.word = ek[strD+1]; two.word = ek[strD+2]; three.word = ek[strD+3]; state[0] = state[0] ^ zero.bytes[3]; state[4] = state[4] ^ zero.bytes[2]; state[8] = state[8] ^ zero.bytes[1]; state[12] = state[12] ^ zero.bytes[0]; state[1] = state[1] ^ one.bytes[3]; state[5] = state[5] ^ one.bytes[2]; state[9] = state[9] ^ one.bytes[1]; state[13] = state[13] ^ one.bytes[0]; state[2] = state[2] ^ two.bytes[3]; state[6] = state[6] ^ two.bytes[2]; state[10] = state[10] ^ two.bytes[1]; state[14] = state[14] ^ two.bytes[0]; state[3] = state[3] ^ 
three.bytes[3]; state[7] = state[7] ^ three.bytes[2]; state[11] = state[11] ^ three.bytes[1]; state[15] = state[15] ^ three.bytes[0]; } __global__ void cudaRunner(uint8_t *in) { uint8_t state[16]; int localid = blockDim.x * blockIdx.x + threadIdx.x; //Data is shifted by 16 * ID of worker for (int i = 0; i < 16; i++) { state[i] = in[(localid*16)+i]; } ark(state, 0); for (int i = 1; i < Nr; i++) { sb(state); sr(state); mc(state); ark(state, i*Nb); } sb(state); sr(state); ark(state, Nr*Nb); for (int i = 0; i < 16; i++) { in[(localid*16)+i] = state[i]; } } int main(int argc, const char * argv[]) { printf("CasAES_CUDA Hyperthreaded AES-128 Encryption for CUDA processors - compiled 3/25/2015 Rev. 4\nCarter McCardwell, Northeastern University NUCAR - http://coe.neu.edu/~cmccardw - mccardwell.net\nPlease Wait...\n"); clock_t c_start, c_stop; c_start = clock(); FILE *infile; FILE *keyfile; FILE *outfile; infile = fopen(argv[2], "r"); if (infile == NULL) { printf("error (infile)\n"); return(1); } keyfile = fopen(argv[3], "rb"); if (keyfile == NULL) { printf("error (keyfile)\n"); return(1); } outfile = fopen(argv[4], "w"); if (outfile == NULL) { printf("error (outfile permission error, run with sudo)\n"); return(1); } //Hex info, or ASCII bool hexMode = false; if (strcmp(argv[1], "h") == 0) { hexMode = true; } else if (strcmp(argv[1], "a") == 0) { hexMode = false; } else { printf("error: first argument must be \'a\' for ASCII interpretation or \'h\' for hex interpretation\n"); return(1); } uint8_t key[16]; uint32_t ek_h[44]; for (int i = 0; i < 16; i++) { fscanf(keyfile, "%x", &key[i]); } K_Exp(key, ek_h); //send constants to GPU cudaSetDevice(0); cudaDevAssist(cudaMemcpyToSymbol(Nk, &Nk_h, sizeof(int), 0, cudaMemcpyHostToDevice), 535, true); cudaDevAssist(cudaMemcpyToSymbol(Nr, &Nr_h, sizeof(int), 0, cudaMemcpyHostToDevice), 543, true); cudaDevAssist(cudaMemcpyToSymbol(Nb, &Nb_h, sizeof(int), 0, cudaMemcpyHostToDevice), 903, true); cudaDevAssist(cudaMemcpyToSymbol(s, &s_h, 256*sizeof(uint8_t), 0, cudaMemcpyHostToDevice), 920, true); cudaDevAssist(cudaMemcpyToSymbol(ek, &ek_h, 44*sizeof(uint32_t), 0, cudaMemcpyHostToDevice), 823, true); cudaThreadSynchronize(); const int BLOCKS = -1; //Not used const int RUNNING_THREADS = 512; uint8_t *devState = NULL; cudaDevAssist(cudaMalloc((void**)&devState, RUNNING_THREADS*16*sizeof(uint8_t)), 425, true); uint8_t states[RUNNING_THREADS][16] = { 0x00 }; int ch = 0; int spawn = 0; int end = 1; while (end) { spawn = 0; for (int i = 0; i < RUNNING_THREADS; i++) //Dispatch many control threads that will report back to main (for now 5x) - 1 worker per state { spawn++; for (int ix = 0; ix < 16; ix++) { if (hexMode) { if (fscanf(infile, "%x", &states[i][ix]) != EOF) { ; } else { if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } } else { spawn--; } i = RUNNING_THREADS + 1; end = 0; break; } } else { ch = getc(infile); if (ch != EOF) { states[i][ix] = ch; } else { if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } } else { spawn--; } i = RUNNING_THREADS + 1; end = 0; break; } } } } //arrange data correctly for (int i = 0; i < spawn; i++) { uint8_t temp[16]; memcpy(&temp[0], &states[i][0], sizeof(uint8_t)); memcpy(&temp[4], &states[i][1], sizeof(uint8_t)); memcpy(&temp[8], &states[i][2], sizeof(uint8_t)); memcpy(&temp[12], &states[i][3], sizeof(uint8_t)); memcpy(&temp[1], &states[i][4], sizeof(uint8_t)); memcpy(&temp[5], &states[i][5], sizeof(uint8_t)); memcpy(&temp[9], &states[i][6], sizeof(uint8_t)); memcpy(&temp[13], 
&states[i][7], sizeof(uint8_t)); memcpy(&temp[2], &states[i][8], sizeof(uint8_t)); memcpy(&temp[6], &states[i][9], sizeof(uint8_t)); memcpy(&temp[10], &states[i][10], sizeof(uint8_t)); memcpy(&temp[14], &states[i][11], sizeof(uint8_t)); memcpy(&temp[3], &states[i][12], sizeof(uint8_t)); memcpy(&temp[7], &states[i][13], sizeof(uint8_t)); memcpy(&temp[11], &states[i][14], sizeof(uint8_t)); memcpy(&temp[15], &states[i][15], sizeof(uint8_t)); for (int c = 0; c < 16; c++) { memcpy(&states[i][c], &temp[c], sizeof(uint8_t)); } } //printf("\nCycle!: Spawn = %i", spawn); cudaDevAssist(cudaMemcpy(devState, *states, spawn*16*sizeof(uint8_t), cudaMemcpyHostToDevice), 426, true); cudaDevAssist(cudaDeviceSynchronize(), 268, true); cudaRunner<<<1,spawn>>>(devState); cudaDevAssist(cudaDeviceSynchronize(), 270, true); cudaDevAssist(cudaMemcpy(*states, devState, spawn*16*sizeof(uint8_t), cudaMemcpyDeviceToHost), 431, true); //Write results to out for (int i = 0; i < spawn; i++) { for (int ix = 0; ix < 4; ix++) { char hex[3]; sprintf(hex, "%02x", states[i][ix]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+4]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+8]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+12]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } } } } c_stop = clock(); float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000; printf("Done - Time taken: %f ms\n", diff); cudaFree(devState); cudaDeviceReset(); fclose(infile); fclose(outfile); fclose(keyfile); return 0; }
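A side note on the timing in main above: clock() brackets file I/O and host work as well as the kernel. If only the cudaRunner launch is of interest, CUDA events could be placed around the existing call; the fragment below is illustrative and reuses devState and spawn from the file.

cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);

cudaEventRecord(t0);
cudaRunner<<<1, spawn>>>(devState);        // the launch already present in the loop
cudaEventRecord(t1);
cudaEventSynchronize(t1);

float ms = 0.0f;
cudaEventElapsedTime(&ms, t0, t1);
printf("kernel time: %f ms\n", ms);

cudaEventDestroy(t0);
cudaEventDestroy(t1);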
8a3051fd7f8a15de6f146f84648c1446ae22cc81.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" {
__global__ void logkernel_32(const int lengthA, const float *a, float *b)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < lengthA) {
        b[i] = log(a[i]);
    }
}
}
8a3051fd7f8a15de6f146f84648c1446ae22cc81.cu
extern "C" {
__global__ void logkernel_32(const int lengthA, const float *a, float *b)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < lengthA) {
        b[i] = log(a[i]);
    }
}
}
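A minimal host-side wrapper for logkernel_32 above, for illustration only; the wrapper name and block size are arbitrary, and both pointers are assumed to be device memory.

#include <cuda_runtime.h>

extern "C" __global__ void logkernel_32(const int lengthA, const float *a, float *b);

void launch_logkernel_32(const float* d_in, float* d_out, int n)
{
    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;   // enough blocks to cover all n elements
    logkernel_32<<<blocks, threads>>>(n, d_in, d_out);
}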
0e95172ab09294cbf6aaef221042b7b769603fb6.hip
// !!! This is a file automatically generated by hipify!!! /*********************************************************************************** Created by Mohsen Safari. ************************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> //////////////////////////////////////////////////////////////////////////////// // Pure Functions //////////////////////////////////////////////////////////////////////////////// /*@ requires 0 <= p; ensures p < \result; pure int ExpTwo(int p) = 0 < p ? 2 * ExpTwo(p - 1) : 1; @*/ /*@ ensures |xs| == 0 ==> \result == 0; ensures |xs| == 1 ==> \result == head(xs); pure int intsum(seq<int> xs) = 0 < |xs| ? head(xs) + intsum(tail(xs)) : 0; @*/ /*@ requires n <= |xs|; ensures n < 0 ==> |Take(xs, n)| == 0; ensures 0 <= n ==> |Take(xs, n)| == n; ensures (\forall int i; 0 <= i && i < n; xs[i] == get(Take(xs, n), i)); pure seq<int> Take(seq<int> xs, int n) = 0 < n ? seq<int> { head(xs) } + Take(tail(xs), n - 1) : seq<int> { }; @*/ /*@ requires 0 <= i && i <= |xs|; ensures |\result| == |xs| - i; ensures (\forall int j; 0 <= j && j < |\result|; \result[j] == intsum(Take(xs, i+j))); pure seq<int> psum(seq<int> xs, int i) = i < |xs| ? seq<int> { intsum(Take(xs, i)) } + psum(xs, i + 1) : seq<int> { }; @*/ // TODO use this version instead of the above `psum` (the above version is just a helper definition). /*@ ensures |\result| == |xs|; ensures (\forall int j; 0 <= j && j < |\result|; \result[j] == intsum(Take(xs, j))); pure seq<int> psum2(seq<int> xs) = psum(xs, 0); @*/ /*@ requires |xs| >= 0; ensures |xs| == 0 ==> \result == xs; ensures |xs| == 1 ==> \result == xs; ensures |xs| == 2 ==> \result == seq<int> { head(xs) + head(tail(xs)) }; ensures |xs| % 2 == 0 ==> |\result| == |xs| / 2; pure seq<int> implode(seq<int> xs) = 1 < |xs| ? seq<int> { head(xs) + head(tail(xs)) } + implode(tail(tail(xs))) : xs; @*/ /*@ requires 0 <= p; pure int exp(int n, int p) = 0 < p ? n * exp(n, p - 1) : 1; @*/ /*@ requires 0 <= n; requires n < |xs|; pure int get(seq<int> xs, int n) = xs[n]; @*/ /*@ requires k > 0; requires |xs| == ExpTwo(k); requires i >= 0 && i <= |xs|; requires 1 <= lvl && lvl <= k; requires stride == ExpTwo(lvl-1); requires stride > 0 && stride < |xs|; ensures |\result| == |xs| - i; ensures (\forall int j; j >= 0 && j < |\result|; ((i < |xs|) && ((i+j) >= stride) && (((i+j) % (2*stride)) == (2*stride-1))) ==> \result[j] == xs[i+j] + xs[i+j - stride]); ensures (\forall int j; j >= 0 && j < |\result|; ((i < |xs|) && (((i+j) < stride) || (((i+j) % (2*stride)) != (2*stride-1)))) ==> \result[j] == xs[i+j]); pure seq<int> up(seq<int> xs, int stride, int i, int k, int lvl) = i < |xs| ? ( ((i % (2*stride)) == (2*stride-1) && (i >= stride)? 
seq<int> {xs[i] + xs[i-stride]} + up(xs, stride, i+1, k, lvl) : seq<int> {xs[i]} + up(xs, stride, i+1, k, lvl) )) : seq<int> {}; @*/ ////////////////////////////////////////////////////////////////////////////////////////Lemmas /*@ ensures \result && intsum(seq<int> { }) == 0; pure bool lemma_intsum_zero() = true; @*/ /*@ ensures \result && psum2(seq<int> { }) == seq<int> { }; pure bool lemma_psum_zero() = true; @*/ /*@ ensures \result && intsum(seq<int> { x }) == x; pure bool lemma_intsum_single(int x); @*/ /*@ requires |xs| == 1; ensures \result && psum2(xs) == seq<int> {0}; pure bool lemma_psum_single(seq<int> xs); @*/ /*@ requires |xs| >= 0; requires |ys| >= 0; ensures |xs| == 0 ==> intsum(xs + ys) == intsum(ys); ensures |ys| == 0 ==> intsum(xs + ys) == intsum(xs); ensures |xs + ys| == |xs| + |ys|; ensures \result && intsum(tail(xs) + ys) == intsum(tail(xs)) + intsum(ys); ensures \result && intsum(xs + ys) == intsum(xs) + intsum(ys); pure bool lemma_intsum_app(seq<int> xs, seq<int> ys); @*/ /*@ requires |xs| <= 1; ensures \result && xs == implode(xs); pure bool lemma_implode_base(seq<int> xs) = true; @*/ /*@ ensures \result && intsum(xs) == intsum(implode(xs)); pure bool lemma_implode_sum(seq<int> xs); @*/ /*@ requires 0 < n; ensures \result && ExpTwo(n) == 2 * ExpTwo(n - 1); pure bool lemma_exp2_red_mult(int n) = true; @*/ /*@ requires 0 < n; ensures \result && ExpTwo(n) / 2 == ExpTwo(n - 1); pure bool lemma_exp2_red_div(int n) = true; @*/ /*@ requires 0 <= n; ensures \result && 0 < ExpTwo(n); pure bool lemma_exp2_positive(int n); @*/ /*@ requires 0 <= i; requires i <= j; ensures \result && ExpTwo(i) <= ExpTwo(j); pure bool lemma_exp2_leq(int i, int j); @*/ /*@ requires i >= 0 && j >= 0; requires ExpTwo(i) == ExpTwo(j); ensures \result && i == j; pure bool power_two_lemma(int i, int j); @*/ /*@ requires |xs| % 2 == 0; ensures \result && |implode(xs)| == |xs| / 2; pure bool lemma_implode_length_mod_two(seq<int> xs); @*/ /*@ requires 0 < n && |xs| == ExpTwo(n); ensures \result && |implode(xs)| == ExpTwo(n - 1); pure bool lemma_implode_red_exp2(seq<int> xs, int n); @*/ /*@ requires 0 < i; requires i < |xs|; ensures \result && get(tail(xs), i - 1) == xs[i]; pure bool lemma_intseq_index_tail(seq<int> xs, int i) = true; @*/ /*@ requires |xs| % 2 == 0; requires 0 <= i && i < |implode(xs)|; requires (2 * i) < |xs|; requires (2 * i + 1) < |xs|; ensures \result && get(implode(xs), i) == xs[2 * i] + xs[2 * i + 1]; pure bool lemma_implode_get(seq<int> xs, int i); @*/ /*@ requires j >= 0 && j <= |implode(xs)|; requires |xs| % 2 == 0; requires |implode(xs)| == |xs|/2; ensures \result && (\forall int i; j <= i && i < |implode(xs)|; get(implode(xs), i) == xs[2 * i] + xs[2 * i + 1]); pure bool lemma_implode_get_all(seq<int> xs, int j); @*/ /*@ requires |xs| == 2 * |ys|; requires 0 <= |ys|; requires (\forall int i; 0 <= i && i < |ys|; ys[i] == xs[2*i] + xs[2*i+1]); ensures \result && ys == implode(xs); pure bool lemma_implode_rel(seq<int> xs, seq<int> ys); @*/ /*@ requires 0 <= i && i < |xs|; ensures \result && get(psum2(xs), i) == intsum(Take(xs, i)); pure bool lemma_psum_get(seq<int> xs, int i); @*/ /*@ requires j >= 0 && j <= |xs|; ensures \result && (\forall int i; j <= i && i < |xs|; get(psum2(xs), i) == intsum(Take(xs, i))); pure bool lemma_psum_get_all(seq<int> xs, int j); @*/ /*@ requires 0 < n && n <= |xs|; ensures \result && Take(xs, n) == Take(xs, n - 1) + seq<int> { xs[n - 1] }; pure bool missing_lemma_2(seq<int> xs, int n); @*/ /*@ requires |xs| % 2 == 0; requires |ys| % 2 == 0; ensures 
\result && implode(xs + ys) == implode(xs) + implode(ys); pure bool missing_lemma_3(seq<int> xs, seq<int> ys); @*/ /*@ ensures \result && xs + (ys + zs) == (xs + ys) + zs; pure bool intseq_concat_assoc(seq<int> xs, seq<int> ys, seq<int> zs) = true; @*/ /*@ requires |xs| % 2 == 0; requires 0 <= n && n < |implode(xs)|; requires |implode(xs)| == |xs| / 2; ensures \result && Take(implode(xs), n) == implode(Take(xs, 2 * n)); pure bool missing_lemma(seq<int> xs, int n); @*/ /*@ requires |xs| % 2 == 0; requires |implode(xs)| == |xs|/2; requires 0 <= i && i < |implode(xs)|; requires 2 * i < |xs|; ensures \result && get(psum2(implode(xs)), i) == intsum(Take(xs, 2 * i)); pure bool lemma_psum_Take2(seq<int> xs, int i); @*/ /*@ requires |xs| % 2 == 0; requires |implode(xs)| == |xs|/2; requires 0 <= i && i < |implode(xs)|; requires 2 * i < |xs|; ensures \result && get(psum2(implode(xs)), i) == get(psum2(xs), 2 * i); pure bool lemma_get_psum_implode(seq<int> xs, int i); @*/ /*@ requires 0 <= i; requires 2 * i + 1 < |xs|; ensures \result && get(psum2(xs), 2 * i + 1) == get(psum2(xs), 2 * i) + get(xs, 2 * i); pure bool lemma_combine_psum(seq<int> xs, int i); @*/ //////////////////////////////////////////////////////////////////////////////// //Kernel //////////////////////////////////////////////////////////////////////////////// /*@ //given seq<int> input_seq; context_everywhere output != NULL; context_everywhere k == 10; //context_everywhere |input_seq| == ExpTwo(k); context_everywhere opencl_gsize == ExpTwo(k); context_everywhere opencl_gcount == 1; //requires (2 * \ltid < ExpTwo(k)) ==> \pointer_index(output, 2 * \ltid, 1); //requires (2 * \ltid + 1 < ExpTwo(k)) ==> \pointer_index(output, 2 * \ltid + 1, 1); //ensures \pointer_index(output, \ltid, 1); @*/ __global__ void CUDA_Kernel_Blelloch(int* output, int k) { int tid = threadIdx.x; //@ assert tid == \ltid; //@ inhale (2 * tid < ExpTwo(k)) ==> \pointer_index(output, 2 * tid, 1); //@ inhale (2 * tid + 1 < ExpTwo(k)) ==> \pointer_index(output, 2 * tid + 1, 1); //@ inhale (tid == 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % 1 != 0; \pointer_index(output, i, 1)); //@ ghost seq<int> input_seq; //@ assume |input_seq| == ExpTwo(k); //@ assume (2 * tid < ExpTwo(k)) ==> output[2 * tid] == input_seq[2 * tid]; //@ assume (2 * tid + 1 < ExpTwo(k)) ==> output[2 * tid + 1] == input_seq[2 * tid + 1]; int indicator = 2 * tid + 1; int stride = 1; int lvl = 1; //@ ghost seq<seq<int> > Matrix_UP = seq<seq<int> > { input_seq }; //@ assert (\forall int i; 0 < i && i < lvl; Matrix_UP[i] == up(Matrix_UP[i - 1], stride/ExpTwo(lvl-i), 0, k, i)); //@ ghost seq<seq<int> > Matrix = seq<seq<int> > { input_seq }; /*@ loop_invariant k > 0; loop_invariant tid >= 0 && tid < ExpTwo(k); loop_invariant stride > 0; loop_invariant 1 <= lvl; loop_invariant stride == ExpTwo(lvl-1); loop_invariant lvl <= k+1; loop_invariant indicator + 1 == ExpTwo(lvl)*(tid+1); loop_invariant indicator + 1 == 2*stride*(tid+1); loop_invariant indicator > 0; loop_invariant stride <= ExpTwo(k); loop_invariant indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); loop_invariant indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); loop_invariant tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); loop_invariant (tid==0 && (stride == ExpTwo(k))) ==> \pointer_index(output, ExpTwo(k) - 1, 1); loop_invariant |Matrix_UP| == lvl; loop_invariant (\forall int i; 0 <= i && i < lvl; |Matrix_UP[i]| 
== ExpTwo(k)); loop_invariant lvl == 1 ==> Matrix_UP[lvl - 1] == input_seq; loop_invariant lvl > 1 && lvl < |Matrix_UP| ==> Matrix_UP[lvl] == up(Matrix_UP[lvl - 1], (stride/2) - 1, 0, k, lvl - 1); loop_invariant indicator < ExpTwo(k) ==> Matrix_UP[lvl - 1][indicator] == output[indicator]; loop_invariant indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator - stride] == output[indicator - stride]; loop_invariant lvl == k+1 ==> Matrix_UP[lvl-1][ExpTwo(k) - 1] == intsum(input_seq); loop_invariant lvl == k+1 ==> Matrix_UP[lvl-1][(ExpTwo(k) - 1)/2] == intsum(Take(input_seq, |input_seq|/2)); loop_invariant |Matrix| == lvl; loop_invariant (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); loop_invariant (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i)); loop_invariant (\forall int i; 0 < i && i < lvl; Matrix[i] == implode(Matrix[i - 1])); loop_invariant (\forall int i; 0 <= i && i < lvl; intsum(Matrix[i]) == intsum(input_seq)); loop_invariant Matrix[0] == input_seq; loop_invariant indicator < ExpTwo(k) && 2 * tid + 1 < |Matrix[lvl - 1]| ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1]; loop_invariant indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> output[indicator - stride] == Matrix[lvl - 1][2 * tid]; @*/ while(stride < ExpTwo(k)) { if(indicator < ExpTwo(k) && indicator >= stride) { //@ assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1]; //@ assert 2 * tid < |Matrix[lvl - 1]| ==> output[indicator - stride] == Matrix[lvl - 1][2 * tid]; output[indicator] = output[indicator] + output[indicator - stride]; //@ assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid]; } //@ assert lemma_implode_length_mod_two(Matrix[lvl - 1]); //@ assert lemma_implode_sum(Matrix[lvl - 1]); //@ assert lemma_implode_get_all(Matrix[lvl - 1], 0); //@ ghost Matrix = Matrix + seq<seq<int> > { implode(Matrix[lvl - 1]) }; //@ ghost tid < |implode(Matrix[lvl - 1])| ? 
(lemma_implode_get(Matrix[lvl - 1], tid) && (2 * tid + 1 < |Matrix[lvl - 1]| ==> get(implode(Matrix[lvl - 1]), tid) == Matrix[lvl - 1][2 * tid] + Matrix[lvl - 1][2 * tid + 1]) && (indicator < ExpTwo(k) && indicator >= stride ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid]) && (Matrix[lvl] == implode(Matrix[lvl - 1])) && (indicator < ExpTwo(k) && indicator >= stride ==> output[indicator] == Matrix[lvl][tid])) : true; /*if(tid < |implode(Matrix[lvl - 1])|){ lemma_implode_get(Matrix[lvl - 1], tid); assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> get(implode(Matrix[lvl - 1]), tid) == Matrix[lvl - 1][2 * tid] + Matrix[lvl - 1][2 * tid + 1]; assert indicator < output.length && indicator >= stride ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid]; assert Matrix[lvl] == implode(Matrix[lvl - 1]); assert indicator < output.length && indicator >= stride ==> output[indicator] == Matrix[lvl][tid]; }*/ /*@ context_everywhere k > 0; context_everywhere 1 <= lvl && lvl <= k; context_everywhere |Matrix| == lvl + 1; requires tid >= 0 && tid < ExpTwo(k); requires stride == ExpTwo(lvl-1); requires stride > 0 && stride < ExpTwo(k); requires indicator + 1 == ExpTwo(lvl)*(tid+1); requires indicator + 1 == 2*stride*(tid+1); requires indicator > 0; requires indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); ensures tid >= 0 && tid < ExpTwo(k); ensures 2 * stride == ExpTwo(lvl); ensures 2 * stride > 0 && 2 * stride <= ExpTwo(k); ensures 2 * indicator + 2 == ExpTwo(lvl+1)*(tid+1); ensures 2 * indicator + 2 == 2*stride*(tid+1); ensures 2 * indicator + 1 > 0; ensures 2 * indicator + 1 < ExpTwo(lvl) ==> \pointer_index(output, 2 * indicator + 1, 1); ensures 2 * indicator + 1 < ExpTwo(lvl) && 2 * indicator + 1 >= 2 * stride ==> \pointer_index(output, 2 * indicator + 1 - 2 * stride, 1); ensures tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(lvl) && (i + 1) % (2 * stride) != 0; \pointer_index(output, i, 1)); ensures (tid==0 && (2 * stride == ExpTwo(lvl))) ==> \pointer_index(output, ExpTwo(k) - 1, 1); @*/ __syncthreads(); //@ ghost Matrix_UP = Matrix_UP + seq<seq<int> > { up(Matrix_UP[lvl - 1], stride, 0, k, lvl) }; //@ assert (indicator < ExpTwo(k)) && (indicator >= stride) ==> Matrix_UP[lvl][indicator] == Matrix_UP[lvl - 1][indicator] + Matrix_UP[lvl - 1][indicator-stride]; indicator = 2 * indicator + 1; stride = 2 * stride; lvl = lvl + 1; //@ assert (\forall int i; 0 < i && i < lvl; Matrix_UP[i] == up(Matrix_UP[i - 1], stride/ExpTwo(lvl-i), 0, k, i)); //@ assert stride == ExpTwo(lvl-1); //@ assert lemma_exp2_red_mult(lvl); //@ assert ExpTwo(lvl) == 2 * ExpTwo(lvl - 1); //@ assert 2*stride == ExpTwo(lvl); //@ assert indicator + 1 == ExpTwo(lvl)*(tid+1); //@ assert indicator + 1 == 2*stride*(tid+1); } //@ assert stride == ExpTwo(lvl-1); //@ assert ExpTwo(lvl-1) == ExpTwo(k); //@ assert stride == ExpTwo(k); //@ assert power_two_lemma(lvl-1, k); //@ assert lvl == k + 1; //@ assert indicator < ExpTwo(k) ==> Matrix_UP[lvl - 1][indicator] == output[indicator]; //@ assert |Matrix| == lvl; //@ assert (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i)); //@ assert (\forall int i; 0 < i && i < k + 1; Matrix[i] == implode(Matrix[i - 1])); //@ assert (\forall int i; 0 <= i && i < k + 1; intsum(Matrix[i]) == intsum(input_seq)); 
//@ assert |Matrix[k]| == 1; //@ assert lemma_intsum_single(Matrix[k][0]); //@ assert intsum(Matrix[k]) == intsum(input_seq); //@ assert Matrix[k] == seq<int>{intsum(input_seq)}; //@ assert Matrix[0] == input_seq; //@ assert (\forall int i; 0 <= i && i < k + 1; 0 < |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); ///////////////////////////////////////////////////////////////////////////////// //@ assert indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> output[indicator - stride] == Matrix[lvl - 1][2 * tid]; /*@ context_everywhere k > 0; context_everywhere |Matrix_UP| == k + 1; context_everywhere |Matrix| == k + 1; context_everywhere lvl == k + 1; context stride == ExpTwo(k); context indicator + 1 == ExpTwo(lvl)*(tid+1); context indicator + 1 == 2*stride*(tid+1); context indicator > 0; context stride > 0 ; requires indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); requires (tid==0 && (stride == ExpTwo(k))) ==> \pointer_index(output, ExpTwo(k) - 1, 1); requires (\forall int i; 0 <= i && i <= k; |Matrix_UP[i]| == ExpTwo(k)); requires (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i)); requires (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); requires indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator] == output[indicator]; requires indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator - stride] == output[indicator - stride]; requires indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> output[indicator - stride] == Matrix[lvl - 1][2 * tid]; context tid >= 0 && tid < ExpTwo(k); //ensures stride == ExpTwo(k) / 2; //ensures indicator == ExpTwo(k) * tid + ExpTwo(k) - 1; //ensures stride > 0 ; //ensures indicator > 0; ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) ==> \pointer_index(output, ExpTwo(k) * \ltid + ExpTwo(k) - 1, 1); ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> \pointer_index(output, ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2, 1); ensures tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % (ExpTwo(k) / 2) != 0; \pointer_index(output, i, 1)); ensures (\forall int i; 0 <= i && i <= k; |Matrix_UP[i]| == ExpTwo(k)); ensures (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i)); ensures (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); //ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl - 1][ExpTwo(k) * \ltid + ExpTwo(k) - 1] == output[ExpTwo(k) * \ltid + ExpTwo(k) - 1]; //ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl - 1][ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == output[ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2]; //ensures 2 * tid < |Matrix[lvl-2]| && ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> output[ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == get(Matrix[lvl-2], 2 * tid); @*/ __syncthreads(); // (unstability) These come from the last three postconditions in the previous barrier: //@ assume ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl - 1][ExpTwo(k) * tid + ExpTwo(k) - 1] 
== output[ExpTwo(k) * tid + ExpTwo(k) - 1]; //@ assume ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl - 1][ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == output[ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2]; //@ assume 2 * tid < |Matrix[lvl-2]| && ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> output[ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == get(Matrix[lvl-2], 2 * tid); /////////////////////////////////////////////////////////////////////////////////////// Down indicator = ExpTwo(k) * tid + ExpTwo(k) - 1; // output.length * tid + output.length - 1; stride = ExpTwo(k) / 2; // output.length / 2; lvl = k - 1; //lvl - 2; int temp; //@ ghost seq<int> temp_seq = seq<int> { 0 }; //@ assert ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl + 1][indicator] == output[indicator]; //@ assert ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl + 1][indicator - stride] == output[indicator - stride]; if(indicator < ExpTwo(k)) { output[indicator] = 0; } /*@ loop_invariant k > 0; loop_invariant tid >= 0 && tid < ExpTwo(k); loop_invariant lvl <= k - 1; loop_invariant lvl >= -1; loop_invariant lvl >= 0 ==> stride == ExpTwo(lvl); loop_invariant lvl == -1 ==> stride == 0; loop_invariant stride == 0 ==> lvl == -1; loop_invariant stride >= 0; loop_invariant indicator >= 0; loop_invariant indicator+1 == ExpTwo(lvl+1)*(tid+1); loop_invariant indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); loop_invariant lvl >= 0 && indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); loop_invariant (tid==0 && stride > 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); //loop_invariant lvl == -1 ==> \pointer_index(output, tid, 1); //loop_invariant lvl == -1 ==> indicator == tid; //loop_invariant indicator == tid ==> lvl == -1; loop_invariant |temp_seq| == ExpTwo(k - (lvl + 1)); loop_invariant 0 < |temp_seq| && |temp_seq| <= ExpTwo(k); loop_invariant temp_seq == psum2(Matrix[lvl + 1]); loop_invariant (\forall int i; 0 <= i && i < k + 1; 0 < |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); loop_invariant (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i)); loop_invariant (\forall int i; 0 <= i && i < k + 1; intsum(Matrix[i]) == intsum(input_seq)); loop_invariant (\forall int i; 0 < i && i < k + 1; Matrix[i] == implode(Matrix[i - 1])); loop_invariant Matrix[0] == input_seq; loop_invariant Matrix[k] == seq<int>{ intsum(input_seq) }; loop_invariant tid < |temp_seq| && indicator < ExpTwo(k) ==> temp_seq[tid] == output[indicator]; loop_invariant lvl >= 0 && 2 * tid < |Matrix[lvl]| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator - stride] == get(Matrix[lvl], 2 * tid); @*/ while(stride >= 1) { if(indicator < ExpTwo(k) && indicator >= stride) { //@ assert tid < |temp_seq| ==> temp_seq[tid] == output[indicator]; temp = output[indicator]; //@ assert tid < |temp_seq| ==> temp == temp_seq[tid]; output[indicator] = output[indicator] + output[indicator - stride]; //@ assert tid < |temp_seq| ==> output[indicator] == temp_seq[tid] + output[indicator - stride]; //@ assert 2 * tid < |Matrix[lvl]| ==> output[indicator - stride] == get(Matrix[lvl], 2 * tid); //@ assert 2 * tid < |Matrix[lvl]| && tid < |temp_seq| ==> output[indicator] == temp_seq[tid] + get(Matrix[lvl], 2 * tid); //@ assert tid < 
|Matrix[lvl + 1]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl + 1]), tid); //@ assert tid < |Matrix[lvl + 1]| && 2 * tid < |Matrix[lvl]| ==> output[indicator] == get(psum2(Matrix[lvl + 1]), tid) + get(Matrix[lvl], 2 * tid); //@ assert Matrix[lvl + 1] == implode(Matrix[lvl]); //@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> output[indicator] == get(psum2(implode(Matrix[lvl])), tid) + get(Matrix[lvl], 2 * tid); //@ ghost tid < |implode(Matrix[lvl])| ? lemma_get_psum_implode(Matrix[lvl], tid) : true; /*if(tid < |implode(Matrix[lvl])|){ lemma_get_psum_implode(Matrix[lvl], tid); }*/ //@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> get(psum2(implode(Matrix[lvl])), tid) == get(psum2(Matrix[lvl]), 2 * tid); //@ assert 2 * tid < |Matrix[lvl]| ==> output[indicator] == get(psum2(Matrix[lvl]), 2 * tid) + get(Matrix[lvl], 2 * tid); //@ ghost 2 * tid + 1 < |Matrix[lvl]| ? lemma_combine_psum(Matrix[lvl], tid) : true; /*if(2 * tid + 1 < |Matrix[lvl]|){ lemma_combine_psum(Matrix[lvl], tid); }*/ //@ assert 2 * tid + 1 < |Matrix[lvl]| ==> get(psum2(Matrix[lvl]), 2 * tid + 1) == get(psum2(Matrix[lvl]), 2 * tid) + get(Matrix[lvl], 2 * tid); //@ assert 2 * tid + 1 < |Matrix[lvl]| ==> output[indicator] == get(psum2(Matrix[lvl]), 2 * tid + 1); //@ assert tid < |temp_seq| ==> temp == temp_seq[tid]; output[indicator - stride] = temp; //@ assert tid < |temp_seq| ==> output[indicator - stride] == temp_seq[tid]; //@ assert tid < |Matrix[lvl + 1]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl + 1]), tid); //@ assert Matrix[lvl + 1] == implode(Matrix[lvl]); //@ assert tid < |implode(Matrix[lvl])| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(implode(Matrix[lvl])), tid); //@ ghost tid < |implode(Matrix[lvl])| ? 
lemma_get_psum_implode(Matrix[lvl], tid) : true; /*if(tid < |implode(Matrix[lvl])|){ lemma_get_psum_implode(Matrix[lvl], tid); }*/ //@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> get(psum2(implode(Matrix[lvl])), tid) == get(psum2(Matrix[lvl]), 2 * tid); //@ assert 2 * tid < |Matrix[lvl]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl]), 2 * tid); //@ assert 2 * tid < |Matrix[lvl]| ==> output[indicator - stride] == get(psum2(Matrix[lvl]), 2 * tid); } //@ ghost temp_seq = psum2(Matrix[lvl]); //@ assert 2 * tid < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator - stride] == temp_seq[2 * tid]; //@ assert 2 * tid + 1 < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator] == temp_seq[2 * tid + 1]; /*@ context_everywhere lvl >= 0 && lvl <= k - 1; requires tid >= 0 && tid < ExpTwo(k); context_everywhere |temp_seq| == ExpTwo(k - lvl); context_everywhere 0 < |temp_seq| && |temp_seq| <= ExpTwo(k); context_everywhere |Matrix| == k + 1; //context lvl - 1 == -1 ==> (indicator - 1) / 2 == \ltid; //context (indicator - 1) / 2 == \ltid ==> lvl - 1 == -1; requires indicator >= 0; requires stride >= 1 ; requires stride == ExpTwo(lvl); requires indicator+1 == ExpTwo(lvl+1)*(\ltid+1); requires indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); //requires 2 * tid < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator - stride] == temp_seq[2 * tid]; //requires 2 * tid + 1 < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator] == temp_seq[2 * tid + 1]; requires (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i)); requires (\forall int i; 0 <= i && i < k + 1; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); //requires 2 * tid < |Matrix[lvl]| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator - stride] == get(psum2(Matrix[lvl]), 2 * tid); ensures tid >= 0 && tid < ExpTwo(k); ensures lvl-1 >= 0 ==> stride / 2 == ExpTwo(lvl - 1); ensures lvl-1 == -1 ==> stride / 2 == 0; ensures stride / 2 == 0 ==> lvl-1 == -1; ensures stride / 2 >= 0; ensures (indicator - 1) / 2 >= 0; ensures (indicator - 1) / 2+1 == ExpTwo(lvl)*(tid+1); ensures (indicator - 1) / 2 < ExpTwo(k) ==> \pointer_index(output, (indicator - 1) / 2, 1); ensures lvl-1 >= 0 && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> \pointer_index(output, (indicator - 1) / 2 - stride / 2, 1); ensures (tid==0 && stride/2 > 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % (stride/2) != 0; \pointer_index(output, i, 1)); ensures (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i)); ensures (\forall int i; 0 <= i && i < k + 1; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); //ensures tid < |temp_seq| && (indicator - 1) / 2 < ExpTwo(k) ==> temp_seq[tid] == output[(indicator - 1) / 2]; //ensures lvl-1 >= 0 && 2 * tid < |Matrix[lvl-1]| && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> output[(indicator - 1) / 2 - stride / 2] == get(Matrix[lvl-1], 2 * tid); @*/ __syncthreads(); // (unstability) These come from the last two postconditions in the previous barrier: //@ assume tid < |temp_seq| && (indicator - 1) / 2 < ExpTwo(k) ==> temp_seq[tid] == output[(indicator - 1) / 2]; //@ assume lvl-1 >= 
0 && 2 * tid < |Matrix[lvl-1]| && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> output[(indicator - 1) / 2 - stride / 2] == get(Matrix[lvl-1], 2 * tid); indicator = (indicator - 1) / 2; stride = stride / 2; lvl = lvl - 1; } //@ assert temp_seq == psum2(Matrix[0]); //@ assert Matrix[0] == input_seq; //@ assert temp_seq == psum2(input_seq); //@ assert tid < |temp_seq| && indicator < ExpTwo(k) ==> temp_seq[tid] == output[indicator]; } //////////////////////////////////////////////////////////////////////////////// // CUDA Functions //////////////////////////////////////////////////////////////////////////////// //@ ensures \pointer(\result, N, 1); int *vercorsMallocInt(int N); void vercorsFreeInt(int *ar); //@ ensures \pointer(\result, N, 1); int *vercorsCudaMallocInt(int N); void vercorsCudaFreeInt(int *addr); //@ context \pointer(src, N, read) ** \pointer(tgt, N, 1); //@ ensures (\forall int i; i >= 0 && i < N; src[i] == tgt[i]); void vercorsCudaMemcpyInt(int *tgt, int *src, int N, int direction); //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int CUDA_Host_Blelloch( int argc, char** argv) { int k = 10; // size of the input is 2^k int* host_input = vercorsMallocInt(ExpTwo(k)); // size of the host_input is 2^k int* host_output = vercorsMallocInt(ExpTwo(k)); // size of the host_output is 2^k //@ loop_invariant k == 10; //@ loop_invariant q >= 0 && q <= ExpTwo(k); //@ loop_invariant \pointer(host_input, ExpTwo(k), 1) ** \pointer(host_output, ExpTwo(k), 1); //@ loop_invariant (\forall int i; i >= 0 && i < q; host_input[i] == host_output[i]); for(int q=0; q<ExpTwo(k); q++) { host_output[q] = host_input[q]; } //Copy the arrays to device memory int* device_output; device_output = vercorsCudaMallocInt(ExpTwo(k)); vercorsCudaMemcpyInt(device_output, host_output, ExpTwo(k), hipMemcpyHostToDevice) ; //@ assert (\forall int i; i >= 0 && i < ExpTwo(k); host_output[i] == device_output[i]); //@ assert (\forall int i; i >= 0 && i < ExpTwo(k); host_output[i] == host_input[i]); //@ assert (\forall int i; i >= 0 && i < ExpTwo(k); device_output[i] == host_input[i]); //setup execution parameters int num_of_blocks = 1; int num_of_threads_per_block = ExpTwo(k); //Kernel launch hipLaunchKernelGGL(CUDA_Kernel_Blelloch, dim3(/*grid*/num_of_blocks), dim3(/*threads*/num_of_threads_per_block), 0, 0, device_output, k); // copy result from device to host //vercorsCudaMemcpyInt(host_output, device_output, ExpTwo(k), hipMemcpyDeviceToHost); // cleanup memory vercorsFreeInt(host_output); vercorsCudaFreeInt(device_output); }
0e95172ab09294cbf6aaef221042b7b769603fb6.cu
/*********************************************************************************** Created by Mohsen Safari. ************************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda.h> //////////////////////////////////////////////////////////////////////////////// // Pure Functions //////////////////////////////////////////////////////////////////////////////// /*@ requires 0 <= p; ensures p < \result; pure int ExpTwo(int p) = 0 < p ? 2 * ExpTwo(p - 1) : 1; @*/ /*@ ensures |xs| == 0 ==> \result == 0; ensures |xs| == 1 ==> \result == head(xs); pure int intsum(seq<int> xs) = 0 < |xs| ? head(xs) + intsum(tail(xs)) : 0; @*/ /*@ requires n <= |xs|; ensures n < 0 ==> |Take(xs, n)| == 0; ensures 0 <= n ==> |Take(xs, n)| == n; ensures (\forall int i; 0 <= i && i < n; xs[i] == get(Take(xs, n), i)); pure seq<int> Take(seq<int> xs, int n) = 0 < n ? seq<int> { head(xs) } + Take(tail(xs), n - 1) : seq<int> { }; @*/ /*@ requires 0 <= i && i <= |xs|; ensures |\result| == |xs| - i; ensures (\forall int j; 0 <= j && j < |\result|; \result[j] == intsum(Take(xs, i+j))); pure seq<int> psum(seq<int> xs, int i) = i < |xs| ? seq<int> { intsum(Take(xs, i)) } + psum(xs, i + 1) : seq<int> { }; @*/ // TODO use this version instead of the above `psum` (the above version is just a helper definition). /*@ ensures |\result| == |xs|; ensures (\forall int j; 0 <= j && j < |\result|; \result[j] == intsum(Take(xs, j))); pure seq<int> psum2(seq<int> xs) = psum(xs, 0); @*/ /*@ requires |xs| >= 0; ensures |xs| == 0 ==> \result == xs; ensures |xs| == 1 ==> \result == xs; ensures |xs| == 2 ==> \result == seq<int> { head(xs) + head(tail(xs)) }; ensures |xs| % 2 == 0 ==> |\result| == |xs| / 2; pure seq<int> implode(seq<int> xs) = 1 < |xs| ? seq<int> { head(xs) + head(tail(xs)) } + implode(tail(tail(xs))) : xs; @*/ /*@ requires 0 <= p; pure int exp(int n, int p) = 0 < p ? n * exp(n, p - 1) : 1; @*/ /*@ requires 0 <= n; requires n < |xs|; pure int get(seq<int> xs, int n) = xs[n]; @*/ /*@ requires k > 0; requires |xs| == ExpTwo(k); requires i >= 0 && i <= |xs|; requires 1 <= lvl && lvl <= k; requires stride == ExpTwo(lvl-1); requires stride > 0 && stride < |xs|; ensures |\result| == |xs| - i; ensures (\forall int j; j >= 0 && j < |\result|; ((i < |xs|) && ((i+j) >= stride) && (((i+j) % (2*stride)) == (2*stride-1))) ==> \result[j] == xs[i+j] + xs[i+j - stride]); ensures (\forall int j; j >= 0 && j < |\result|; ((i < |xs|) && (((i+j) < stride) || (((i+j) % (2*stride)) != (2*stride-1)))) ==> \result[j] == xs[i+j]); pure seq<int> up(seq<int> xs, int stride, int i, int k, int lvl) = i < |xs| ? ( ((i % (2*stride)) == (2*stride-1) && (i >= stride)? 
seq<int> {xs[i] + xs[i-stride]} + up(xs, stride, i+1, k, lvl) : seq<int> {xs[i]} + up(xs, stride, i+1, k, lvl) )) : seq<int> {}; @*/ ////////////////////////////////////////////////////////////////////////////////////////Lemmas /*@ ensures \result && intsum(seq<int> { }) == 0; pure bool lemma_intsum_zero() = true; @*/ /*@ ensures \result && psum2(seq<int> { }) == seq<int> { }; pure bool lemma_psum_zero() = true; @*/ /*@ ensures \result && intsum(seq<int> { x }) == x; pure bool lemma_intsum_single(int x); @*/ /*@ requires |xs| == 1; ensures \result && psum2(xs) == seq<int> {0}; pure bool lemma_psum_single(seq<int> xs); @*/ /*@ requires |xs| >= 0; requires |ys| >= 0; ensures |xs| == 0 ==> intsum(xs + ys) == intsum(ys); ensures |ys| == 0 ==> intsum(xs + ys) == intsum(xs); ensures |xs + ys| == |xs| + |ys|; ensures \result && intsum(tail(xs) + ys) == intsum(tail(xs)) + intsum(ys); ensures \result && intsum(xs + ys) == intsum(xs) + intsum(ys); pure bool lemma_intsum_app(seq<int> xs, seq<int> ys); @*/ /*@ requires |xs| <= 1; ensures \result && xs == implode(xs); pure bool lemma_implode_base(seq<int> xs) = true; @*/ /*@ ensures \result && intsum(xs) == intsum(implode(xs)); pure bool lemma_implode_sum(seq<int> xs); @*/ /*@ requires 0 < n; ensures \result && ExpTwo(n) == 2 * ExpTwo(n - 1); pure bool lemma_exp2_red_mult(int n) = true; @*/ /*@ requires 0 < n; ensures \result && ExpTwo(n) / 2 == ExpTwo(n - 1); pure bool lemma_exp2_red_div(int n) = true; @*/ /*@ requires 0 <= n; ensures \result && 0 < ExpTwo(n); pure bool lemma_exp2_positive(int n); @*/ /*@ requires 0 <= i; requires i <= j; ensures \result && ExpTwo(i) <= ExpTwo(j); pure bool lemma_exp2_leq(int i, int j); @*/ /*@ requires i >= 0 && j >= 0; requires ExpTwo(i) == ExpTwo(j); ensures \result && i == j; pure bool power_two_lemma(int i, int j); @*/ /*@ requires |xs| % 2 == 0; ensures \result && |implode(xs)| == |xs| / 2; pure bool lemma_implode_length_mod_two(seq<int> xs); @*/ /*@ requires 0 < n && |xs| == ExpTwo(n); ensures \result && |implode(xs)| == ExpTwo(n - 1); pure bool lemma_implode_red_exp2(seq<int> xs, int n); @*/ /*@ requires 0 < i; requires i < |xs|; ensures \result && get(tail(xs), i - 1) == xs[i]; pure bool lemma_intseq_index_tail(seq<int> xs, int i) = true; @*/ /*@ requires |xs| % 2 == 0; requires 0 <= i && i < |implode(xs)|; requires (2 * i) < |xs|; requires (2 * i + 1) < |xs|; ensures \result && get(implode(xs), i) == xs[2 * i] + xs[2 * i + 1]; pure bool lemma_implode_get(seq<int> xs, int i); @*/ /*@ requires j >= 0 && j <= |implode(xs)|; requires |xs| % 2 == 0; requires |implode(xs)| == |xs|/2; ensures \result && (\forall int i; j <= i && i < |implode(xs)|; get(implode(xs), i) == xs[2 * i] + xs[2 * i + 1]); pure bool lemma_implode_get_all(seq<int> xs, int j); @*/ /*@ requires |xs| == 2 * |ys|; requires 0 <= |ys|; requires (\forall int i; 0 <= i && i < |ys|; ys[i] == xs[2*i] + xs[2*i+1]); ensures \result && ys == implode(xs); pure bool lemma_implode_rel(seq<int> xs, seq<int> ys); @*/ /*@ requires 0 <= i && i < |xs|; ensures \result && get(psum2(xs), i) == intsum(Take(xs, i)); pure bool lemma_psum_get(seq<int> xs, int i); @*/ /*@ requires j >= 0 && j <= |xs|; ensures \result && (\forall int i; j <= i && i < |xs|; get(psum2(xs), i) == intsum(Take(xs, i))); pure bool lemma_psum_get_all(seq<int> xs, int j); @*/ /*@ requires 0 < n && n <= |xs|; ensures \result && Take(xs, n) == Take(xs, n - 1) + seq<int> { xs[n - 1] }; pure bool missing_lemma_2(seq<int> xs, int n); @*/ /*@ requires |xs| % 2 == 0; requires |ys| % 2 == 0; ensures 
\result && implode(xs + ys) == implode(xs) + implode(ys); pure bool missing_lemma_3(seq<int> xs, seq<int> ys); @*/ /*@ ensures \result && xs + (ys + zs) == (xs + ys) + zs; pure bool intseq_concat_assoc(seq<int> xs, seq<int> ys, seq<int> zs) = true; @*/ /*@ requires |xs| % 2 == 0; requires 0 <= n && n < |implode(xs)|; requires |implode(xs)| == |xs| / 2; ensures \result && Take(implode(xs), n) == implode(Take(xs, 2 * n)); pure bool missing_lemma(seq<int> xs, int n); @*/ /*@ requires |xs| % 2 == 0; requires |implode(xs)| == |xs|/2; requires 0 <= i && i < |implode(xs)|; requires 2 * i < |xs|; ensures \result && get(psum2(implode(xs)), i) == intsum(Take(xs, 2 * i)); pure bool lemma_psum_Take2(seq<int> xs, int i); @*/ /*@ requires |xs| % 2 == 0; requires |implode(xs)| == |xs|/2; requires 0 <= i && i < |implode(xs)|; requires 2 * i < |xs|; ensures \result && get(psum2(implode(xs)), i) == get(psum2(xs), 2 * i); pure bool lemma_get_psum_implode(seq<int> xs, int i); @*/ /*@ requires 0 <= i; requires 2 * i + 1 < |xs|; ensures \result && get(psum2(xs), 2 * i + 1) == get(psum2(xs), 2 * i) + get(xs, 2 * i); pure bool lemma_combine_psum(seq<int> xs, int i); @*/ //////////////////////////////////////////////////////////////////////////////// //Kernel //////////////////////////////////////////////////////////////////////////////// /*@ //given seq<int> input_seq; context_everywhere output != NULL; context_everywhere k == 10; //context_everywhere |input_seq| == ExpTwo(k); context_everywhere opencl_gsize == ExpTwo(k); context_everywhere opencl_gcount == 1; //requires (2 * \ltid < ExpTwo(k)) ==> \pointer_index(output, 2 * \ltid, 1); //requires (2 * \ltid + 1 < ExpTwo(k)) ==> \pointer_index(output, 2 * \ltid + 1, 1); //ensures \pointer_index(output, \ltid, 1); @*/ __global__ void CUDA_Kernel_Blelloch(int* output, int k) { int tid = threadIdx.x; //@ assert tid == \ltid; //@ inhale (2 * tid < ExpTwo(k)) ==> \pointer_index(output, 2 * tid, 1); //@ inhale (2 * tid + 1 < ExpTwo(k)) ==> \pointer_index(output, 2 * tid + 1, 1); //@ inhale (tid == 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % 1 != 0; \pointer_index(output, i, 1)); //@ ghost seq<int> input_seq; //@ assume |input_seq| == ExpTwo(k); //@ assume (2 * tid < ExpTwo(k)) ==> output[2 * tid] == input_seq[2 * tid]; //@ assume (2 * tid + 1 < ExpTwo(k)) ==> output[2 * tid + 1] == input_seq[2 * tid + 1]; int indicator = 2 * tid + 1; int stride = 1; int lvl = 1; //@ ghost seq<seq<int> > Matrix_UP = seq<seq<int> > { input_seq }; //@ assert (\forall int i; 0 < i && i < lvl; Matrix_UP[i] == up(Matrix_UP[i - 1], stride/ExpTwo(lvl-i), 0, k, i)); //@ ghost seq<seq<int> > Matrix = seq<seq<int> > { input_seq }; /*@ loop_invariant k > 0; loop_invariant tid >= 0 && tid < ExpTwo(k); loop_invariant stride > 0; loop_invariant 1 <= lvl; loop_invariant stride == ExpTwo(lvl-1); loop_invariant lvl <= k+1; loop_invariant indicator + 1 == ExpTwo(lvl)*(tid+1); loop_invariant indicator + 1 == 2*stride*(tid+1); loop_invariant indicator > 0; loop_invariant stride <= ExpTwo(k); loop_invariant indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); loop_invariant indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); loop_invariant tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); loop_invariant (tid==0 && (stride == ExpTwo(k))) ==> \pointer_index(output, ExpTwo(k) - 1, 1); loop_invariant |Matrix_UP| == lvl; loop_invariant (\forall int i; 0 <= i && i < lvl; |Matrix_UP[i]| 
== ExpTwo(k)); loop_invariant lvl == 1 ==> Matrix_UP[lvl - 1] == input_seq; loop_invariant lvl > 1 && lvl < |Matrix_UP| ==> Matrix_UP[lvl] == up(Matrix_UP[lvl - 1], (stride/2) - 1, 0, k, lvl - 1); loop_invariant indicator < ExpTwo(k) ==> Matrix_UP[lvl - 1][indicator] == output[indicator]; loop_invariant indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator - stride] == output[indicator - stride]; loop_invariant lvl == k+1 ==> Matrix_UP[lvl-1][ExpTwo(k) - 1] == intsum(input_seq); loop_invariant lvl == k+1 ==> Matrix_UP[lvl-1][(ExpTwo(k) - 1)/2] == intsum(Take(input_seq, |input_seq|/2)); loop_invariant |Matrix| == lvl; loop_invariant (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); loop_invariant (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i)); loop_invariant (\forall int i; 0 < i && i < lvl; Matrix[i] == implode(Matrix[i - 1])); loop_invariant (\forall int i; 0 <= i && i < lvl; intsum(Matrix[i]) == intsum(input_seq)); loop_invariant Matrix[0] == input_seq; loop_invariant indicator < ExpTwo(k) && 2 * tid + 1 < |Matrix[lvl - 1]| ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1]; loop_invariant indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> output[indicator - stride] == Matrix[lvl - 1][2 * tid]; @*/ while(stride < ExpTwo(k)) { if(indicator < ExpTwo(k) && indicator >= stride) { //@ assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1]; //@ assert 2 * tid < |Matrix[lvl - 1]| ==> output[indicator - stride] == Matrix[lvl - 1][2 * tid]; output[indicator] = output[indicator] + output[indicator - stride]; //@ assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid]; } //@ assert lemma_implode_length_mod_two(Matrix[lvl - 1]); //@ assert lemma_implode_sum(Matrix[lvl - 1]); //@ assert lemma_implode_get_all(Matrix[lvl - 1], 0); //@ ghost Matrix = Matrix + seq<seq<int> > { implode(Matrix[lvl - 1]) }; //@ ghost tid < |implode(Matrix[lvl - 1])| ? 
(lemma_implode_get(Matrix[lvl - 1], tid) && (2 * tid + 1 < |Matrix[lvl - 1]| ==> get(implode(Matrix[lvl - 1]), tid) == Matrix[lvl - 1][2 * tid] + Matrix[lvl - 1][2 * tid + 1]) && (indicator < ExpTwo(k) && indicator >= stride ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid]) && (Matrix[lvl] == implode(Matrix[lvl - 1])) && (indicator < ExpTwo(k) && indicator >= stride ==> output[indicator] == Matrix[lvl][tid])) : true; /*if(tid < |implode(Matrix[lvl - 1])|){ lemma_implode_get(Matrix[lvl - 1], tid); assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> get(implode(Matrix[lvl - 1]), tid) == Matrix[lvl - 1][2 * tid] + Matrix[lvl - 1][2 * tid + 1]; assert indicator < output.length && indicator >= stride ==> output[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid]; assert Matrix[lvl] == implode(Matrix[lvl - 1]); assert indicator < output.length && indicator >= stride ==> output[indicator] == Matrix[lvl][tid]; }*/ /*@ context_everywhere k > 0; context_everywhere 1 <= lvl && lvl <= k; context_everywhere |Matrix| == lvl + 1; requires tid >= 0 && tid < ExpTwo(k); requires stride == ExpTwo(lvl-1); requires stride > 0 && stride < ExpTwo(k); requires indicator + 1 == ExpTwo(lvl)*(tid+1); requires indicator + 1 == 2*stride*(tid+1); requires indicator > 0; requires indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); ensures tid >= 0 && tid < ExpTwo(k); ensures 2 * stride == ExpTwo(lvl); ensures 2 * stride > 0 && 2 * stride <= ExpTwo(k); ensures 2 * indicator + 2 == ExpTwo(lvl+1)*(tid+1); ensures 2 * indicator + 2 == 2*stride*(tid+1); ensures 2 * indicator + 1 > 0; ensures 2 * indicator + 1 < ExpTwo(lvl) ==> \pointer_index(output, 2 * indicator + 1, 1); ensures 2 * indicator + 1 < ExpTwo(lvl) && 2 * indicator + 1 >= 2 * stride ==> \pointer_index(output, 2 * indicator + 1 - 2 * stride, 1); ensures tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(lvl) && (i + 1) % (2 * stride) != 0; \pointer_index(output, i, 1)); ensures (tid==0 && (2 * stride == ExpTwo(lvl))) ==> \pointer_index(output, ExpTwo(k) - 1, 1); @*/ __syncthreads(); //@ ghost Matrix_UP = Matrix_UP + seq<seq<int> > { up(Matrix_UP[lvl - 1], stride, 0, k, lvl) }; //@ assert (indicator < ExpTwo(k)) && (indicator >= stride) ==> Matrix_UP[lvl][indicator] == Matrix_UP[lvl - 1][indicator] + Matrix_UP[lvl - 1][indicator-stride]; indicator = 2 * indicator + 1; stride = 2 * stride; lvl = lvl + 1; //@ assert (\forall int i; 0 < i && i < lvl; Matrix_UP[i] == up(Matrix_UP[i - 1], stride/ExpTwo(lvl-i), 0, k, i)); //@ assert stride == ExpTwo(lvl-1); //@ assert lemma_exp2_red_mult(lvl); //@ assert ExpTwo(lvl) == 2 * ExpTwo(lvl - 1); //@ assert 2*stride == ExpTwo(lvl); //@ assert indicator + 1 == ExpTwo(lvl)*(tid+1); //@ assert indicator + 1 == 2*stride*(tid+1); } //@ assert stride == ExpTwo(lvl-1); //@ assert ExpTwo(lvl-1) == ExpTwo(k); //@ assert stride == ExpTwo(k); //@ assert power_two_lemma(lvl-1, k); //@ assert lvl == k + 1; //@ assert indicator < ExpTwo(k) ==> Matrix_UP[lvl - 1][indicator] == output[indicator]; //@ assert |Matrix| == lvl; //@ assert (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i)); //@ assert (\forall int i; 0 < i && i < k + 1; Matrix[i] == implode(Matrix[i - 1])); //@ assert (\forall int i; 0 <= i && i < k + 1; intsum(Matrix[i]) == intsum(input_seq)); 
//@ assert |Matrix[k]| == 1; //@ assert lemma_intsum_single(Matrix[k][0]); //@ assert intsum(Matrix[k]) == intsum(input_seq); //@ assert Matrix[k] == seq<int>{intsum(input_seq)}; //@ assert Matrix[0] == input_seq; //@ assert (\forall int i; 0 <= i && i < k + 1; 0 < |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); ///////////////////////////////////////////////////////////////////////////////// //@ assert indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> output[indicator - stride] == Matrix[lvl - 1][2 * tid]; /*@ context_everywhere k > 0; context_everywhere |Matrix_UP| == k + 1; context_everywhere |Matrix| == k + 1; context_everywhere lvl == k + 1; context stride == ExpTwo(k); context indicator + 1 == ExpTwo(lvl)*(tid+1); context indicator + 1 == 2*stride*(tid+1); context indicator > 0; context stride > 0 ; requires indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); requires (tid==0 && (stride == ExpTwo(k))) ==> \pointer_index(output, ExpTwo(k) - 1, 1); requires (\forall int i; 0 <= i && i <= k; |Matrix_UP[i]| == ExpTwo(k)); requires (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i)); requires (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); requires indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator] == output[indicator]; requires indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator - stride] == output[indicator - stride]; requires indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> output[indicator - stride] == Matrix[lvl - 1][2 * tid]; context tid >= 0 && tid < ExpTwo(k); //ensures stride == ExpTwo(k) / 2; //ensures indicator == ExpTwo(k) * tid + ExpTwo(k) - 1; //ensures stride > 0 ; //ensures indicator > 0; ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) ==> \pointer_index(output, ExpTwo(k) * \ltid + ExpTwo(k) - 1, 1); ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> \pointer_index(output, ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2, 1); ensures tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % (ExpTwo(k) / 2) != 0; \pointer_index(output, i, 1)); ensures (\forall int i; 0 <= i && i <= k; |Matrix_UP[i]| == ExpTwo(k)); ensures (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i)); ensures (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); //ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl - 1][ExpTwo(k) * \ltid + ExpTwo(k) - 1] == output[ExpTwo(k) * \ltid + ExpTwo(k) - 1]; //ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl - 1][ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == output[ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2]; //ensures 2 * tid < |Matrix[lvl-2]| && ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> output[ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == get(Matrix[lvl-2], 2 * tid); @*/ __syncthreads(); // (unstability) These come from the last three postconditions in the previous barrier: //@ assume ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl - 1][ExpTwo(k) * tid + ExpTwo(k) - 1] 
== output[ExpTwo(k) * tid + ExpTwo(k) - 1]; //@ assume ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl - 1][ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == output[ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2]; //@ assume 2 * tid < |Matrix[lvl-2]| && ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> output[ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == get(Matrix[lvl-2], 2 * tid); /////////////////////////////////////////////////////////////////////////////////////// Down indicator = ExpTwo(k) * tid + ExpTwo(k) - 1; // output.length * tid + output.length - 1; stride = ExpTwo(k) / 2; // output.length / 2; lvl = k - 1; //lvl - 2; int temp; //@ ghost seq<int> temp_seq = seq<int> { 0 }; //@ assert ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl + 1][indicator] == output[indicator]; //@ assert ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl + 1][indicator - stride] == output[indicator - stride]; if(indicator < ExpTwo(k)) { output[indicator] = 0; } /*@ loop_invariant k > 0; loop_invariant tid >= 0 && tid < ExpTwo(k); loop_invariant lvl <= k - 1; loop_invariant lvl >= -1; loop_invariant lvl >= 0 ==> stride == ExpTwo(lvl); loop_invariant lvl == -1 ==> stride == 0; loop_invariant stride == 0 ==> lvl == -1; loop_invariant stride >= 0; loop_invariant indicator >= 0; loop_invariant indicator+1 == ExpTwo(lvl+1)*(tid+1); loop_invariant indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); loop_invariant lvl >= 0 && indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); loop_invariant (tid==0 && stride > 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); //loop_invariant lvl == -1 ==> \pointer_index(output, tid, 1); //loop_invariant lvl == -1 ==> indicator == tid; //loop_invariant indicator == tid ==> lvl == -1; loop_invariant |temp_seq| == ExpTwo(k - (lvl + 1)); loop_invariant 0 < |temp_seq| && |temp_seq| <= ExpTwo(k); loop_invariant temp_seq == psum2(Matrix[lvl + 1]); loop_invariant (\forall int i; 0 <= i && i < k + 1; 0 < |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); loop_invariant (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i)); loop_invariant (\forall int i; 0 <= i && i < k + 1; intsum(Matrix[i]) == intsum(input_seq)); loop_invariant (\forall int i; 0 < i && i < k + 1; Matrix[i] == implode(Matrix[i - 1])); loop_invariant Matrix[0] == input_seq; loop_invariant Matrix[k] == seq<int>{ intsum(input_seq) }; loop_invariant tid < |temp_seq| && indicator < ExpTwo(k) ==> temp_seq[tid] == output[indicator]; loop_invariant lvl >= 0 && 2 * tid < |Matrix[lvl]| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator - stride] == get(Matrix[lvl], 2 * tid); @*/ while(stride >= 1) { if(indicator < ExpTwo(k) && indicator >= stride) { //@ assert tid < |temp_seq| ==> temp_seq[tid] == output[indicator]; temp = output[indicator]; //@ assert tid < |temp_seq| ==> temp == temp_seq[tid]; output[indicator] = output[indicator] + output[indicator - stride]; //@ assert tid < |temp_seq| ==> output[indicator] == temp_seq[tid] + output[indicator - stride]; //@ assert 2 * tid < |Matrix[lvl]| ==> output[indicator - stride] == get(Matrix[lvl], 2 * tid); //@ assert 2 * tid < |Matrix[lvl]| && tid < |temp_seq| ==> output[indicator] == temp_seq[tid] + get(Matrix[lvl], 2 * tid); //@ assert tid < 
|Matrix[lvl + 1]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl + 1]), tid); //@ assert tid < |Matrix[lvl + 1]| && 2 * tid < |Matrix[lvl]| ==> output[indicator] == get(psum2(Matrix[lvl + 1]), tid) + get(Matrix[lvl], 2 * tid); //@ assert Matrix[lvl + 1] == implode(Matrix[lvl]); //@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> output[indicator] == get(psum2(implode(Matrix[lvl])), tid) + get(Matrix[lvl], 2 * tid); //@ ghost tid < |implode(Matrix[lvl])| ? lemma_get_psum_implode(Matrix[lvl], tid) : true; /*if(tid < |implode(Matrix[lvl])|){ lemma_get_psum_implode(Matrix[lvl], tid); }*/ //@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> get(psum2(implode(Matrix[lvl])), tid) == get(psum2(Matrix[lvl]), 2 * tid); //@ assert 2 * tid < |Matrix[lvl]| ==> output[indicator] == get(psum2(Matrix[lvl]), 2 * tid) + get(Matrix[lvl], 2 * tid); //@ ghost 2 * tid + 1 < |Matrix[lvl]| ? lemma_combine_psum(Matrix[lvl], tid) : true; /*if(2 * tid + 1 < |Matrix[lvl]|){ lemma_combine_psum(Matrix[lvl], tid); }*/ //@ assert 2 * tid + 1 < |Matrix[lvl]| ==> get(psum2(Matrix[lvl]), 2 * tid + 1) == get(psum2(Matrix[lvl]), 2 * tid) + get(Matrix[lvl], 2 * tid); //@ assert 2 * tid + 1 < |Matrix[lvl]| ==> output[indicator] == get(psum2(Matrix[lvl]), 2 * tid + 1); //@ assert tid < |temp_seq| ==> temp == temp_seq[tid]; output[indicator - stride] = temp; //@ assert tid < |temp_seq| ==> output[indicator - stride] == temp_seq[tid]; //@ assert tid < |Matrix[lvl + 1]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl + 1]), tid); //@ assert Matrix[lvl + 1] == implode(Matrix[lvl]); //@ assert tid < |implode(Matrix[lvl])| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(implode(Matrix[lvl])), tid); //@ ghost tid < |implode(Matrix[lvl])| ? 
lemma_get_psum_implode(Matrix[lvl], tid) : true; /*if(tid < |implode(Matrix[lvl])|){ lemma_get_psum_implode(Matrix[lvl], tid); }*/ //@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> get(psum2(implode(Matrix[lvl])), tid) == get(psum2(Matrix[lvl]), 2 * tid); //@ assert 2 * tid < |Matrix[lvl]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl]), 2 * tid); //@ assert 2 * tid < |Matrix[lvl]| ==> output[indicator - stride] == get(psum2(Matrix[lvl]), 2 * tid); } //@ ghost temp_seq = psum2(Matrix[lvl]); //@ assert 2 * tid < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator - stride] == temp_seq[2 * tid]; //@ assert 2 * tid + 1 < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator] == temp_seq[2 * tid + 1]; /*@ context_everywhere lvl >= 0 && lvl <= k - 1; requires tid >= 0 && tid < ExpTwo(k); context_everywhere |temp_seq| == ExpTwo(k - lvl); context_everywhere 0 < |temp_seq| && |temp_seq| <= ExpTwo(k); context_everywhere |Matrix| == k + 1; //context lvl - 1 == -1 ==> (indicator - 1) / 2 == \ltid; //context (indicator - 1) / 2 == \ltid ==> lvl - 1 == -1; requires indicator >= 0; requires stride >= 1 ; requires stride == ExpTwo(lvl); requires indicator+1 == ExpTwo(lvl+1)*(\ltid+1); requires indicator < ExpTwo(k) ==> \pointer_index(output, indicator, 1); requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(output, indicator - stride, 1); requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(output, i, 1)); //requires 2 * tid < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator - stride] == temp_seq[2 * tid]; //requires 2 * tid + 1 < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator] == temp_seq[2 * tid + 1]; requires (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i)); requires (\forall int i; 0 <= i && i < k + 1; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); //requires 2 * tid < |Matrix[lvl]| && indicator < ExpTwo(k) && indicator >= stride ==> output[indicator - stride] == get(psum2(Matrix[lvl]), 2 * tid); ensures tid >= 0 && tid < ExpTwo(k); ensures lvl-1 >= 0 ==> stride / 2 == ExpTwo(lvl - 1); ensures lvl-1 == -1 ==> stride / 2 == 0; ensures stride / 2 == 0 ==> lvl-1 == -1; ensures stride / 2 >= 0; ensures (indicator - 1) / 2 >= 0; ensures (indicator - 1) / 2+1 == ExpTwo(lvl)*(tid+1); ensures (indicator - 1) / 2 < ExpTwo(k) ==> \pointer_index(output, (indicator - 1) / 2, 1); ensures lvl-1 >= 0 && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> \pointer_index(output, (indicator - 1) / 2 - stride / 2, 1); ensures (tid==0 && stride/2 > 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % (stride/2) != 0; \pointer_index(output, i, 1)); ensures (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i)); ensures (\forall int i; 0 <= i && i < k + 1; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k)); //ensures tid < |temp_seq| && (indicator - 1) / 2 < ExpTwo(k) ==> temp_seq[tid] == output[(indicator - 1) / 2]; //ensures lvl-1 >= 0 && 2 * tid < |Matrix[lvl-1]| && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> output[(indicator - 1) / 2 - stride / 2] == get(Matrix[lvl-1], 2 * tid); @*/ __syncthreads(); // (unstability) These come from the last two postconditions in the previous barrier: //@ assume tid < |temp_seq| && (indicator - 1) / 2 < ExpTwo(k) ==> temp_seq[tid] == output[(indicator - 1) / 2]; //@ assume lvl-1 >= 
0 && 2 * tid < |Matrix[lvl-1]| && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> output[(indicator - 1) / 2 - stride / 2] == get(Matrix[lvl-1], 2 * tid); indicator = (indicator - 1) / 2; stride = stride / 2; lvl = lvl - 1; } //@ assert temp_seq == psum2(Matrix[0]); //@ assert Matrix[0] == input_seq; //@ assert temp_seq == psum2(input_seq); //@ assert tid < |temp_seq| && indicator < ExpTwo(k) ==> temp_seq[tid] == output[indicator]; } //////////////////////////////////////////////////////////////////////////////// // CUDA Functions //////////////////////////////////////////////////////////////////////////////// //@ ensures \pointer(\result, N, 1); int *vercorsMallocInt(int N); void vercorsFreeInt(int *ar); //@ ensures \pointer(\result, N, 1); int *vercorsCudaMallocInt(int N); void vercorsCudaFreeInt(int *addr); //@ context \pointer(src, N, read) ** \pointer(tgt, N, 1); //@ ensures (\forall int i; i >= 0 && i < N; src[i] == tgt[i]); void vercorsCudaMemcpyInt(int *tgt, int *src, int N, int direction); //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int CUDA_Host_Blelloch( int argc, char** argv) { int k = 10; // size of the input is 2^k int* host_input = vercorsMallocInt(ExpTwo(k)); // size of the host_input is 2^k int* host_output = vercorsMallocInt(ExpTwo(k)); // size of the host_output is 2^k //@ loop_invariant k == 10; //@ loop_invariant q >= 0 && q <= ExpTwo(k); //@ loop_invariant \pointer(host_input, ExpTwo(k), 1) ** \pointer(host_output, ExpTwo(k), 1); //@ loop_invariant (\forall int i; i >= 0 && i < q; host_input[i] == host_output[i]); for(int q=0; q<ExpTwo(k); q++) { host_output[q] = host_input[q]; } //Copy the arrays to device memory int* device_output; device_output = vercorsCudaMallocInt(ExpTwo(k)); vercorsCudaMemcpyInt(device_output, host_output, ExpTwo(k), cudaMemcpyHostToDevice) ; //@ assert (\forall int i; i >= 0 && i < ExpTwo(k); host_output[i] == device_output[i]); //@ assert (\forall int i; i >= 0 && i < ExpTwo(k); host_output[i] == host_input[i]); //@ assert (\forall int i; i >= 0 && i < ExpTwo(k); device_output[i] == host_input[i]); //setup execution parameters int num_of_blocks = 1; int num_of_threads_per_block = ExpTwo(k); //Kernel launch CUDA_Kernel_Blelloch<<< /*grid*/num_of_blocks, /*threads*/num_of_threads_per_block/*, 0*/ >>>(device_output, k); // copy result from device to host //vercorsCudaMemcpyInt(host_output, device_output, ExpTwo(k), cudaMemcpyDeviceToHost); // cleanup memory vercorsFreeInt(host_output); vercorsCudaFreeInt(device_output); }
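The verified kernel in the pair above interleaves a standard Blelloch exclusive scan (up-sweep, clear the root, down-sweep) with VerCors ghost state and barrier contracts. For readers who only want the bare algorithm, the following is a minimal, unannotated sketch of the same one-block, global-memory indicator/stride scheme; the kernel name, the all-ones host-side check and the fixed n = 2^10 are illustrative assumptions, not part of the verified development.

#include <cstdio>
#include <cuda_runtime.h>

// One block of n threads computes an exclusive prefix sum of n = 2^k ints,
// using the same indicator/stride scheme as the verified kernel above.
__global__ void blelloch_scan_sketch(int *data, int n) {
    int tid = threadIdx.x;

    // Up-sweep: accumulate pair sums at the odd "indicator" positions.
    int indicator = 2 * tid + 1;
    int stride = 1;
    while (stride < n) {
        if (indicator < n && indicator >= stride)
            data[indicator] += data[indicator - stride];
        __syncthreads();
        indicator = 2 * indicator + 1;
        stride *= 2;
    }

    // Clear the root; only thread 0 has indicator == n - 1 at this point.
    indicator = n * tid + n - 1;
    stride = n / 2;
    if (indicator < n)
        data[indicator] = 0;

    // Down-sweep: swap-and-add the partial sums back down the implicit tree.
    while (stride >= 1) {
        if (indicator < n && indicator >= stride) {
            int tmp = data[indicator];
            data[indicator] += data[indicator - stride];
            data[indicator - stride] = tmp;
        }
        __syncthreads();
        indicator = (indicator - 1) / 2;
        stride /= 2;
    }
}

int main() {
    const int n = 1 << 10;                    // mirrors k == 10 above
    static int h[1 << 10];
    int *d;
    for (int i = 0; i < n; ++i) h[i] = 1;     // exclusive scan of ones -> 0, 1, 2, ...
    cudaMalloc(&d, n * sizeof(int));
    cudaMemcpy(d, h, n * sizeof(int), cudaMemcpyHostToDevice);
    blelloch_scan_sketch<<<1, n>>>(d, n);
    cudaMemcpy(h, d, n * sizeof(int), cudaMemcpyDeviceToHost);
    printf("out[0]=%d out[1]=%d out[%d]=%d\n", h[0], h[1], n - 1, h[n - 1]);  // expect 0 1 1023
    cudaFree(d);
    return 0;
}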
0ac777171b7e17d28d4465cc139b096f0de6b56f.hip
// !!! This is a file automatically generated by hipify!!! /** * Programmation GPU * Universit Pierre et Marie Curie * Calcul de convolution sur une image. */ /** * V0 * */ #include <hip/hip_runtime.h> #include <stdio.h> extern "C" double my_gettimeofday(); /** * Controle des erreurs CUDA et debugging. */ #ifdef CUDA_DEBUG #define CUDA_SYNC_ERROR() { \ hipError_t sync_error; \ hipDeviceSynchronize(); \ sync_error = hipGetLastError(); \ if(sync_error != hipSuccess) { \ fprintf(stderr, "[CUDA SYNC ERROR at %s:%d -> %s]\n", \ __FILE__ , __LINE__, hipGetErrorString(sync_error)); \ exit(EXIT_FAILURE); \ } \ } #else /* #ifdef CUDA_DEBUG */ #define CUDA_SYNC_ERROR() #endif /* #ifdef CUDA_DEBUG */ #define CUDA_ERROR(cuda_call) { \ hipError_t error = cuda_call; \ if(error != hipSuccess){ \ fprintf(stderr, "[CUDA ERROR at %s:%d -> %s]\n", \ __FILE__ , __LINE__, hipGetErrorString(error)); \ exit(EXIT_FAILURE); \ } \ CUDA_SYNC_ERROR(); \ } /** * Retourne le quotient entier superieur ou egal a "a/b". * D'apres : CUDA SDK 4.1 */ static int iDivUp(int a, int b){ return ((a % b != 0) ? (a / b + 1) : (a / b)); } __global__ void convolKernel(float* d_buf, float* d_buf_aux, int nbl, int nbc){ int j = blockDim.x*blockIdx.x + threadIdx.x; int i = blockDim.y*blockIdx.y + threadIdx.y; if (i<nbl && j<nbc) { //Copie depuis convol.c //===================== /*** filtre moyenneur CONVOL_MOYENNE2 (filtre moyenneur avec * un poid central plus fort): * Rq: pour les bords, moyenne avec uniquement les cases presentes */ float denominateur = 0.0f; float numerateur = 0.0f; float poids_central; if (i<nbl-1){ numerateur += d_buf[(i+1)*nbc+j]; ++denominateur; if (j>0){ numerateur += d_buf[(i+1)*nbc+j-1]; ++denominateur; } if (j<nbc-1){ numerateur += d_buf[(i+1)*nbc+j+1]; ++denominateur; } } if (j>0){ numerateur += d_buf[(i)*nbc+j-1]; ++denominateur; } if (j<nbc-1){ numerateur += d_buf[(i)*nbc+j+1]; ++denominateur; } if (i>0){ numerateur += d_buf[(i-1)*nbc+j]; ++denominateur; if (j>0){ numerateur += d_buf[(i-1)*nbc+j-1]; ++denominateur; } if (j<nbc-1){ numerateur += d_buf[(i-1)*nbc+j+1]; ++denominateur; } } poids_central = denominateur*0.5f; /* poids central = 50% autres poids */ numerateur += poids_central*d_buf[(i)*nbc+j]; denominateur += poids_central; d_buf_aux[i*nbc+j] = numerateur/denominateur; } } /** * Effectue 'nbiter' convolutions sur GPU et retourne * le pointeur vers le buffer contenant la derniere convolution. */ extern "C" float *gpu_multiples_convolutions(float buf[], float buf_aux[], int nbl, int nbc, int nbiter, int nbThreadsParBloc){ /*** TODO ***/; float *d_buf, *d_buf_aux; int grilleX, grilleY; int taille_alloc = nbc * nbl * sizeof(float); hipMalloc((void **) &d_buf, taille_alloc); hipMalloc((void **) &d_buf_aux, taille_alloc); hipMemcpy(d_buf, buf, taille_alloc, hipMemcpyHostToDevice); hipMemcpy(d_buf_aux, buf_aux, taille_alloc, hipMemcpyHostToDevice); grilleX = ceil((float)nbc/(float)nbThreadsParBloc); grilleY = ceil((float)nbl/(float)nbThreadsParBloc); dim3 threads_par_bloc(nbThreadsParBloc, nbThreadsParBloc); dim3 taille_grille(grilleX, grilleY); int i; for(i=0; i<nbiter; i++){ hipLaunchKernelGGL(( convolKernel), dim3(taille_grille), dim3(threads_par_bloc), 0, 0, d_buf, d_buf_aux, nbl, nbc); hipMemcpy(d_buf, d_buf_aux, taille_alloc, hipMemcpyDeviceToDevice); } hipMemcpy(buf, d_buf, taille_alloc, hipMemcpyDeviceToHost); return buf; }
0ac777171b7e17d28d4465cc139b096f0de6b56f.cu
/** * Programmation GPU * Université Pierre et Marie Curie * Calcul de convolution sur une image. */ /** * V0 * */ #include <cuda.h> #include <stdio.h> extern "C" double my_gettimeofday(); /** * Controle des erreurs CUDA et debugging. */ #ifdef CUDA_DEBUG #define CUDA_SYNC_ERROR() { \ cudaError_t sync_error; \ cudaDeviceSynchronize(); \ sync_error = cudaGetLastError(); \ if(sync_error != cudaSuccess) { \ fprintf(stderr, "[CUDA SYNC ERROR at %s:%d -> %s]\n", \ __FILE__ , __LINE__, cudaGetErrorString(sync_error)); \ exit(EXIT_FAILURE); \ } \ } #else /* #ifdef CUDA_DEBUG */ #define CUDA_SYNC_ERROR() #endif /* #ifdef CUDA_DEBUG */ #define CUDA_ERROR(cuda_call) { \ cudaError_t error = cuda_call; \ if(error != cudaSuccess){ \ fprintf(stderr, "[CUDA ERROR at %s:%d -> %s]\n", \ __FILE__ , __LINE__, cudaGetErrorString(error)); \ exit(EXIT_FAILURE); \ } \ CUDA_SYNC_ERROR(); \ } /** * Retourne le quotient entier superieur ou egal a "a/b". * D'apres : CUDA SDK 4.1 */ static int iDivUp(int a, int b){ return ((a % b != 0) ? (a / b + 1) : (a / b)); } __global__ void convolKernel(float* d_buf, float* d_buf_aux, int nbl, int nbc){ int j = blockDim.x*blockIdx.x + threadIdx.x; int i = blockDim.y*blockIdx.y + threadIdx.y; if (i<nbl && j<nbc) { //Copie depuis convol.c //===================== /*** filtre moyenneur CONVOL_MOYENNE2 (filtre moyenneur avec * un poid central plus fort): * Rq: pour les bords, moyenne avec uniquement les cases presentes */ float denominateur = 0.0f; float numerateur = 0.0f; float poids_central; if (i<nbl-1){ numerateur += d_buf[(i+1)*nbc+j]; ++denominateur; if (j>0){ numerateur += d_buf[(i+1)*nbc+j-1]; ++denominateur; } if (j<nbc-1){ numerateur += d_buf[(i+1)*nbc+j+1]; ++denominateur; } } if (j>0){ numerateur += d_buf[(i)*nbc+j-1]; ++denominateur; } if (j<nbc-1){ numerateur += d_buf[(i)*nbc+j+1]; ++denominateur; } if (i>0){ numerateur += d_buf[(i-1)*nbc+j]; ++denominateur; if (j>0){ numerateur += d_buf[(i-1)*nbc+j-1]; ++denominateur; } if (j<nbc-1){ numerateur += d_buf[(i-1)*nbc+j+1]; ++denominateur; } } poids_central = denominateur*0.5f; /* poids central = 50% autres poids */ numerateur += poids_central*d_buf[(i)*nbc+j]; denominateur += poids_central; d_buf_aux[i*nbc+j] = numerateur/denominateur; } } /** * Effectue 'nbiter' convolutions sur GPU et retourne * le pointeur vers le buffer contenant la derniere convolution. */ extern "C" float *gpu_multiples_convolutions(float buf[], float buf_aux[], int nbl, int nbc, int nbiter, int nbThreadsParBloc){ /*** TODO ***/; float *d_buf, *d_buf_aux; int grilleX, grilleY; int taille_alloc = nbc * nbl * sizeof(float); cudaMalloc((void **) &d_buf, taille_alloc); cudaMalloc((void **) &d_buf_aux, taille_alloc); cudaMemcpy(d_buf, buf, taille_alloc, cudaMemcpyHostToDevice); cudaMemcpy(d_buf_aux, buf_aux, taille_alloc, cudaMemcpyHostToDevice); grilleX = ceil((float)nbc/(float)nbThreadsParBloc); grilleY = ceil((float)nbl/(float)nbThreadsParBloc); dim3 threads_par_bloc(nbThreadsParBloc, nbThreadsParBloc); dim3 taille_grille(grilleX, grilleY); int i; for(i=0; i<nbiter; i++){ convolKernel<<<taille_grille, threads_par_bloc>>>(d_buf, d_buf_aux, nbl, nbc); cudaMemcpy(d_buf, d_buf_aux, taille_alloc, cudaMemcpyDeviceToDevice); } cudaMemcpy(buf, d_buf, taille_alloc, cudaMemcpyDeviceToHost); return buf; }
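The file above exports gpu_multiples_convolutions as the C entry point that copies the image and its scratch buffer to the GPU, applies the averaging filter nbiter times and copies the result back into buf. A minimal host driver for it could look like the sketch below; the image size, the fake grey-level fill, the iteration count and the 16-thread block edge are assumptions for illustration, and the original project's convol.c (image I/O, my_gettimeofday timing) is not reproduced here.

#include <cstdio>
#include <cstdlib>

// Declared and defined in the .cu file above; compile and link both together.
extern "C" float *gpu_multiples_convolutions(float buf[], float buf_aux[],
                                             int nbl, int nbc,
                                             int nbiter, int nbThreadsParBloc);

int main() {
    // Hypothetical image dimensions, iteration count and block edge.
    const int nbl = 512, nbc = 512, nbiter = 10, threads = 16;

    float *buf     = (float *)malloc(nbl * nbc * sizeof(float));
    float *buf_aux = (float *)malloc(nbl * nbc * sizeof(float));
    for (int i = 0; i < nbl * nbc; ++i) {
        buf[i] = (float)(rand() % 256);   // fake grey-level image
        buf_aux[i] = 0.0f;                // scratch buffer required by the interface
    }

    // After the call, buf holds the image smoothed nbiter times.
    float *result = gpu_multiples_convolutions(buf, buf_aux, nbl, nbc,
                                               nbiter, threads);
    printf("pixel (0,0) after %d convolutions: %f\n", nbiter, result[0]);

    free(buf);
    free(buf_aux);
    return 0;
}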
7e674cd1a16ca7e98d605ae4cb6fdfaa012d85fa.hip
// !!! This is a file automatically generated by hipify!!! // // Created by depaulsmiller on 1/15/21. // #include <iostream> #include <thread> #include <unistd.h> #include <atomic> #include <kvcg.cuh> #include <vector> #include <zipf.hh> #include <boost/property_tree/ptree.hpp> #include <boost/property_tree/json_parser.hpp> #include <set> namespace pt = boost::property_tree; using BatchWrapper = std::vector<RequestWrapper<unsigned long long, data_t *>>; struct SelfSimilarWorkloadConfig { SelfSimilarWorkloadConfig() { range = 1000000000; n = 10000; ops = 100000000; keysize = 8; ratio = 95; h = 0.5; } SelfSimilarWorkloadConfig(std::string filename) { pt::ptree root; pt::read_json(filename, root); h = root.get<double>("h", 0.1); range = root.get<int>("range", 10000000); n = root.get<int>("n", 10000); ops = root.get<int>("ops", 10000); keysize = root.get<size_t>("keysize", 8); ratio = root.get<int>("ratio", 95); } ~SelfSimilarWorkloadConfig() {} double h; int range; int n; int ops; size_t keysize; int ratio; }; SelfSimilarWorkloadConfig workloadConfig; float rand_0_1(unsigned *seed) { return rand_r(seed) / (float) RAND_MAX; } unsigned long long selfsimilar(long range, double h, unsigned *seed) { return (1 + (unsigned long long) (range * ::pow(rand_0_1(seed), (log(h) / log(1.0 - h))))); }; extern "C" int getBatchesToRun() { return workloadConfig.n; } extern "C" void initWorkload() { } extern "C" void initWorkloadFile(std::string filename) { workloadConfig = SelfSimilarWorkloadConfig(filename); } std::vector<RequestWrapper<unsigned long long, data_t *>> generateWorkloadLargeKey(size_t keySize, int size, int range, unsigned *seed, int ratioOfReads, double h) { std::vector<RequestWrapper<unsigned long long, data_t *>> vec; for (int i = 0; i < size; i++) { if (rand_r(seed) % 100 < ratioOfReads) { vec.push_back({selfsimilar(range, h, seed), nullptr, REQUEST_GET}); } else { if (rand_r(seed) % 100 < 50) { vec.push_back({selfsimilar(range, h, seed), new data_t(keySize), REQUEST_INSERT}); } else { vec.push_back( {selfsimilar(range, h, seed), nullptr, REQUEST_REMOVE}); } } } return vec; } extern "C" BatchWrapper generateWorkloadBatch(unsigned int *seed, unsigned batchsize) { return generateWorkloadLargeKey(workloadConfig.keysize, batchsize, workloadConfig.range, seed, workloadConfig.ratio, workloadConfig.h); } extern "C" std::vector<BatchWrapper> getPopulationBatches(unsigned int *seed, unsigned batchsize) { std::set<unsigned long long> keys; if (workloadConfig.range < workloadConfig.n) { exit(1); } while (keys.size() < workloadConfig.n) { keys.insert((rand_r(seed) % workloadConfig.range) + 1); } std::vector<BatchWrapper> batches; auto iter = keys.begin(); while (iter != keys.end()) { std::vector<RequestWrapper<unsigned long long, data_t *>> vec; for (int i = 0; i < batchsize && iter != keys.end(); i++) { vec.push_back({*iter, new data_t(workloadConfig.keysize), REQUEST_INSERT}); ++iter; } batches.push_back(vec); } return batches; }
7e674cd1a16ca7e98d605ae4cb6fdfaa012d85fa.cu
// // Created by depaulsmiller on 1/15/21. // #include <iostream> #include <thread> #include <unistd.h> #include <atomic> #include <kvcg.cuh> #include <vector> #include <zipf.hh> #include <boost/property_tree/ptree.hpp> #include <boost/property_tree/json_parser.hpp> #include <set> namespace pt = boost::property_tree; using BatchWrapper = std::vector<RequestWrapper<unsigned long long, data_t *>>; struct SelfSimilarWorkloadConfig { SelfSimilarWorkloadConfig() { range = 1000000000; n = 10000; ops = 100000000; keysize = 8; ratio = 95; h = 0.5; } SelfSimilarWorkloadConfig(std::string filename) { pt::ptree root; pt::read_json(filename, root); h = root.get<double>("h", 0.1); range = root.get<int>("range", 10000000); n = root.get<int>("n", 10000); ops = root.get<int>("ops", 10000); keysize = root.get<size_t>("keysize", 8); ratio = root.get<int>("ratio", 95); } ~SelfSimilarWorkloadConfig() {} double h; int range; int n; int ops; size_t keysize; int ratio; }; SelfSimilarWorkloadConfig workloadConfig; float rand_0_1(unsigned *seed) { return rand_r(seed) / (float) RAND_MAX; } unsigned long long selfsimilar(long range, double h, unsigned *seed) { return (1 + (unsigned long long) (range * std::pow(rand_0_1(seed), (log(h) / log(1.0 - h))))); }; extern "C" int getBatchesToRun() { return workloadConfig.n; } extern "C" void initWorkload() { } extern "C" void initWorkloadFile(std::string filename) { workloadConfig = SelfSimilarWorkloadConfig(filename); } std::vector<RequestWrapper<unsigned long long, data_t *>> generateWorkloadLargeKey(size_t keySize, int size, int range, unsigned *seed, int ratioOfReads, double h) { std::vector<RequestWrapper<unsigned long long, data_t *>> vec; for (int i = 0; i < size; i++) { if (rand_r(seed) % 100 < ratioOfReads) { vec.push_back({selfsimilar(range, h, seed), nullptr, REQUEST_GET}); } else { if (rand_r(seed) % 100 < 50) { vec.push_back({selfsimilar(range, h, seed), new data_t(keySize), REQUEST_INSERT}); } else { vec.push_back( {selfsimilar(range, h, seed), nullptr, REQUEST_REMOVE}); } } } return vec; } extern "C" BatchWrapper generateWorkloadBatch(unsigned int *seed, unsigned batchsize) { return generateWorkloadLargeKey(workloadConfig.keysize, batchsize, workloadConfig.range, seed, workloadConfig.ratio, workloadConfig.h); } extern "C" std::vector<BatchWrapper> getPopulationBatches(unsigned int *seed, unsigned batchsize) { std::set<unsigned long long> keys; if (workloadConfig.range < workloadConfig.n) { exit(1); } while (keys.size() < workloadConfig.n) { keys.insert((rand_r(seed) % workloadConfig.range) + 1); } std::vector<BatchWrapper> batches; auto iter = keys.begin(); while (iter != keys.end()) { std::vector<RequestWrapper<unsigned long long, data_t *>> vec; for (int i = 0; i < batchsize && iter != keys.end(); i++) { vec.push_back({*iter, new data_t(workloadConfig.keysize), REQUEST_INSERT}); ++iter; } batches.push_back(vec); } return batches; }
9b8bc81c25d4aa04bd922b72338c2e844bf8afd4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/funcs/concat_and_split_functor.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/cuda_graph_with_memory_pool.h" namespace phi { namespace funcs { template <typename T> __global__ void ConcatKernel_(const T** inputs, const int64_t* input_cols, int col_size, const int64_t output_rows, const int64_t output_cols, T* output) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; int curr_segment = 0; int curr_offset = input_cols[0]; for (; tid_x < output_cols; tid_x += blockDim.x * gridDim.x) { int curr_col_offset = input_cols[curr_segment + 1]; while (curr_col_offset <= tid_x) { curr_offset = curr_col_offset; ++curr_segment; curr_col_offset = input_cols[curr_segment + 1]; } int local_col = tid_x - curr_offset; int segment_width = curr_col_offset - curr_offset; const T* input_ptr = inputs[curr_segment]; int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < output_rows; tid_y += blockDim.y * gridDim.y) output[tid_y * output_cols + tid_x] = input_ptr[tid_y * segment_width + local_col]; } } template <typename T> __device__ void ConcatKernelDetail(const T** inputs_data, const int fixed_in_col, const int out_rows, const int out_cols, T* output_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; tid_x < out_cols; tid_x += blockDim.x * gridDim.x) { int split = tid_x * 1.0 / fixed_in_col; int in_offset = tid_x - split * fixed_in_col; const T* input_ptr = inputs_data[split]; int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < out_rows; tid_y += blockDim.y * gridDim.y) { output_data[tid_y * out_cols + tid_x] = input_ptr[tid_y * fixed_in_col + in_offset]; } } } template <typename T> __global__ void ConcatKernel_(const T* input_addr0, const T* input_addr1, const int64_t fixed_in_col, const int64_t out_rows, const int64_t out_cols, T* output_data) { const T* inputs_data[2]; inputs_data[0] = input_addr0; inputs_data[1] = input_addr1; ConcatKernelDetail<T>( inputs_data, fixed_in_col, out_rows, out_cols, output_data); } template <typename T> __global__ void ConcatKernel_(const T* input_addr0, const T* input_addr1, const T* input_addr2, const int64_t fixed_in_col, const int64_t out_rows, const int64_t out_cols, T* output_data) { const T* inputs_data[3]; inputs_data[0] = input_addr0; inputs_data[1] = input_addr1; inputs_data[2] = input_addr2; ConcatKernelDetail<T>( inputs_data, fixed_in_col, out_rows, out_cols, output_data); } template <typename T> __global__ void ConcatKernel_(const T* input_addr0, const T* input_addr1, const T* input_addr2, const T* input_addr3, const int64_t fixed_in_col, const int64_t out_rows, const int64_t out_cols, T* output_data) { const T* inputs_data[4]; inputs_data[0] = input_addr0; inputs_data[1] = input_addr1; inputs_data[2] = input_addr2; inputs_data[3] = input_addr3; ConcatKernelDetail<T>( inputs_data, fixed_in_col, out_rows, 
out_cols, output_data); } template <typename T> __global__ void ConcatKernel_(const T** inputs_data, const int in_num, const int64_t fixed_in_col, const int64_t out_rows, const int64_t out_cols, T* output_data) { ConcatKernelDetail<T>( inputs_data, fixed_in_col, out_rows, out_cols, output_data); } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t* out_cols, int out_cols_size, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; int curr_segment = 0; int curr_offset = out_cols[0]; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { int curr_col_offset = out_cols[curr_segment + 1]; while (curr_col_offset <= tid_x) { curr_offset = curr_col_offset; ++curr_segment; curr_col_offset = out_cols[curr_segment + 1]; } int local_col = tid_x - curr_offset; int segment_width = curr_col_offset - curr_offset; T* output_ptr = outputs_data[curr_segment]; if (output_ptr != nullptr) { int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) output_ptr[tid_y * segment_width + local_col] = input_data[tid_y * in_col + tid_x]; } } } template <typename T> __device__ void SplitKernelDetail(const T* input_data, const int in_row, const int in_col, const int fixed_out_col, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { int split = tid_x / fixed_out_col; int in_offset = tid_x - split * fixed_out_col; T* output_ptr = outputs_data[split]; if (output_ptr != nullptr) { int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) output_ptr[tid_y * fixed_out_col + in_offset] = input_data[tid_y * in_col + tid_x]; } } } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t fixed_out_col, T** outputs_data) { SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data); } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t fixed_out_col, T* outputs_addr0, T* outputs_addr1) { T* outputs_data[2]; outputs_data[0] = outputs_addr0; outputs_data[1] = outputs_addr1; SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data); } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t fixed_out_col, T* outputs_addr0, T* outputs_addr1, T* outputs_addr2) { T* outputs_data[3]; outputs_data[0] = outputs_addr0; outputs_data[1] = outputs_addr1; outputs_data[2] = outputs_addr2; SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data); } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t fixed_out_col, T* outputs_addr0, T* outputs_addr1, T* outputs_addr2, T* outputs_addr3) { T* outputs_data[4]; outputs_data[0] = outputs_addr0; outputs_data[1] = outputs_addr1; outputs_data[2] = outputs_addr2; outputs_data[3] = outputs_addr3; SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data); } static inline void GetBlockDims(const phi::GPUContext& context, int64_t num_rows, int64_t num_cols, dim3* block_dims, dim3* grid_dims) { // Set the thread block and grid according to CurrentDeviceId const int kThreadsPerBlock = 1024; int block_cols = kThreadsPerBlock; if (num_cols < kThreadsPerBlock) { // 
block_cols is aligned by 32. block_cols = ((num_cols + 31) >> 5) << 5; } int block_rows = kThreadsPerBlock / block_cols; *block_dims = dim3(block_cols, block_rows, 1); int max_threads = context.GetMaxPhysicalThreadCount(); int64_t max_blocks = ::max(max_threads / kThreadsPerBlock, 1); int grid_cols = ::min((num_cols + block_cols - 1) / block_cols, max_blocks); int grid_rows = ::min(max_blocks / grid_cols, ::max(num_rows / block_rows, (int64_t)1)); *grid_dims = dim3(grid_cols, grid_rows, 1); } /* * All tensors' dimension should be the same and the values of * each dimension must be the same, except the axis dimension. */ template <typename T> struct ConcatFunctor<phi::GPUContext, T> { void operator()(const phi::GPUContext& context, const std::vector<phi::DenseTensor>& input, int axis, phi::DenseTensor* output) { // TODO(zcd): Add input data validity checking int in_num = input.size(); int64_t in_row = 1; auto dim_0 = input[0].dims(); for (int i = 0; i < axis; ++i) { in_row *= dim_0[i]; } int64_t in_col = input[0].numel() / in_row; int64_t out_row = in_row, out_col = 0; int inputs_col_num = in_num + 1; std::vector<const T*> inputs_data_vec(in_num); std::vector<int64_t> inputs_col_vec(inputs_col_num); const T** inputs_data = inputs_data_vec.data(); int64_t* inputs_col = inputs_col_vec.data(); // There are some differences between hip runtime and NV runtime. // In NV, when the pageable memory data less than 64K is transferred from // hosttodevice, it will be automatically asynchronous. // However, only pinned memory in hip can copy asynchronously // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#concurrent-execution-host-device // 3.2.6.1. Concurrent Execution between Host and Device // Memory copies from host to device of a memory block of 64 KB or less #ifdef PADDLE_WITH_HIP paddle::memory::AllocationPtr data_alloc, col_alloc; // TODO(chentianyu03): try to find a method to remove the Alloc function data_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(), in_num * sizeof(T*)); inputs_data = reinterpret_cast<const T**>(data_alloc->ptr()); // TODO(chentianyu03): try to find a method to remove the Alloc function col_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(), inputs_col_num * sizeof(int)); inputs_col = reinterpret_cast<int64_t*>(col_alloc->ptr()); #endif inputs_col[0] = 0; bool has_same_shape = true; for (int i = 0; i < in_num; ++i) { int64_t t_cols = input[i].numel() / in_row; if (has_same_shape) { if (t_cols != in_col) has_same_shape = false; } out_col += t_cols; inputs_col[i + 1] = out_col; inputs_data[i] = input[i].data<T>(); } dim3 block_dims; dim3 grid_dims; GetBlockDims(context, out_row, out_col, &block_dims, &grid_dims); paddle::memory::allocation::AllocationPtr tmp_dev_ins_data; const T** dev_ins_data = nullptr; if (!has_same_shape || in_num < 2 || in_num > 4) { tmp_dev_ins_data = paddle::memory::Alloc(context, in_num * sizeof(T*)); auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph( inputs_data, in_num); paddle::memory::Copy(context.GetPlace(), tmp_dev_ins_data->ptr(), paddle::platform::CPUPlace(), restored, in_num * sizeof(T*), context.stream()); dev_ins_data = reinterpret_cast<const T**>(tmp_dev_ins_data->ptr()); } if (has_same_shape) { if (in_num == 2) { hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(), inputs_data[0], inputs_data[1], in_col, out_row, out_col, output->data<T>()); } else if (in_num == 3) { hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), 
dim3(block_dims), 0, context.stream(), inputs_data[0], inputs_data[1], inputs_data[2], in_col, out_row, out_col, output->data<T>()); } else if (in_num == 4) { hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(), inputs_data[0], inputs_data[1], inputs_data[2], inputs_data[3], in_col, out_row, out_col, output->data<T>()); } else { hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(), dev_ins_data, in_num, in_col, out_row, out_col, output->data<T>()); } } else { auto tmp_dev_ins_col_data = paddle::memory::Alloc(context, inputs_col_num * sizeof(int64_t)); auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph( inputs_col, inputs_col_num); paddle::memory::Copy(context.GetPlace(), tmp_dev_ins_col_data->ptr(), paddle::platform::CPUPlace(), restored, inputs_col_num * sizeof(int64_t), context.stream()); int64_t* dev_ins_col_data = static_cast<int64_t*>(tmp_dev_ins_col_data->ptr()); hipLaunchKernelGGL(( ConcatKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(), dev_ins_data, dev_ins_col_data, static_cast<int>(inputs_col_num), out_row, out_col, output->data<T>()); } #ifdef PADDLE_WITH_HIP // Prevent the pinned memory value from being covered and release the memory // after the launch kernel of the stream is executed (reapply pinned memory // next time) auto* data_alloc_released = data_alloc.release(); auto* col_alloc_released = col_alloc.release(); context.AddStreamCallback([data_alloc_released, col_alloc_released] { VLOG(4) << "Delete cuda pinned at " << data_alloc_released; VLOG(4) << "Delete cuda pinned at " << col_alloc_released; paddle::memory::allocation::Allocator::AllocationDeleter( data_alloc_released); paddle::memory::allocation::Allocator::AllocationDeleter( col_alloc_released); }); #endif } }; template <typename T> class SplitFunctor<phi::GPUContext, T> { public: void operator()(const phi::GPUContext& context, const phi::DenseTensor& input, const std::vector<const phi::DenseTensor*>& ref_inputs, int axis, std::vector<phi::DenseTensor*>* outputs) { // NOTE(zhiqiu): split a tensor of shape [0,3,4] at axis=1, result in 3 // tensors of shape [0,1,4] if (input.numel() == 0) { return; } // TODO(zcd): Add input data validity checking int o_num = outputs->size(); int64_t out_row = 1; auto dim_0 = ref_inputs[0]->dims(); for (int i = 0; i < axis; ++i) { out_row *= dim_0[i]; } int64_t out0_col = ref_inputs[0]->numel() / out_row; int64_t in_col = 0, in_row = out_row; bool has_same_shape = true; int outputs_cols_num = o_num + 1; std::vector<T*> outputs_data_vec(o_num); std::vector<int64_t> outputs_cols_vec(outputs_cols_num); T** outputs_data = outputs_data_vec.data(); int64_t* outputs_cols = outputs_cols_vec.data(); // There are some differences between hip runtime and NV runtime. // In NV, when the pageable memory data less than 64K is transferred from // hosttodevice, it will be automatically asynchronous. // However, only pinned memory in hip can copy asynchronously // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#concurrent-execution-host-device // 3.2.6.1. 
Concurrent Execution between Host and Device // Memory copies from host to device of a memory block of 64 KB or less #ifdef PADDLE_WITH_HIP paddle::memory::AllocationPtr data_alloc, cols_alloc; // TODO(chentianyu03): try to find a method to remove the Alloc function data_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(), o_num * sizeof(T*)); outputs_data = reinterpret_cast<T**>(data_alloc->ptr()); // TODO(chentianyu03): try to find a method to remove the Alloc function cols_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(), (outputs_cols_num) * sizeof(int64_t)); outputs_cols = reinterpret_cast<int64_t*>(cols_alloc->ptr()); #endif outputs_cols[0] = 0; for (int i = 0; i < o_num; ++i) { int64_t t_col = ref_inputs.at(i)->numel() / out_row; if (has_same_shape) { if (t_col != out0_col) has_same_shape = false; } in_col += t_col; outputs_cols[i + 1] = in_col; if (outputs->at(i) != nullptr) { outputs_data[i] = outputs->at(i)->data<T>(); } else { outputs_data[i] = nullptr; } } dim3 block_dims; dim3 grid_dims; GetBlockDims(context, out_row, in_col, &block_dims, &grid_dims); paddle::memory::allocation::AllocationPtr tmp_dev_outs_data; T** dev_out_gpu_data = nullptr; if (!has_same_shape || o_num < 2 || o_num > 4) { // TODO(chentianyu03): try to find a method to remove the Alloc function tmp_dev_outs_data = paddle::memory::Alloc(context, o_num * sizeof(T*)); auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph( outputs_data, o_num); paddle::memory::Copy(context.GetPlace(), tmp_dev_outs_data->ptr(), paddle::platform::CPUPlace(), restored, o_num * sizeof(T*), context.stream()); dev_out_gpu_data = reinterpret_cast<T**>(tmp_dev_outs_data->ptr()); } if (has_same_shape) { if (o_num == 2) { hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(), input.data<T>(), in_row, in_col, out0_col, outputs_data[0], outputs_data[1]); } else if (o_num == 3) { hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(), input.data<T>(), in_row, in_col, out0_col, outputs_data[0], outputs_data[1], outputs_data[2]); } else if (o_num == 4) { hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(), input.data<T>(), in_row, in_col, out0_col, outputs_data[0], outputs_data[1], outputs_data[2], outputs_data[3]); } else { hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(), input.data<T>(), in_row, in_col, out0_col, dev_out_gpu_data); } } else { auto tmp_dev_ins_col_data = // TODO(chentianyu03): try to find a method to remove the Alloc // function paddle::memory::Alloc(context, outputs_cols_num * sizeof(int64_t)); auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph( outputs_cols, outputs_cols_num); paddle::memory::Copy(context.GetPlace(), tmp_dev_ins_col_data->ptr(), paddle::platform::CPUPlace(), restored, outputs_cols_num * sizeof(int64_t), context.stream()); int64_t* dev_outs_col_data = reinterpret_cast<int64_t*>(tmp_dev_ins_col_data->ptr()); hipLaunchKernelGGL(( SplitKernel_), dim3(grid_dims), dim3(block_dims), 0, context.stream(), input.data<T>(), in_row, in_col, dev_outs_col_data, static_cast<int>(outputs_cols_num), dev_out_gpu_data); } #ifdef PADDLE_WITH_HIP // Prevent the pinned memory value from being covered and release the memory // after the launch kernel of the stream is executed (reapply pinned memory // next time) auto* data_alloc_released = data_alloc.release(); auto* cols_alloc_released = cols_alloc.release(); 
context.AddStreamCallback([data_alloc_released, cols_alloc_released] { paddle::memory::allocation::Allocator::AllocationDeleter( data_alloc_released); paddle::memory::allocation::Allocator::AllocationDeleter( cols_alloc_released); }); #endif } }; #define DEFINE_FUNCTOR(type) \ template class ConcatFunctor<phi::GPUContext, type>; \ template class SplitFunctor<phi::GPUContext, type> FOR_ALL_TYPES(DEFINE_FUNCTOR); } // namespace funcs } // namespace phi
9b8bc81c25d4aa04bd922b72338c2e844bf8afd4.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/funcs/concat_and_split_functor.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/cuda_graph_with_memory_pool.h" namespace phi { namespace funcs { template <typename T> __global__ void ConcatKernel_(const T** inputs, const int64_t* input_cols, int col_size, const int64_t output_rows, const int64_t output_cols, T* output) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; int curr_segment = 0; int curr_offset = input_cols[0]; for (; tid_x < output_cols; tid_x += blockDim.x * gridDim.x) { int curr_col_offset = input_cols[curr_segment + 1]; while (curr_col_offset <= tid_x) { curr_offset = curr_col_offset; ++curr_segment; curr_col_offset = input_cols[curr_segment + 1]; } int local_col = tid_x - curr_offset; int segment_width = curr_col_offset - curr_offset; const T* input_ptr = inputs[curr_segment]; int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < output_rows; tid_y += blockDim.y * gridDim.y) output[tid_y * output_cols + tid_x] = input_ptr[tid_y * segment_width + local_col]; } } template <typename T> __device__ void ConcatKernelDetail(const T** inputs_data, const int fixed_in_col, const int out_rows, const int out_cols, T* output_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; tid_x < out_cols; tid_x += blockDim.x * gridDim.x) { int split = tid_x * 1.0 / fixed_in_col; int in_offset = tid_x - split * fixed_in_col; const T* input_ptr = inputs_data[split]; int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < out_rows; tid_y += blockDim.y * gridDim.y) { output_data[tid_y * out_cols + tid_x] = input_ptr[tid_y * fixed_in_col + in_offset]; } } } template <typename T> __global__ void ConcatKernel_(const T* input_addr0, const T* input_addr1, const int64_t fixed_in_col, const int64_t out_rows, const int64_t out_cols, T* output_data) { const T* inputs_data[2]; inputs_data[0] = input_addr0; inputs_data[1] = input_addr1; ConcatKernelDetail<T>( inputs_data, fixed_in_col, out_rows, out_cols, output_data); } template <typename T> __global__ void ConcatKernel_(const T* input_addr0, const T* input_addr1, const T* input_addr2, const int64_t fixed_in_col, const int64_t out_rows, const int64_t out_cols, T* output_data) { const T* inputs_data[3]; inputs_data[0] = input_addr0; inputs_data[1] = input_addr1; inputs_data[2] = input_addr2; ConcatKernelDetail<T>( inputs_data, fixed_in_col, out_rows, out_cols, output_data); } template <typename T> __global__ void ConcatKernel_(const T* input_addr0, const T* input_addr1, const T* input_addr2, const T* input_addr3, const int64_t fixed_in_col, const int64_t out_rows, const int64_t out_cols, T* output_data) { const T* inputs_data[4]; inputs_data[0] = input_addr0; inputs_data[1] = input_addr1; inputs_data[2] = input_addr2; inputs_data[3] = input_addr3; ConcatKernelDetail<T>( inputs_data, fixed_in_col, out_rows, out_cols, output_data); } template <typename T> __global__ void ConcatKernel_(const T** 
inputs_data, const int in_num, const int64_t fixed_in_col, const int64_t out_rows, const int64_t out_cols, T* output_data) { ConcatKernelDetail<T>( inputs_data, fixed_in_col, out_rows, out_cols, output_data); } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t* out_cols, int out_cols_size, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; int curr_segment = 0; int curr_offset = out_cols[0]; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { int curr_col_offset = out_cols[curr_segment + 1]; while (curr_col_offset <= tid_x) { curr_offset = curr_col_offset; ++curr_segment; curr_col_offset = out_cols[curr_segment + 1]; } int local_col = tid_x - curr_offset; int segment_width = curr_col_offset - curr_offset; T* output_ptr = outputs_data[curr_segment]; if (output_ptr != nullptr) { int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) output_ptr[tid_y * segment_width + local_col] = input_data[tid_y * in_col + tid_x]; } } } template <typename T> __device__ void SplitKernelDetail(const T* input_data, const int in_row, const int in_col, const int fixed_out_col, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { int split = tid_x / fixed_out_col; int in_offset = tid_x - split * fixed_out_col; T* output_ptr = outputs_data[split]; if (output_ptr != nullptr) { int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) output_ptr[tid_y * fixed_out_col + in_offset] = input_data[tid_y * in_col + tid_x]; } } } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t fixed_out_col, T** outputs_data) { SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data); } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t fixed_out_col, T* outputs_addr0, T* outputs_addr1) { T* outputs_data[2]; outputs_data[0] = outputs_addr0; outputs_data[1] = outputs_addr1; SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data); } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t fixed_out_col, T* outputs_addr0, T* outputs_addr1, T* outputs_addr2) { T* outputs_data[3]; outputs_data[0] = outputs_addr0; outputs_data[1] = outputs_addr1; outputs_data[2] = outputs_addr2; SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data); } template <typename T> __global__ void SplitKernel_(const T* input_data, const int64_t in_row, const int64_t in_col, const int64_t fixed_out_col, T* outputs_addr0, T* outputs_addr1, T* outputs_addr2, T* outputs_addr3) { T* outputs_data[4]; outputs_data[0] = outputs_addr0; outputs_data[1] = outputs_addr1; outputs_data[2] = outputs_addr2; outputs_data[3] = outputs_addr3; SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data); } static inline void GetBlockDims(const phi::GPUContext& context, int64_t num_rows, int64_t num_cols, dim3* block_dims, dim3* grid_dims) { // Set the thread block and grid according to CurrentDeviceId const int kThreadsPerBlock = 1024; int block_cols = kThreadsPerBlock; if (num_cols < kThreadsPerBlock) { // block_cols is aligned by 32. 
block_cols = ((num_cols + 31) >> 5) << 5; } int block_rows = kThreadsPerBlock / block_cols; *block_dims = dim3(block_cols, block_rows, 1); int max_threads = context.GetMaxPhysicalThreadCount(); int64_t max_blocks = std::max(max_threads / kThreadsPerBlock, 1); int grid_cols = std::min((num_cols + block_cols - 1) / block_cols, max_blocks); int grid_rows = std::min(max_blocks / grid_cols, std::max(num_rows / block_rows, (int64_t)1)); *grid_dims = dim3(grid_cols, grid_rows, 1); } /* * All tensors' dimension should be the same and the values of * each dimension must be the same, except the axis dimension. */ template <typename T> struct ConcatFunctor<phi::GPUContext, T> { void operator()(const phi::GPUContext& context, const std::vector<phi::DenseTensor>& input, int axis, phi::DenseTensor* output) { // TODO(zcd): Add input data validity checking int in_num = input.size(); int64_t in_row = 1; auto dim_0 = input[0].dims(); for (int i = 0; i < axis; ++i) { in_row *= dim_0[i]; } int64_t in_col = input[0].numel() / in_row; int64_t out_row = in_row, out_col = 0; int inputs_col_num = in_num + 1; std::vector<const T*> inputs_data_vec(in_num); std::vector<int64_t> inputs_col_vec(inputs_col_num); const T** inputs_data = inputs_data_vec.data(); int64_t* inputs_col = inputs_col_vec.data(); // There are some differences between hip runtime and NV runtime. // In NV, when the pageable memory data less than 64K is transferred from // hosttodevice, it will be automatically asynchronous. // However, only pinned memory in hip can copy asynchronously // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#concurrent-execution-host-device // 3.2.6.1. Concurrent Execution between Host and Device // Memory copies from host to device of a memory block of 64 KB or less #ifdef PADDLE_WITH_HIP paddle::memory::AllocationPtr data_alloc, col_alloc; // TODO(chentianyu03): try to find a method to remove the Alloc function data_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(), in_num * sizeof(T*)); inputs_data = reinterpret_cast<const T**>(data_alloc->ptr()); // TODO(chentianyu03): try to find a method to remove the Alloc function col_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(), inputs_col_num * sizeof(int)); inputs_col = reinterpret_cast<int64_t*>(col_alloc->ptr()); #endif inputs_col[0] = 0; bool has_same_shape = true; for (int i = 0; i < in_num; ++i) { int64_t t_cols = input[i].numel() / in_row; if (has_same_shape) { if (t_cols != in_col) has_same_shape = false; } out_col += t_cols; inputs_col[i + 1] = out_col; inputs_data[i] = input[i].data<T>(); } dim3 block_dims; dim3 grid_dims; GetBlockDims(context, out_row, out_col, &block_dims, &grid_dims); paddle::memory::allocation::AllocationPtr tmp_dev_ins_data; const T** dev_ins_data = nullptr; if (!has_same_shape || in_num < 2 || in_num > 4) { tmp_dev_ins_data = paddle::memory::Alloc(context, in_num * sizeof(T*)); auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph( inputs_data, in_num); paddle::memory::Copy(context.GetPlace(), tmp_dev_ins_data->ptr(), paddle::platform::CPUPlace(), restored, in_num * sizeof(T*), context.stream()); dev_ins_data = reinterpret_cast<const T**>(tmp_dev_ins_data->ptr()); } if (has_same_shape) { if (in_num == 2) { ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( inputs_data[0], inputs_data[1], in_col, out_row, out_col, output->data<T>()); } else if (in_num == 3) { ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( inputs_data[0], inputs_data[1], 
inputs_data[2], in_col, out_row, out_col, output->data<T>()); } else if (in_num == 4) { ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( inputs_data[0], inputs_data[1], inputs_data[2], inputs_data[3], in_col, out_row, out_col, output->data<T>()); } else { ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( dev_ins_data, in_num, in_col, out_row, out_col, output->data<T>()); } } else { auto tmp_dev_ins_col_data = paddle::memory::Alloc(context, inputs_col_num * sizeof(int64_t)); auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph( inputs_col, inputs_col_num); paddle::memory::Copy(context.GetPlace(), tmp_dev_ins_col_data->ptr(), paddle::platform::CPUPlace(), restored, inputs_col_num * sizeof(int64_t), context.stream()); int64_t* dev_ins_col_data = static_cast<int64_t*>(tmp_dev_ins_col_data->ptr()); ConcatKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( dev_ins_data, dev_ins_col_data, static_cast<int>(inputs_col_num), out_row, out_col, output->data<T>()); } #ifdef PADDLE_WITH_HIP // Prevent the pinned memory value from being covered and release the memory // after the launch kernel of the stream is executed (reapply pinned memory // next time) auto* data_alloc_released = data_alloc.release(); auto* col_alloc_released = col_alloc.release(); context.AddStreamCallback([data_alloc_released, col_alloc_released] { VLOG(4) << "Delete cuda pinned at " << data_alloc_released; VLOG(4) << "Delete cuda pinned at " << col_alloc_released; paddle::memory::allocation::Allocator::AllocationDeleter( data_alloc_released); paddle::memory::allocation::Allocator::AllocationDeleter( col_alloc_released); }); #endif } }; template <typename T> class SplitFunctor<phi::GPUContext, T> { public: void operator()(const phi::GPUContext& context, const phi::DenseTensor& input, const std::vector<const phi::DenseTensor*>& ref_inputs, int axis, std::vector<phi::DenseTensor*>* outputs) { // NOTE(zhiqiu): split a tensor of shape [0,3,4] at axis=1, result in 3 // tensors of shape [0,1,4] if (input.numel() == 0) { return; } // TODO(zcd): Add input data validity checking int o_num = outputs->size(); int64_t out_row = 1; auto dim_0 = ref_inputs[0]->dims(); for (int i = 0; i < axis; ++i) { out_row *= dim_0[i]; } int64_t out0_col = ref_inputs[0]->numel() / out_row; int64_t in_col = 0, in_row = out_row; bool has_same_shape = true; int outputs_cols_num = o_num + 1; std::vector<T*> outputs_data_vec(o_num); std::vector<int64_t> outputs_cols_vec(outputs_cols_num); T** outputs_data = outputs_data_vec.data(); int64_t* outputs_cols = outputs_cols_vec.data(); // There are some differences between hip runtime and NV runtime. // In NV, when the pageable memory data less than 64K is transferred from // hosttodevice, it will be automatically asynchronous. // However, only pinned memory in hip can copy asynchronously // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#concurrent-execution-host-device // 3.2.6.1. 
Concurrent Execution between Host and Device // Memory copies from host to device of a memory block of 64 KB or less #ifdef PADDLE_WITH_HIP paddle::memory::AllocationPtr data_alloc, cols_alloc; // TODO(chentianyu03): try to find a method to remove the Alloc function data_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(), o_num * sizeof(T*)); outputs_data = reinterpret_cast<T**>(data_alloc->ptr()); // TODO(chentianyu03): try to find a method to remove the Alloc function cols_alloc = paddle::memory::Alloc(paddle::platform::CUDAPinnedPlace(), (outputs_cols_num) * sizeof(int64_t)); outputs_cols = reinterpret_cast<int64_t*>(cols_alloc->ptr()); #endif outputs_cols[0] = 0; for (int i = 0; i < o_num; ++i) { int64_t t_col = ref_inputs.at(i)->numel() / out_row; if (has_same_shape) { if (t_col != out0_col) has_same_shape = false; } in_col += t_col; outputs_cols[i + 1] = in_col; if (outputs->at(i) != nullptr) { outputs_data[i] = outputs->at(i)->data<T>(); } else { outputs_data[i] = nullptr; } } dim3 block_dims; dim3 grid_dims; GetBlockDims(context, out_row, in_col, &block_dims, &grid_dims); paddle::memory::allocation::AllocationPtr tmp_dev_outs_data; T** dev_out_gpu_data = nullptr; if (!has_same_shape || o_num < 2 || o_num > 4) { // TODO(chentianyu03): try to find a method to remove the Alloc function tmp_dev_outs_data = paddle::memory::Alloc(context, o_num * sizeof(T*)); auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph( outputs_data, o_num); paddle::memory::Copy(context.GetPlace(), tmp_dev_outs_data->ptr(), paddle::platform::CPUPlace(), restored, o_num * sizeof(T*), context.stream()); dev_out_gpu_data = reinterpret_cast<T**>(tmp_dev_outs_data->ptr()); } if (has_same_shape) { if (o_num == 2) { SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( input.data<T>(), in_row, in_col, out0_col, outputs_data[0], outputs_data[1]); } else if (o_num == 3) { SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( input.data<T>(), in_row, in_col, out0_col, outputs_data[0], outputs_data[1], outputs_data[2]); } else if (o_num == 4) { SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( input.data<T>(), in_row, in_col, out0_col, outputs_data[0], outputs_data[1], outputs_data[2], outputs_data[3]); } else { SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( input.data<T>(), in_row, in_col, out0_col, dev_out_gpu_data); } } else { auto tmp_dev_ins_col_data = // TODO(chentianyu03): try to find a method to remove the Alloc // function paddle::memory::Alloc(context, outputs_cols_num * sizeof(int64_t)); auto* restored = paddle::platform::RestoreHostMemIfCapturingCUDAGraph( outputs_cols, outputs_cols_num); paddle::memory::Copy(context.GetPlace(), tmp_dev_ins_col_data->ptr(), paddle::platform::CPUPlace(), restored, outputs_cols_num * sizeof(int64_t), context.stream()); int64_t* dev_outs_col_data = reinterpret_cast<int64_t*>(tmp_dev_ins_col_data->ptr()); SplitKernel_<<<grid_dims, block_dims, 0, context.stream()>>>( input.data<T>(), in_row, in_col, dev_outs_col_data, static_cast<int>(outputs_cols_num), dev_out_gpu_data); } #ifdef PADDLE_WITH_HIP // Prevent the pinned memory value from being covered and release the memory // after the launch kernel of the stream is executed (reapply pinned memory // next time) auto* data_alloc_released = data_alloc.release(); auto* cols_alloc_released = cols_alloc.release(); context.AddStreamCallback([data_alloc_released, cols_alloc_released] { paddle::memory::allocation::Allocator::AllocationDeleter( data_alloc_released); 
paddle::memory::allocation::Allocator::AllocationDeleter( cols_alloc_released); }); #endif } }; #define DEFINE_FUNCTOR(type) \ template class ConcatFunctor<phi::GPUContext, type>; \ template class SplitFunctor<phi::GPUContext, type> FOR_ALL_TYPES(DEFINE_FUNCTOR); } // namespace funcs } // namespace phi
5164feaa068e9ba958b46c1f0f2e1b008ef698d0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __HC128_KERNEL_CU__ #define __HC128_KERNEL_CU__ #define __mem(mm,i,j,N) ((mm)[(i)+(j)*(N)]) #define max(a,b) (((a)>(b))?(a):(b)) #define min(a,b) (((a)<(b))?(a):(b)) #define mod(a,b) (((a)>=(b))?((a)-(b)):((a)<0)?((a)+(b)):(a)) #define lprintf(...) ; #define rotl32(v, n) \ ((u32)((v) << (n)) | ((v) >> (32 - (n)))) #define rotr32(v,n) rotl32(v,32-(n)) #define DEFINE16(L) \ u32 L##0, L##1, L##2, L##3, L##4, L##5, L##6, L##7, L##8, L##9, L##10, L##11, L##12, L##13, L##14, L##15; #define T(L) (__mem(g_T,tID,(L),nr_streams)) #define W(L) __mem(s_W,threadIdx.x,mod((L),17),blockDim.x) extern __shared__ __align__ (__alignof(void*)) u32 smem_cache[]; #define P(L) (__mem(g_P,tID,(L),nr_streams)) #define Q(L) (__mem(g_Q,tID,(L),nr_streams)) #define f1(x) ((rotr32((x), 7)) ^ (rotr32((x),18)) ^ ((x)>> 3)) #define f2(x) ((rotr32((x),17)) ^ (rotr32((x),19)) ^ ((x)>>10)) #define g1(x,y,z) ((rotr32((x),10)^rotr32((z),23))+rotr32((y), 8)) #define g2(x,y,z) ((rotl32((x),10)^rotl32((z),23))+rotl32((y), 8)) #define h1(x) (Q(((x)&0xff)) + Q((256+(((x)>>16)&0xff)))) #define h2(x) (P(((x)&0xff)) + P((256+(((x)>>16)&0xff)))) __global__ void HC128_keyivsetup(u32* g_P, u32* g_Q, u32 *keys, u32 key_size, u32 *ivs, u32 iv_size) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; int i,j; u32* s_W=(u32*) smem_cache; u32 k0,k1,k2,k3; u32 i0,i1,i2,i3; k0 = __mem(keys,tID,0,nr_streams); i0 = __mem(ivs,tID,0,nr_streams); k1 = __mem(keys,tID,1,nr_streams); i1 = __mem(ivs,tID,1,nr_streams); k2 = __mem(keys,tID,2,nr_streams); i2 = __mem(ivs,tID,2,nr_streams); k3 = __mem(keys,tID,3,nr_streams); i3 = __mem(ivs,tID,3,nr_streams); W(0)=W(4)=k0; W(1)=W(5)=k1; W(2)=W(6)=k2; W(3)=W(7)=k3; W( 8)=W(12)=i0; W( 9)=W(13)=i1; W(10)=W(14)=i2; W(11)=W(15)=i3; for(j=16,i=16;i<256; j=mod((j+1),17), i++) { W(j) = f2(W(j-2)) + W(j-7) + f1(W(j-15)) + W(j-16) + i; } for(i=0;i<512; j=mod((j+1),17), i++) { P(i) = W(j) = f2(W(j-2)) + W(j-7) + f1(W(j-15)) + W(j-16) + i + 256; } for(i=0;i<512; j=mod((j+1),17), i++) { Q(i) = W(j) = f2(W(j-2)) + W(j-7) + f1(W(j-15)) + W(j-16) + i + 768; } for(i=0;i<512;i++) { int x = mod((i- 3),512), y = mod((i- 10),512), z = mod((i-511),512), w = mod((i- 12),512); P(i) = ( P(i) + g1(P(x),P(y),P(z)) ) ^ h1(P(w)); } for(i=0;i<512;i++) { int x = mod((i- 3),512), y = mod((i- 10),512), z = mod((i-511),512), w = mod((i- 12),512); Q(i) = ( Q(i) + g2(Q(x),Q(y),Q(z)) ) ^ h2(Q(w)); } } __global__ void HC128_process_bytes(gSTREAM_action act, u32* g_P, u32 *g_Q, u32 *buff, u32 nr_words_done, u32 nr_words) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; for(int wordno=nr_words_done;wordno<nr_words;wordno++) { u32 output_word=0; if(act!=GEN_KEYSTREAM) { output_word=__mem(buff,tID,wordno,nr_streams); } int j = wordno & 0x1ff; int x = mod((j- 3),512), y = mod((j- 10),512), z = mod((j-511),512), w = mod((j- 12),512); if((wordno&0x3ff)<512) { P(j)+=g1(P(x),P(y),P(z)); output_word=h1(P(w))^P(j); } else { Q(j)+=g2(Q(x),Q(y),Q(z)); output_word=h2(Q(w))^Q(j); } __mem(buff,tID,wordno,nr_streams)=output_word; } } #endif
5164feaa068e9ba958b46c1f0f2e1b008ef698d0.cu
#ifndef __HC128_KERNEL_CU__ #define __HC128_KERNEL_CU__ #define __mem(mm,i,j,N) ((mm)[(i)+(j)*(N)]) #define max(a,b) (((a)>(b))?(a):(b)) #define min(a,b) (((a)<(b))?(a):(b)) #define mod(a,b) (((a)>=(b))?((a)-(b)):((a)<0)?((a)+(b)):(a)) #define lprintf(...) ; #define rotl32(v, n) \ ((u32)((v) << (n)) | ((v) >> (32 - (n)))) #define rotr32(v,n) rotl32(v,32-(n)) #define DEFINE16(L) \ u32 L##0, L##1, L##2, L##3, L##4, L##5, L##6, L##7, L##8, L##9, L##10, L##11, L##12, L##13, L##14, L##15; #define T(L) (__mem(g_T,tID,(L),nr_streams)) #define W(L) __mem(s_W,threadIdx.x,mod((L),17),blockDim.x) extern __shared__ __align__ (__alignof(void*)) u32 smem_cache[]; #define P(L) (__mem(g_P,tID,(L),nr_streams)) #define Q(L) (__mem(g_Q,tID,(L),nr_streams)) #define f1(x) ((rotr32((x), 7)) ^ (rotr32((x),18)) ^ ((x)>> 3)) #define f2(x) ((rotr32((x),17)) ^ (rotr32((x),19)) ^ ((x)>>10)) #define g1(x,y,z) ((rotr32((x),10)^rotr32((z),23))+rotr32((y), 8)) #define g2(x,y,z) ((rotl32((x),10)^rotl32((z),23))+rotl32((y), 8)) #define h1(x) (Q(((x)&0xff)) + Q((256+(((x)>>16)&0xff)))) #define h2(x) (P(((x)&0xff)) + P((256+(((x)>>16)&0xff)))) __global__ void HC128_keyivsetup(u32* g_P, u32* g_Q, u32 *keys, u32 key_size, u32 *ivs, u32 iv_size) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; int i,j; u32* s_W=(u32*) smem_cache; u32 k0,k1,k2,k3; u32 i0,i1,i2,i3; k0 = __mem(keys,tID,0,nr_streams); i0 = __mem(ivs,tID,0,nr_streams); k1 = __mem(keys,tID,1,nr_streams); i1 = __mem(ivs,tID,1,nr_streams); k2 = __mem(keys,tID,2,nr_streams); i2 = __mem(ivs,tID,2,nr_streams); k3 = __mem(keys,tID,3,nr_streams); i3 = __mem(ivs,tID,3,nr_streams); W(0)=W(4)=k0; W(1)=W(5)=k1; W(2)=W(6)=k2; W(3)=W(7)=k3; W( 8)=W(12)=i0; W( 9)=W(13)=i1; W(10)=W(14)=i2; W(11)=W(15)=i3; for(j=16,i=16;i<256; j=mod((j+1),17), i++) { W(j) = f2(W(j-2)) + W(j-7) + f1(W(j-15)) + W(j-16) + i; } for(i=0;i<512; j=mod((j+1),17), i++) { P(i) = W(j) = f2(W(j-2)) + W(j-7) + f1(W(j-15)) + W(j-16) + i + 256; } for(i=0;i<512; j=mod((j+1),17), i++) { Q(i) = W(j) = f2(W(j-2)) + W(j-7) + f1(W(j-15)) + W(j-16) + i + 768; } for(i=0;i<512;i++) { int x = mod((i- 3),512), y = mod((i- 10),512), z = mod((i-511),512), w = mod((i- 12),512); P(i) = ( P(i) + g1(P(x),P(y),P(z)) ) ^ h1(P(w)); } for(i=0;i<512;i++) { int x = mod((i- 3),512), y = mod((i- 10),512), z = mod((i-511),512), w = mod((i- 12),512); Q(i) = ( Q(i) + g2(Q(x),Q(y),Q(z)) ) ^ h2(Q(w)); } } __global__ void HC128_process_bytes(gSTREAM_action act, u32* g_P, u32 *g_Q, u32 *buff, u32 nr_words_done, u32 nr_words) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; for(int wordno=nr_words_done;wordno<nr_words;wordno++) { u32 output_word=0; if(act!=GEN_KEYSTREAM) { output_word=__mem(buff,tID,wordno,nr_streams); } int j = wordno & 0x1ff; int x = mod((j- 3),512), y = mod((j- 10),512), z = mod((j-511),512), w = mod((j- 12),512); if((wordno&0x3ff)<512) { P(j)+=g1(P(x),P(y),P(z)); output_word=h1(P(w))^P(j); } else { Q(j)+=g2(Q(x),Q(y),Q(z)); output_word=h2(Q(w))^Q(j); } __mem(buff,tID,wordno,nr_streams)=output_word; } } #endif
4cf4fb3bd65637178d996a7cef7b114d0e9a7e5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ int dev_get_a0(double *a0, double kb, float* rndgauss_field, float* rndunif_field, int ind){ float a,b,y,help; a = abs(rndgauss_field[ind]); // need gaussian dist b = -log(1.0f - rndunif_field[4*ind]); // we need 4 unif randoms per site , use the 0th here y = a*a + b; y = y/kb; help = rndunif_field[4*ind+1]; // we need 4 unif randoms per site , use the 1st here if((2.0f * (help*help)) <= (2.0f -y)){ (*a0) = (double)(1.0f - y); return(1); } else{ (*a0) = 0.0; return(0); } } __device__ void dev_heatbath_su2(dev_su2* alpha, dev_su2* w, double beta, float* rndgauss_field, float* rndunif_field, int ind){ dev_su2 v, a; double k, rdet, cos_theta, sin_theta, phi, norm; int ret; k = (*w).a.x*(*w).a.x + (*w).a.y*(*w).a.y + (*w).b.x*(*w).b.x + (*w).b.y*(*w).b.y ; rdet = rsqrt(k); v.a.x = (*w).a.x*rdet; v.a.y = (*w).a.y*rdet; v.b.x = (*w).b.x*rdet; v.b.y = (*w).b.y*rdet; ret = dev_get_a0(&(a.a.x), k*beta, rndgauss_field, rndunif_field, ind); norm = sqrt(1.0 - a.a.x*a.a.x); cos_theta = 2.0 * rndunif_field[4*ind+2] - 1.0; // we need 4 unif randoms per site , use the 2nd here sin_theta = sqrt( 1.0 - cos_theta*cos_theta ); phi = 6.2831853071795862 * (double) rndunif_field[4*ind+3]; // we need 4 unif randoms per site , use the 3rd here sincos(phi, &(a.b.x), &(a.a.y)); a.a.y = norm * sin_theta * a.a.y; a.b.x = norm * sin_theta * a.b.x; a.b.y = norm * cos_theta; if(ret == 1){ // a0 was accepted in the first place in dev_get_a0 dev_su2_ti_su2(alpha,&a,&v); } else{ // a0 was not accepted in the first place -> no update -> put alpha = w (*alpha).a.x = (*w).a.x; (*alpha).a.y = (*w).a.y; (*alpha).b.x = (*w).b.x; (*alpha).b.y = (*w).b.y; } } __device__ void cabibbo_marinari_heatbath(dev_su3 * g, dev_su3 * star, double beta, float* rndgauss_field, float* rndunif_field, int ind){ // this routine does an relaxation update of an SU(3) matrix g int a,b,c; dev_su3 X; dev_su2 w, alpha; dev_complex dummy, dummy2, dummy3; for(a=0; a<2; a++){ for(b=a+1; b<3; b++){ dev_su3_ti_su3(&(X), g, star); w.a.x = X[a][a].re + X[b][b].re; w.b.y = -X[a][a].im + X[b][b].im; w.a.y = -X[a][b].im - X[b][a].im; w.b.x = -X[a][b].re + X[b][a].re; /* my definitions: U = w0 ID + i ( w1 sigma1 + w2 sigma2 + w3 sigma3 ) | 0 1 | | 0 -i | | 1 0 | sigma1 = | | sigma2 = | | sigma3 = | | | 1 0 | | i 0 | | 0 -1 | */ dev_heatbath_su2(&(alpha), &(w), beta, rndgauss_field, rndunif_field, ind); for(c=0; c<3; c++){ //dummy = cmplx(alpha(0),alpha(3), kind=RKIND) * u(a,c) & // + cmplx(alpha(2),alpha(1), kind=RKIND) * u(b,c) dummy = dev_cmult(dev_initcomplex(alpha.a.x,alpha.b.y),(*g)[a][c]); dummy2 = dev_cmult(dev_initcomplex(alpha.b.x,alpha.a.y),(*g)[b][c]); dummy = dev_cadd(dummy, dummy2); // u(b,c) = cmplx(-alpha(2), alpha(1), kind=RKIND) * u(a,c) & // + cmplx( alpha(0),-alpha(3), kind=RKIND) * u(b,c) dummy2 = dev_cmult(dev_initcomplex(-alpha.b.x,alpha.a.y),(*g)[a][c]); dummy3 = dev_cmult(dev_initcomplex(alpha.a.x,-alpha.b.y),(*g)[b][c]); (*g)[b][c] = dev_cadd(dummy2, dummy3); // u(a,c) = dummy (*g)[a][c] = dummy; } } } } // do a trafo heatbath update of either the even or the odd sites depending on the dev_indeo_thissite and dev_indeo_nextside // index fields __global__ void dev_heatbath_sweep(dev_su3_2v * trafo_new, dev_su3_2v * gf, dev_su3_2v * trafo, int * dev_indeo_thissite, int * dev_indeo_nextside, int * dev_nn, float* rndgauss_field, float* rndunif_field){ int eofieldpos, pos,hoppos,mu; // trafo and gauge fields read and reconstructed --> shared mem 
__shared__ dev_su3 gfsmem[BLOCK]; __shared__ dev_su3 trafosmem[BLOCK]; dev_su3 help, star; eofieldpos = threadIdx.x + blockDim.x*blockIdx.x; int ix = threadIdx.x; if(eofieldpos < dev_VOLUME/2){ pos = dev_indeo_thissite[eofieldpos]; dev_su3zero( &(star) ); // calculate the STAR //#pragma unroll 4 for(mu=0;mu<4;mu++){ //positive dir hoppos = dev_nn[8*pos+mu]; //gauge_field U_mu(x) #ifdef GF_8 dev_reconstructgf_8texref(gf, (4*pos+mu),&(gfsmem[ix])); #else dev_reconstructgf_2vtexref(gf, (4*pos+mu),&(gfsmem[ix])); #endif //trafo_field g^+(x+mu) #ifdef GF_8 dev_reconstructtrafo_8texref_dagger(trafo, hoppos,&(trafosmem[ix])); #else dev_reconstructtrafo_2vtexref_dagger(trafo, hoppos,&(trafosmem[ix])); #endif //dev_su3_ti_su3( &(gt), &(gfsmem[ix]), &(trafosmem[ix]) ); //dev_su3_add( &(star), &(gt)); dev_add_su3_ti_su3(&(star) , &(gfsmem[ix]), &(trafosmem[ix]) ); //negative dir hoppos = dev_nn[8*pos+4+mu]; //gauge_field U_mu(x-mu)^+ #ifdef GF_8 dev_reconstructgf_8texref_dagger(gf, 4*hoppos+mu,&(gfsmem[ix])); #else dev_reconstructgf_2vtexref_dagger(gf, 4*hoppos+mu,&(gfsmem[ix])); #endif //trafo_field g^+(x-mu) #ifdef GF_8 dev_reconstructtrafo_8texref_dagger(trafo, hoppos,&(trafosmem[ix])); #else dev_reconstructtrafo_2vtexref_dagger(trafo, hoppos,&(trafosmem[ix])); #endif //dev_su3_ti_su3( &(gt), &(gfsmem[ix]), &(trafosmem[ix]) ); //dev_su3_add( &(star), &(gt)); dev_add_su3_ti_su3( &(star), &(gfsmem[ix]), &(trafosmem[ix]) ); } //load g(x) #ifdef GF_8 dev_reconstructtrafo_8texref(trafo, pos,&(trafosmem[ix])); #else dev_reconstructtrafo_2vtexref(trafo, pos,&(trafosmem[ix])); #endif dev_su3copy( &(help) , &(trafosmem[ix]) ); cabibbo_marinari_heatbath( &(help), &(star) , sa_beta, rndgauss_field, rndunif_field, eofieldpos); //also append the position of g, for the rng! //the global sa_beta is used for beta // now the old trafo field is in trafosmem the new field in help dev_su3_normalize(&(help)); #ifdef GF_8 dev_storetrafo_8(pos, trafo_new ,&(help) ); #else dev_storetrafo_2v(pos, trafo_new ,&(help) ); #endif #ifdef USETEXTURE // copy the trafofields of the sites that are not updated to destination field // e.g. if EVEN is updated just copy ODD trafos pos = dev_indeo_nextside[eofieldpos]; //load g(x) #ifdef GF_8 dev_reconstructtrafo_8texref(trafo, pos,&(trafosmem[ix])); #else dev_reconstructtrafo_2vtexref(trafo, pos,&(trafosmem[ix])); #endif // and store it #ifdef GF_8 dev_storetrafo_8(pos, trafo_new ,&(trafosmem[ix]) ); #else dev_storetrafo_2v(pos, trafo_new ,&(trafosmem[ix]) ); #endif #endif } } void set_sa_temperature(int i){ // Returns the temperature for a given i using a function // proposed by Peter Schemel. double a, temperature, beta; //printf("Setting new SA temp: %d\n",i); if(saparam.Tmax == saparam.Tmin){ temperature = saparam.Tmax; } else{ if(saparam.expo == 0){ a = (double)(i) / (double) (saparam.N-1); temperature = pow( ( (double) saparam.Tmin/ (double) saparam.Tmax) , a) * (double) saparam.Tmax ; } else if(saparam.expo == -1){ a = (double)(saparam.Tmin - saparam.Tmax) / (double) (saparam.N-1); temperature = (a*i + saparam.Tmax); } else{ a = pow(saparam.Tmin, -saparam.expo)- pow(saparam.Tmax, -saparam.expo); a = a / (double) (saparam.N-1); temperature = pow( (a*i + pow(saparam.Tmax,-saparam.expo)) , (-1.0/saparam.expo) ); } } beta = 1.0/temperature; // -> beta = 1.0/(3 T) , 3 for NCOL!! 
//printf("new SA temp = %f\n", temperature); //printf("Tmin = %f, Tmax = %f, N = %d, expo = %f\n", saparam.Tmin, saparam.Tmax, saparam.N, saparam.expo); //set this beta on device CUDA_SAFE_CALL( hipMemcpyToSymbol(sa_beta, &beta, sizeof(double)) ) ; } // perform simulated annealing gauge fixing void simannealing_gauge(){ int gridsize; double maxdada = 0.0; int i; clock_t start, stop; double timeelapsed = 0.0; hipError_t cudaerr; if((VOLUME/2)%BLOCK != 0){ printf("Error: VOLUME/2 is not a multiple of BLOCK. Aborting...\n"); exit(100); } dim3 blockdim(BLOCK,1,1); if( (VOLUME/2) >= BLOCK){ gridsize =VOLUME/2/BLOCK; } else{ gridsize=1; } dim3 griddim(gridsize,1,1); // Start timer assert((start = clock())!=-1); #ifdef USETEXTURE bind_texture_gf(dev_gf); #endif for(i=0; i<saparam.N; i++){ //set the temperature set_sa_temperature(i); hipDeviceSynchronize(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } // heatbath update //printf("Doing heatbath sweep...\n"); hipGetLastError(); #ifdef USETEXTURE // update of EVEN bind_texture_trafo(dev_trafo1); hipLaunchKernelGGL(( dev_heatbath_sweep), dim3(griddim), dim3(blockdim) , 0, 0, dev_trafo2, dev_gf, dev_trafo1, dev_eoidx_even, dev_eoidx_odd, dev_nn, dev_rndgauss_field, dev_rndunif_field); unbind_texture_trafo(); hipDeviceSynchronize(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } // generate new random numbers //printf("Updating the random numbers...\n"); hipGetLastError(); update_RNG(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } // update of ODD bind_texture_trafo(dev_trafo2); hipLaunchKernelGGL(( dev_heatbath_sweep), dim3(griddim), dim3(blockdim) , 0, 0, dev_trafo1, dev_gf, dev_trafo2, dev_eoidx_odd, dev_eoidx_even, dev_nn , dev_rndgauss_field, dev_rndunif_field); unbind_texture_trafo(); hipDeviceSynchronize(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } // generate new random numbers //printf("Updating the random numbers...\n"); hipGetLastError(); update_RNG(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } #else //USETEXTURE // update of EVEN hipLaunchKernelGGL(( dev_heatbath_sweep), dim3(griddim), dim3(blockdim) , 0, 0, dev_trafo1, dev_gf, dev_trafo1, dev_eoidx_even, dev_eoidx_odd, dev_nn, dev_rndgauss_field, dev_rndunif_field); hipDeviceSynchronize(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } // generate new random numbers //printf("Updating the random numbers...\n"); hipGetLastError(); update_RNG(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } // update of ODD hipLaunchKernelGGL(( dev_heatbath_sweep), dim3(griddim), dim3(blockdim) , 0, 0, dev_trafo1, dev_gf, dev_trafo1, dev_eoidx_odd, dev_eoidx_even, dev_nn, dev_rndgauss_field, dev_rndunif_field); hipDeviceSynchronize(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } // generate new random numbers //printf("Updating the random numbers...\n"); hipGetLastError(); update_RNG(); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } #endif //USETEXTURE if((i%saparam.checkint) == 0){ //printf("recalculating functional...\n"); //recalculate dAdA #ifdef USETEXTURE bind_texture_trafo(dev_trafo1); #endif 
hipGetLastError(); maxdada = calc_functional(dev_gf, dev_trafo1); cudaerr = hipGetLastError(); if(cudaerr != hipSuccess){ printf("%s\n", hipGetErrorString(cudaerr)); } #ifdef USETEXTURE unbind_texture_trafo(); #endif printf("iter %6d:\t FUNC = %.16e \t dAdA = %.16e\t max(dAdA) = %.16e\n",i, FUNC, DADA, maxdada); } }//i #ifdef USETEXTURE unbind_texture_gf(); #endif assert((stop = clock())!=-1); timeelapsed = (double) (stop-start)/CLOCKS_PER_SEC; printf("SA finished after %f sec\n", timeelapsed); }
4cf4fb3bd65637178d996a7cef7b114d0e9a7e5e.cu
__device__ int dev_get_a0(double *a0, double kb, float* rndgauss_field, float* rndunif_field, int ind){ float a,b,y,help; a = abs(rndgauss_field[ind]); // need gaussian dist b = -log(1.0f - rndunif_field[4*ind]); // we need 4 unif randoms per site , use the 0th here y = a*a + b; y = y/kb; help = rndunif_field[4*ind+1]; // we need 4 unif randoms per site , use the 1st here if((2.0f * (help*help)) <= (2.0f -y)){ (*a0) = (double)(1.0f - y); return(1); } else{ (*a0) = 0.0; return(0); } } __device__ void dev_heatbath_su2(dev_su2* alpha, dev_su2* w, double beta, float* rndgauss_field, float* rndunif_field, int ind){ dev_su2 v, a; double k, rdet, cos_theta, sin_theta, phi, norm; int ret; k = (*w).a.x*(*w).a.x + (*w).a.y*(*w).a.y + (*w).b.x*(*w).b.x + (*w).b.y*(*w).b.y ; rdet = rsqrt(k); v.a.x = (*w).a.x*rdet; v.a.y = (*w).a.y*rdet; v.b.x = (*w).b.x*rdet; v.b.y = (*w).b.y*rdet; ret = dev_get_a0(&(a.a.x), k*beta, rndgauss_field, rndunif_field, ind); norm = sqrt(1.0 - a.a.x*a.a.x); cos_theta = 2.0 * rndunif_field[4*ind+2] - 1.0; // we need 4 unif randoms per site , use the 2nd here sin_theta = sqrt( 1.0 - cos_theta*cos_theta ); phi = 6.2831853071795862 * (double) rndunif_field[4*ind+3]; // we need 4 unif randoms per site , use the 3rd here sincos(phi, &(a.b.x), &(a.a.y)); a.a.y = norm * sin_theta * a.a.y; a.b.x = norm * sin_theta * a.b.x; a.b.y = norm * cos_theta; if(ret == 1){ // a0 was accepted in the first place in dev_get_a0 dev_su2_ti_su2(alpha,&a,&v); } else{ // a0 was not accepted in the first place -> no update -> put alpha = w (*alpha).a.x = (*w).a.x; (*alpha).a.y = (*w).a.y; (*alpha).b.x = (*w).b.x; (*alpha).b.y = (*w).b.y; } } __device__ void cabibbo_marinari_heatbath(dev_su3 * g, dev_su3 * star, double beta, float* rndgauss_field, float* rndunif_field, int ind){ // this routine does an relaxation update of an SU(3) matrix g int a,b,c; dev_su3 X; dev_su2 w, alpha; dev_complex dummy, dummy2, dummy3; for(a=0; a<2; a++){ for(b=a+1; b<3; b++){ dev_su3_ti_su3(&(X), g, star); w.a.x = X[a][a].re + X[b][b].re; w.b.y = -X[a][a].im + X[b][b].im; w.a.y = -X[a][b].im - X[b][a].im; w.b.x = -X[a][b].re + X[b][a].re; /* my definitions: U = w0 ID + i ( w1 sigma1 + w2 sigma2 + w3 sigma3 ) | 0 1 | | 0 -i | | 1 0 | sigma1 = | | sigma2 = | | sigma3 = | | | 1 0 | | i 0 | | 0 -1 | */ dev_heatbath_su2(&(alpha), &(w), beta, rndgauss_field, rndunif_field, ind); for(c=0; c<3; c++){ //dummy = cmplx(alpha(0),alpha(3), kind=RKIND) * u(a,c) & // + cmplx(alpha(2),alpha(1), kind=RKIND) * u(b,c) dummy = dev_cmult(dev_initcomplex(alpha.a.x,alpha.b.y),(*g)[a][c]); dummy2 = dev_cmult(dev_initcomplex(alpha.b.x,alpha.a.y),(*g)[b][c]); dummy = dev_cadd(dummy, dummy2); // u(b,c) = cmplx(-alpha(2), alpha(1), kind=RKIND) * u(a,c) & // + cmplx( alpha(0),-alpha(3), kind=RKIND) * u(b,c) dummy2 = dev_cmult(dev_initcomplex(-alpha.b.x,alpha.a.y),(*g)[a][c]); dummy3 = dev_cmult(dev_initcomplex(alpha.a.x,-alpha.b.y),(*g)[b][c]); (*g)[b][c] = dev_cadd(dummy2, dummy3); // u(a,c) = dummy (*g)[a][c] = dummy; } } } } // do a trafo heatbath update of either the even or the odd sites depending on the dev_indeo_thissite and dev_indeo_nextside // index fields __global__ void dev_heatbath_sweep(dev_su3_2v * trafo_new, dev_su3_2v * gf, dev_su3_2v * trafo, int * dev_indeo_thissite, int * dev_indeo_nextside, int * dev_nn, float* rndgauss_field, float* rndunif_field){ int eofieldpos, pos,hoppos,mu; // trafo and gauge fields read and reconstructed --> shared mem __shared__ dev_su3 gfsmem[BLOCK]; __shared__ dev_su3 trafosmem[BLOCK]; dev_su3 help, star; 
eofieldpos = threadIdx.x + blockDim.x*blockIdx.x; int ix = threadIdx.x; if(eofieldpos < dev_VOLUME/2){ pos = dev_indeo_thissite[eofieldpos]; dev_su3zero( &(star) ); // calculate the STAR //#pragma unroll 4 for(mu=0;mu<4;mu++){ //positive dir hoppos = dev_nn[8*pos+mu]; //gauge_field U_mu(x) #ifdef GF_8 dev_reconstructgf_8texref(gf, (4*pos+mu),&(gfsmem[ix])); #else dev_reconstructgf_2vtexref(gf, (4*pos+mu),&(gfsmem[ix])); #endif //trafo_field g^+(x+mu) #ifdef GF_8 dev_reconstructtrafo_8texref_dagger(trafo, hoppos,&(trafosmem[ix])); #else dev_reconstructtrafo_2vtexref_dagger(trafo, hoppos,&(trafosmem[ix])); #endif //dev_su3_ti_su3( &(gt), &(gfsmem[ix]), &(trafosmem[ix]) ); //dev_su3_add( &(star), &(gt)); dev_add_su3_ti_su3(&(star) , &(gfsmem[ix]), &(trafosmem[ix]) ); //negative dir hoppos = dev_nn[8*pos+4+mu]; //gauge_field U_mu(x-mu)^+ #ifdef GF_8 dev_reconstructgf_8texref_dagger(gf, 4*hoppos+mu,&(gfsmem[ix])); #else dev_reconstructgf_2vtexref_dagger(gf, 4*hoppos+mu,&(gfsmem[ix])); #endif //trafo_field g^+(x-mu) #ifdef GF_8 dev_reconstructtrafo_8texref_dagger(trafo, hoppos,&(trafosmem[ix])); #else dev_reconstructtrafo_2vtexref_dagger(trafo, hoppos,&(trafosmem[ix])); #endif //dev_su3_ti_su3( &(gt), &(gfsmem[ix]), &(trafosmem[ix]) ); //dev_su3_add( &(star), &(gt)); dev_add_su3_ti_su3( &(star), &(gfsmem[ix]), &(trafosmem[ix]) ); } //load g(x) #ifdef GF_8 dev_reconstructtrafo_8texref(trafo, pos,&(trafosmem[ix])); #else dev_reconstructtrafo_2vtexref(trafo, pos,&(trafosmem[ix])); #endif dev_su3copy( &(help) , &(trafosmem[ix]) ); cabibbo_marinari_heatbath( &(help), &(star) , sa_beta, rndgauss_field, rndunif_field, eofieldpos); //also append the position of g, for the rng! //the global sa_beta is used for beta // now the old trafo field is in trafosmem the new field in help dev_su3_normalize(&(help)); #ifdef GF_8 dev_storetrafo_8(pos, trafo_new ,&(help) ); #else dev_storetrafo_2v(pos, trafo_new ,&(help) ); #endif #ifdef USETEXTURE // copy the trafofields of the sites that are not updated to destination field // e.g. if EVEN is updated just copy ODD trafos pos = dev_indeo_nextside[eofieldpos]; //load g(x) #ifdef GF_8 dev_reconstructtrafo_8texref(trafo, pos,&(trafosmem[ix])); #else dev_reconstructtrafo_2vtexref(trafo, pos,&(trafosmem[ix])); #endif // and store it #ifdef GF_8 dev_storetrafo_8(pos, trafo_new ,&(trafosmem[ix]) ); #else dev_storetrafo_2v(pos, trafo_new ,&(trafosmem[ix]) ); #endif #endif } } void set_sa_temperature(int i){ // Returns the temperature for a given i using a function // proposed by Peter Schemel. double a, temperature, beta; //printf("Setting new SA temp: %d\n",i); if(saparam.Tmax == saparam.Tmin){ temperature = saparam.Tmax; } else{ if(saparam.expo == 0){ a = (double)(i) / (double) (saparam.N-1); temperature = pow( ( (double) saparam.Tmin/ (double) saparam.Tmax) , a) * (double) saparam.Tmax ; } else if(saparam.expo == -1){ a = (double)(saparam.Tmin - saparam.Tmax) / (double) (saparam.N-1); temperature = (a*i + saparam.Tmax); } else{ a = pow(saparam.Tmin, -saparam.expo)- pow(saparam.Tmax, -saparam.expo); a = a / (double) (saparam.N-1); temperature = pow( (a*i + pow(saparam.Tmax,-saparam.expo)) , (-1.0/saparam.expo) ); } } beta = 1.0/temperature; // -> beta = 1.0/(3 T) , 3 for NCOL!! 
//printf("new SA temp = %f\n", temperature); //printf("Tmin = %f, Tmax = %f, N = %d, expo = %f\n", saparam.Tmin, saparam.Tmax, saparam.N, saparam.expo); //set this beta on device CUDA_SAFE_CALL( cudaMemcpyToSymbol(sa_beta, &beta, sizeof(double)) ) ; } // perform simulated annealing gauge fixing void simannealing_gauge(){ int gridsize; double maxdada = 0.0; int i; clock_t start, stop; double timeelapsed = 0.0; cudaError_t cudaerr; if((VOLUME/2)%BLOCK != 0){ printf("Error: VOLUME/2 is not a multiple of BLOCK. Aborting...\n"); exit(100); } dim3 blockdim(BLOCK,1,1); if( (VOLUME/2) >= BLOCK){ gridsize =VOLUME/2/BLOCK; } else{ gridsize=1; } dim3 griddim(gridsize,1,1); // Start timer assert((start = clock())!=-1); #ifdef USETEXTURE bind_texture_gf(dev_gf); #endif for(i=0; i<saparam.N; i++){ //set the temperature set_sa_temperature(i); cudaThreadSynchronize(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } // heatbath update //printf("Doing heatbath sweep...\n"); cudaGetLastError(); #ifdef USETEXTURE // update of EVEN bind_texture_trafo(dev_trafo1); dev_heatbath_sweep<<< griddim, blockdim >>> (dev_trafo2, dev_gf, dev_trafo1, dev_eoidx_even, dev_eoidx_odd, dev_nn, dev_rndgauss_field, dev_rndunif_field); unbind_texture_trafo(); cudaThreadSynchronize(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } // generate new random numbers //printf("Updating the random numbers...\n"); cudaGetLastError(); update_RNG(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } // update of ODD bind_texture_trafo(dev_trafo2); dev_heatbath_sweep<<< griddim, blockdim >>> (dev_trafo1, dev_gf, dev_trafo2, dev_eoidx_odd, dev_eoidx_even, dev_nn , dev_rndgauss_field, dev_rndunif_field); unbind_texture_trafo(); cudaThreadSynchronize(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } // generate new random numbers //printf("Updating the random numbers...\n"); cudaGetLastError(); update_RNG(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } #else //USETEXTURE // update of EVEN dev_heatbath_sweep<<< griddim, blockdim >>> (dev_trafo1, dev_gf, dev_trafo1, dev_eoidx_even, dev_eoidx_odd, dev_nn, dev_rndgauss_field, dev_rndunif_field); cudaThreadSynchronize(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } // generate new random numbers //printf("Updating the random numbers...\n"); cudaGetLastError(); update_RNG(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } // update of ODD dev_heatbath_sweep<<< griddim, blockdim >>> (dev_trafo1, dev_gf, dev_trafo1, dev_eoidx_odd, dev_eoidx_even, dev_nn, dev_rndgauss_field, dev_rndunif_field); cudaThreadSynchronize(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } // generate new random numbers //printf("Updating the random numbers...\n"); cudaGetLastError(); update_RNG(); cudaerr = cudaGetLastError(); if(cudaerr != cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } #endif //USETEXTURE if((i%saparam.checkint) == 0){ //printf("recalculating functional...\n"); //recalculate dAdA #ifdef USETEXTURE bind_texture_trafo(dev_trafo1); #endif cudaGetLastError(); maxdada = calc_functional(dev_gf, dev_trafo1); cudaerr = cudaGetLastError(); if(cudaerr 
!= cudaSuccess){ printf("%s\n", cudaGetErrorString(cudaerr)); } #ifdef USETEXTURE unbind_texture_trafo(); #endif printf("iter %6d:\t FUNC = %.16e \t dAdA = %.16e\t max(dAdA) = %.16e\n",i, FUNC, DADA, maxdada); } }//i #ifdef USETEXTURE unbind_texture_gf(); #endif assert((stop = clock())!=-1); timeelapsed = (double) (stop-start)/CLOCKS_PER_SEC; printf("SA finished after %f sec\n", timeelapsed); }
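The set_sa_temperature() routine in the pair above interpolates the annealing temperature between saparam.Tmax and saparam.Tmin in one of three ways (geometric for expo == 0, linear for expo == -1, a power law otherwise). The following host-only sketch is an illustration, not part of either file; the values of Tmax, Tmin and N are hypothetical and only chosen to show the three schedules side by side.

#include <math.h>
#include <stdio.h>

int main(void) {
  const double Tmax = 2.0, Tmin = 0.01;          /* hypothetical values */
  const int N = 10;                              /* hypothetical sweep count */
  const double expos[3] = {0.0, -1.0, 2.0};      /* the three branches above */
  for (int e = 0; e < 3; e++) {
    const double expo = expos[e];
    printf("expo = %4.1f:", expo);
    for (int i = 0; i < N; i++) {
      double a, T;
      if (expo == 0.0) {                         /* geometric interpolation */
        a = (double) i / (double) (N - 1);
        T = pow(Tmin / Tmax, a) * Tmax;
      } else if (expo == -1.0) {                 /* linear interpolation */
        a = (Tmin - Tmax) / (double) (N - 1);
        T = a * i + Tmax;
      } else {                                   /* power-law interpolation */
        a = (pow(Tmin, -expo) - pow(Tmax, -expo)) / (double) (N - 1);
        T = pow(a * i + pow(Tmax, -expo), -1.0 / expo);
      }
      printf(" %6.3f", T);                       /* runs from Tmax down to Tmin */
    }
    printf("\n");
  }
  return 0;
}

Each schedule starts at Tmax for i = 0 and ends at Tmin for i = N-1; the kernel-side sa_beta is then 1/T, exactly as computed just before the hipMemcpyToSymbol / cudaMemcpyToSymbol call in the two files.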
d92f6b8a2fdc88a400fcc62c4d0e201820b88585.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 19-Oct-2012 16:21:12 // // user function __device__ #include "update.h" // CUDA kernel function __global__ void op_cuda_update( double *arg0, int *arg1, int offset_s, int set_size ) { double arg0_l[4]; int arg1_l[1]; for (int d=0; d<1; d++) arg1_l[d]=ZERO_int; int tid = threadIdx.x%OP_WARPSIZE; extern __shared__ char shared[]; char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE); // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { int offset = n - tid; int nelems = MIN(OP_WARPSIZE,set_size-offset); // copy data into shared memory, then into local for (int m=0; m<4; m++) ((double *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4]; for (int m=0; m<4; m++) arg0_l[m] = ((double *)arg_s)[m+tid*4]; // user-supplied kernel call update( arg0_l, arg1_l ); // copy back into shared memory, then to device for (int m=0; m<4; m++) ((double *)arg_s)[m+tid*4] = arg0_l[m]; for (int m=0; m<4; m++) arg0[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems]; } // global reductions for(int d=0; d<1; d++) op_reduction<OP_INC>(&arg1[d+blockIdx.x*1],arg1_l[d]); } // host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1 ){ int *arg1h = (int *)arg1.data; int nargs = 2; op_arg args[2]; args[0] = arg0; args[1] = arg1; if (OP_diags>2) { printf(" kernel routine w/o indirection: update\n"); } op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(1); OP_kernels[1].name = name; OP_kernels[1].count += 1; if (set->size >0) { op_timers_core(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int)); reduct_size = MAX(reduct_size,sizeof(int)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg1.data = OP_reduct_h + reduct_bytes; arg1.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((int *)arg1.data)[d+b*1] = ZERO_int; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int)); mvReductArraysToDevice(reduct_bytes); // work out shared memory requirements per element int nshared = 0; nshared = MAX(nshared,sizeof(double)*4); // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = MAX(nshared*nthread,reduct_size*nthread); hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0, (double *) arg0.data_d, (int *) arg1.data_d, offset_s, set->size ); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_update execution failed\n"); // transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg1h[d] = arg1h[d] + ((int *)arg1.data)[d+b*1]; arg1.data = (char *)arg1h; op_mpi_reduce(&arg1,arg1h); } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; OP_kernels[1].transfer += (float)set->size * arg0.size * 2.0f; }
d92f6b8a2fdc88a400fcc62c4d0e201820b88585.cu
// // auto-generated by op2.m on 19-Oct-2012 16:21:12 // // user function __device__ #include "update.h" // CUDA kernel function __global__ void op_cuda_update( double *arg0, int *arg1, int offset_s, int set_size ) { double arg0_l[4]; int arg1_l[1]; for (int d=0; d<1; d++) arg1_l[d]=ZERO_int; int tid = threadIdx.x%OP_WARPSIZE; extern __shared__ char shared[]; char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE); // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { int offset = n - tid; int nelems = MIN(OP_WARPSIZE,set_size-offset); // copy data into shared memory, then into local for (int m=0; m<4; m++) ((double *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4]; for (int m=0; m<4; m++) arg0_l[m] = ((double *)arg_s)[m+tid*4]; // user-supplied kernel call update( arg0_l, arg1_l ); // copy back into shared memory, then to device for (int m=0; m<4; m++) ((double *)arg_s)[m+tid*4] = arg0_l[m]; for (int m=0; m<4; m++) arg0[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems]; } // global reductions for(int d=0; d<1; d++) op_reduction<OP_INC>(&arg1[d+blockIdx.x*1],arg1_l[d]); } // host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1 ){ int *arg1h = (int *)arg1.data; int nargs = 2; op_arg args[2]; args[0] = arg0; args[1] = arg1; if (OP_diags>2) { printf(" kernel routine w/o indirection: update\n"); } op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(1); OP_kernels[1].name = name; OP_kernels[1].count += 1; if (set->size >0) { op_timers_core(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int)); reduct_size = MAX(reduct_size,sizeof(int)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg1.data = OP_reduct_h + reduct_bytes; arg1.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((int *)arg1.data)[d+b*1] = ZERO_int; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int)); mvReductArraysToDevice(reduct_bytes); // work out shared memory requirements per element int nshared = 0; nshared = MAX(nshared,sizeof(double)*4); // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = MAX(nshared*nthread,reduct_size*nthread); op_cuda_update<<<nblocks,nthread,nshared>>>( (double *) arg0.data_d, (int *) arg1.data_d, offset_s, set->size ); cutilSafeCall(cudaDeviceSynchronize()); cutilCheckMsg("op_cuda_update execution failed\n"); // transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg1h[d] = arg1h[d] + ((int *)arg1.data)[d+b*1]; arg1.data = (char *)arg1h; op_mpi_reduce(&arg1,arg1h); } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; OP_kernels[1].transfer += (float)set->size * arg0.size * 2.0f; }
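In op_cuda_update above, each warp stages the four doubles of its 32 set elements through shared memory: the global loads use the coalesced index tid + m*nelems, and each thread then gathers its own record at m + tid*4. The sketch below is an illustration only, not taken from the generated file; for simplicity it assumes one warp per block and adds a __syncwarp(), which the 2012-era generated code omits (it relies on implicit warp-synchronous execution).

#define DIM 4
__global__ void staged_sum(const double *g, double *out, int set_size) {
  __shared__ double s[DIM * 32];
  int tid = threadIdx.x;                           /* lane id: one warp per block assumed */
  for (int n = threadIdx.x + blockIdx.x * blockDim.x; n < set_size;
       n += blockDim.x * gridDim.x) {
    int offset = n - tid;                          /* first element handled by this warp */
    int nelems = min(32, set_size - offset);       /* elements left for this warp */
    for (int m = 0; m < DIM; m++)                  /* coalesced global -> shared copy */
      s[tid + m * nelems] = g[offset * DIM + tid + m * nelems];
    __syncwarp();
    double local[DIM];
    for (int m = 0; m < DIM; m++)                  /* strided shared -> register gather */
      local[m] = s[tid * DIM + m];
    __syncwarp();
    out[n] = local[0] + local[1] + local[2] + local[3];
  }
}

Thread tid ends up with g[(offset + tid)*DIM + m] in local[m], i.e. the four components of its own set element, which is the same permutation the generated wrapper performs before and after calling the user kernel update().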
ea1746940f5324a50b0542bd2586bfdd634106ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "../include/rbm_dynamic_cublas.h" #include "../include/rbm_baseline.h" #include "../include/utils.h" #include "rocblas.h" #include "../include/constants.h" using namespace std; #define SAMPLING_KERNEL_BLOCK_SIZE 32 namespace dynamic_cublas { //Call this with 1 thread to init dev_cublas_handle. __global__ void init_cublas_handle(hipblasHandle_t * handle) { hipblasCreate(handle); } // __global__ void init_cublas_handle_stream(hipblasHandle_t * handle, hipStream_t stream) { // hipblasSetStream(*handle, stream); // } //Call this with 1 thread to free _devcublas_handle memory. __global__ void destroy_cublas_handle(hipblasHandle_t * handle) { hipblasDestroy(*handle); } //PRE: matrix is in row-major format with m rows and n columns. // vector has length n. result has length m. //POST: writes matrix.vector to result. __device__ void matrix_dot_vector(DTYPE * matrix, int m, int n, DTYPE * vector, DTYPE * result, bool transpose, hipblasHandle_t * handle) { //Note: A cublas transform is performed iff transpose is false. //This is because the transform is used to rotate from row major //to column major, as cublas needs. DTYPE alpha = 1.0; DTYPE beta = 0.0; #ifdef USING_DOUBLES hipblasDgemv(*handle, transpose ? HIPBLAS_OP_N : HIPBLAS_OP_T, n, m, &alpha, matrix, n, vector, 1, &beta, result, 1); #else hipblasSgemv(*handle, transpose ? HIPBLAS_OP_N : HIPBLAS_OP_T, n, m, &alpha, matrix, n, vector, 1, &beta, result, 1); #endif } //This global function is designed to do 3 things: //1: Basic vector addition to add bias to mean. //2: Set mean[i] = sigmoid(mean[i]) //3: Run random sampling and store in sample. //PRE: len(mean) == len(sample) == len(bias) == length __global__ void finish_sampling_kernel(DTYPE * mean, DTYPE * sample, DTYPE * bias, int length, hiprandState_t * curand_state_ptr) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < length) { DTYPE mean_i = 1.0 / (1.0 + exp(-(mean[i] + bias[i]))); mean[i] = mean_i; #ifdef USING_DOUBLES DTYPE r = hiprand_uniform_double(&curand_state_ptr[i]); #else DTYPE r = hiprand_uniform(&curand_state_ptr[i]); #endif sample[i] = r < mean_i; } } __device__ void sample_h_given_v(DTYPE *v0_sample, DTYPE *mean, DTYPE *sample, DTYPE * W, DTYPE * hbias, hiprandState_t * curand_state_ptr, hipblasHandle_t * handle) { //Goal is to compute (W . v0_sample) + hbias and store in mean, //then do the random number thing and store in sample matrix_dot_vector(W, const_n_hidden, const_n_visible, v0_sample, mean, false, handle); int num_blocks = ((const_n_hidden - 1) / SAMPLING_KERNEL_BLOCK_SIZE) + 1; hipLaunchKernelGGL(( finish_sampling_kernel), dim3(num_blocks), dim3(SAMPLING_KERNEL_BLOCK_SIZE), 0, 0, mean, sample, hbias, const_n_hidden, curand_state_ptr); hipDeviceSynchronize(); } //1: perform operation Transpose(W) . h0_sample //2: Run finish_sampling_kernel to do the rest of the operations. 
__device__ void sample_v_given_h(DTYPE *h0_sample, DTYPE *mean, DTYPE *sample, DTYPE * W, DTYPE * vbias, hiprandState_t * curand_state_ptr, hipblasHandle_t * handle) { matrix_dot_vector(W, const_n_hidden, const_n_visible, h0_sample, mean, true, handle); // hipDeviceSynchronize(); // if(blockIdx.x * blockDim.x + threadIdx.x == 0) { // printf("first 10 mean\n"); // for(int i = 0; i < 10; i++) { // printf("%f\n", mean[i]); // } // } int num_blocks = ((const_n_visible - 1) / SAMPLING_KERNEL_BLOCK_SIZE) + 1; hipLaunchKernelGGL(( finish_sampling_kernel), dim3(num_blocks), dim3(SAMPLING_KERNEL_BLOCK_SIZE), 0, 0, mean, sample, vbias, const_n_visible, curand_state_ptr); } __device__ void gibbs_hvh(DTYPE *h0_sample, DTYPE *nv_means, DTYPE *nv_samples, DTYPE *nh_means, DTYPE *nh_samples, DTYPE * W, DTYPE * hbias, DTYPE * vbias, hiprandState_t * curand_state_ptr, hipblasHandle_t * handle) { sample_v_given_h(h0_sample, nv_means, nv_samples, W, vbias, curand_state_ptr, handle); sample_h_given_v(nv_samples, nh_means, nh_samples, W, hbias, curand_state_ptr, handle); } __global__ void cd_gpu(DTYPE * data, int curr_i, int data_num_cols, DTYPE * W, DTYPE * hbias, DTYPE * vbias, hiprandState_t * curand_states, DTYPE * ph_mean_batch, DTYPE * nv_means_batch, DTYPE * nh_means_batch, DTYPE * ph_sample_batch, DTYPE * nv_samples_batch, DTYPE * nh_samples_batch, int curand_batch_width, hipblasHandle_t * handle) { int batch_i = blockDim.x * blockIdx.x + threadIdx.x; if(batch_i < const_batch_size) { DTYPE * ph_mean = &ph_mean_batch[batch_i * const_n_hidden]; DTYPE * nv_means = &nv_means_batch[batch_i * const_n_visible]; DTYPE * nh_means = &nh_means_batch[batch_i * const_n_hidden]; DTYPE * ph_sample = &ph_sample_batch[batch_i * const_n_hidden]; DTYPE * nv_samples = &nv_samples_batch[batch_i * const_n_visible]; DTYPE * nh_samples = &nh_samples_batch[batch_i * const_n_hidden]; // if(batch_i == 0) // printf("hmean[0] = %f\n", nh_means[0]); hiprandState_t * curand_state_ptr = &curand_states[batch_i * curand_batch_width]; DTYPE * input = &data[data_num_cols * (curr_i*const_batch_size+batch_i)]; sample_h_given_v(input, ph_mean, ph_sample, W, hbias, curand_state_ptr, handle); // printf("ph_mean[0], ph_mean[1] = %f, %f\n", ph_mean[0], ph_mean[1]); //Gibbs hvh but with different parameters sample_v_given_h(ph_sample, nv_means, nv_samples, W, vbias, curand_state_ptr, handle); sample_h_given_v(nv_samples, nh_means, nh_samples, W, hbias, curand_state_ptr, handle); for(int step = 1; step < const_k; step++) {//Repeat as necessary for k gibbs_hvh(nh_samples, nv_means, nv_samples, nh_means, nh_samples, W, hbias, vbias, curand_state_ptr, handle); } } } __global__ void write_results_to_memory(DTYPE * data, DTYPE * W, DTYPE lr, DTYPE wc, DTYPE * ph_mean_batch, DTYPE * nv_means_batch, DTYPE * nh_means_batch, DTYPE * ph_sample_batch, DTYPE * nv_samples_batch, DTYPE * nh_samples_batch, DTYPE * hbias, DTYPE * vbias, DTYPE * dhbias, DTYPE * dvbias, int data_num_rows, int data_num_cols, int curr_i) { int j = blockDim.x * blockIdx.x + threadIdx.x; int i = blockDim.y * blockIdx.y + threadIdx.y; if((i < const_n_hidden) && (j < const_n_visible)) { //Assert: dW array should be 0. 
__shared__ DTYPE dW_shared[MAX_THREAD_SQUARE_EDGE][MAX_THREAD_SQUARE_EDGE]; // __shared__ DTYPE dhbias_shared[MAX_THREAD_SQUARE_EDGE]; // __shared__ DTYPE dvbias_shared[MAX_THREAD_SQUARE_EDGE]; int shared_i = i % MAX_THREAD_SQUARE_EDGE; int shared_j = j % MAX_THREAD_SQUARE_EDGE; dW_shared[shared_i][shared_j] = 0; // if(j == 0) dhbias_shared[shared_i] = 0; // if(i == 0) dvbias_shared[shared_j] = 0; for(int batch_i = 0; batch_i < const_batch_size; batch_i++) { DTYPE * ph_mean = &ph_mean_batch [batch_i * const_n_hidden]; DTYPE * nh_means = &nh_means_batch [batch_i * const_n_hidden]; DTYPE * nv_samples = &nv_samples_batch[batch_i * const_n_visible]; DTYPE * input = &data[data_num_cols * (curr_i*const_batch_size+batch_i)]; dW_shared[shared_i][shared_j] += ph_mean[i] * input[j] - nh_means[i] * nv_samples[j]; if(j == 0) dhbias[i] += ph_mean[i] - nh_means[i]; if(i == 0) dvbias[j] += input[j] - nv_samples[j]; } //Surprisingly enough, this is not a race condition, because //each thread only depends on itself for this computation DTYPE * W_row_i = &W[const_n_visible * i]; W_row_i[j] = W_row_i[j] + lr * (dW_shared[shared_i][shared_j] / const_batch_size - wc * W_row_i[j]); if(j == 0) hbias[i] += lr * dhbias[i] / const_batch_size; if(i == 0) vbias[j] += lr * dvbias[j] / const_batch_size; } } void RBM_dynamic_cublas::contrastive_divergence(int curr_i, DTYPE lr, DTYPE wc, DTYPE * dev_data) { reset_d_arrays(); if(batch_size > MAX_THREADS) { cerr << "ERROR: batch_size cannot exceed 1024" << endl; } // GET_TIME(k1_t1); // cerr << "time: " << k1_t1 << endl; int cd_blocks = 1 + (batch_size - 1) / NUM_BATCH_THREADS_PER_BLOCK; hipLaunchKernelGGL(( cd_gpu) , dim3(cd_blocks), dim3(NUM_BATCH_THREADS_PER_BLOCK), 0, stream, dev_data, curr_i, data_num_cols, dev_W, dev_hbias, dev_vbias, dev_curand_states, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, curand_batch_width, dev_handle); hipDeviceSynchronize(); // GET_TIME(k1_t2); // cerr << "k1 time: " << get_duration(k1_t1, k1_t2) << endl; // CUDA_CHECK(hipGetLastError()); DTYPE * array = new DTYPE[n_hidden * batch_size]; hipMemcpy(array, dev_ph_sample_batch, sizeof(DTYPE) * n_hidden * batch_size, hipMemcpyDeviceToHost); string filename = "array2.dat"; saveArray(array, n_hidden * batch_size, filename); delete[] array; // cerr << "Initiating write\n"; dim3 num_blocks, num_threads; dims_to_num_threads_and_blocks(n_visible, n_hidden, num_blocks, num_threads); // GET_TIME(k2_t1); hipLaunchKernelGGL(( write_results_to_memory) , dim3(num_blocks), dim3(num_threads), 0, stream, dev_data, dev_W, lr, wc, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, dev_hbias, dev_vbias, dev_dhbias, dev_dvbias, data_num_rows, data_num_cols, curr_i); // hipDeviceSynchronize(); // GET_TIME(k2_t2); // cerr << "k2 time: " << get_duration(k2_t1, k2_t2) << endl; // CUDA_CHECK(hipGetLastError()); } //Can the need for this function be removed by using that weird //type checking mechanism I saw in that CUDA sample code? 
void RBM_dynamic_cublas::allocate_special_memory() { // data = new DTYPE[data_num_rows * data_num_cols]; // for(int i = 0; i < data_num_rows * data_num_cols; i++) { // data[i] = (DTYPE) int_data[i]; // } CUDA_CHECK(hipMalloc((void**)&dev_ph_sample_batch , sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(hipMalloc((void**)&dev_nv_samples_batch, sizeof(DTYPE) * n_visible * batch_size)); CUDA_CHECK(hipMalloc((void**)&dev_nh_samples_batch, sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(hipMalloc((void**)&dev_W, n_hidden * n_visible * sizeof(DTYPE))); matrixToArray (W, WArray, n_hidden, n_visible); CUDA_CHECK(hipMemcpy(dev_W, WArray, n_hidden * n_visible * sizeof(DTYPE), hipMemcpyHostToDevice)); // CUDA_CHECK(hipMalloc((void**)&dev_data, // data_num_rows * data_num_cols * sizeof(DTYPE))); // CUDA_CHECK(hipMemcpy(dev_data, data, data_num_rows * data_num_cols * sizeof(DTYPE), // hipMemcpyHostToDevice)); } void RBM_dynamic_cublas::reset_d_arrays() { //Since dW_pitch is the width of the dev_dW array rows, we //multiply by the number of rows (n_hidden) to get the number of //bytes to reset: CUDA_CHECK(hipMemset(dev_dhbias, 0, n_hidden * sizeof(DTYPE))); CUDA_CHECK(hipMemset(dev_dvbias, 0, n_visible * sizeof(DTYPE))); // CUDA_CHECK(hipMemset(dev_ph_mean_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(hipMemset(dev_nv_means_batch , 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(hipMemset(dev_nh_means_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(hipMemset(dev_ph_sample_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(hipMemset(dev_nv_samples_batch, 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(hipMemset(dev_nh_samples_batch, 0, sizeof(DTYPE) * n_hidden * batch_size)); } void RBM_dynamic_cublas::copy_matrices_to_host() { CUDA_CHECK(hipMemcpy(WArray, dev_W, n_hidden * n_visible * sizeof(DTYPE), hipMemcpyDeviceToHost)); arrayToMatrix(WArray, W, n_hidden, n_visible); CUDA_CHECK(hipMemcpy(vbias, dev_vbias, n_visible * sizeof(DTYPE), hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(hbias, dev_hbias, n_hidden * sizeof(DTYPE), hipMemcpyDeviceToHost)); } // void RBM_dynamic_cublas::saveWeightMatrix() { // cout << "RBM_dynamic_cublas saveWeightMatrix" << endl; // copy_matrices_to_host(); // matrixToArray (W, WArray, n_hidden, n_visible); // string wFilename(MATRIX_FILENAME); // saveMatrix(WArray, (size_t) n_hidden, (size_t) n_visible, wFilename); // string hbiasFilename("hbias.dat"); // saveArray(hbias, (size_t) n_hidden, hbiasFilename); // string vbiasFilename("vbias.dat"); // saveArray(vbias, (size_t) n_visible, vbiasFilename); // } RBM_dynamic_cublas::RBM_dynamic_cublas(int size, int n_v, int n_h, int b_size, int k, DTYPE **w, DTYPE *hb, DTYPE *vb, int data_num_rows, int data_num_cols) : baseline::RBM(size, n_v, n_h, b_size, k, w, hb, vb, data_num_rows, data_num_cols) { hipMalloc((void**) &dev_handle, sizeof(hipblasHandle_t)); hipLaunchKernelGGL(( init_cublas_handle), dim3(1),dim3(1), 0, 0, dev_handle); // if(stream != NULL) { // init_cublas_handle_stream<<<1,1>>>(dev_handle, *stream); // } } RBM_dynamic_cublas::~RBM_dynamic_cublas() { hipLaunchKernelGGL(( destroy_cublas_handle), dim3(1),dim3(1), 0, 0, dev_handle); hipFree(dev_handle); } }
ea1746940f5324a50b0542bd2586bfdd634106ca.cu
#include <iostream> #include "../include/rbm_dynamic_cublas.h" #include "../include/rbm_baseline.h" #include "../include/utils.h" #include "cublas_v2.h" #include "../include/constants.h" using namespace std; #define SAMPLING_KERNEL_BLOCK_SIZE 32 namespace dynamic_cublas { //Call this with 1 thread to init dev_cublas_handle. __global__ void init_cublas_handle(cublasHandle_t * handle) { cublasCreate(handle); } // __global__ void init_cublas_handle_stream(cublasHandle_t * handle, cudaStream_t stream) { // cublasSetStream(*handle, stream); // } //Call this with 1 thread to free _devcublas_handle memory. __global__ void destroy_cublas_handle(cublasHandle_t * handle) { cublasDestroy(*handle); } //PRE: matrix is in row-major format with m rows and n columns. // vector has length n. result has length m. //POST: writes matrix.vector to result. __device__ void matrix_dot_vector(DTYPE * matrix, int m, int n, DTYPE * vector, DTYPE * result, bool transpose, cublasHandle_t * handle) { //Note: A cublas transform is performed iff transpose is false. //This is because the transform is used to rotate from row major //to column major, as cublas needs. DTYPE alpha = 1.0; DTYPE beta = 0.0; #ifdef USING_DOUBLES cublasDgemv(*handle, transpose ? CUBLAS_OP_N : CUBLAS_OP_T, n, m, &alpha, matrix, n, vector, 1, &beta, result, 1); #else cublasSgemv(*handle, transpose ? CUBLAS_OP_N : CUBLAS_OP_T, n, m, &alpha, matrix, n, vector, 1, &beta, result, 1); #endif } //This global function is designed to do 3 things: //1: Basic vector addition to add bias to mean. //2: Set mean[i] = sigmoid(mean[i]) //3: Run random sampling and store in sample. //PRE: len(mean) == len(sample) == len(bias) == length __global__ void finish_sampling_kernel(DTYPE * mean, DTYPE * sample, DTYPE * bias, int length, curandState_t * curand_state_ptr) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < length) { DTYPE mean_i = 1.0 / (1.0 + exp(-(mean[i] + bias[i]))); mean[i] = mean_i; #ifdef USING_DOUBLES DTYPE r = curand_uniform_double(&curand_state_ptr[i]); #else DTYPE r = curand_uniform(&curand_state_ptr[i]); #endif sample[i] = r < mean_i; } } __device__ void sample_h_given_v(DTYPE *v0_sample, DTYPE *mean, DTYPE *sample, DTYPE * W, DTYPE * hbias, curandState_t * curand_state_ptr, cublasHandle_t * handle) { //Goal is to compute (W . v0_sample) + hbias and store in mean, //then do the random number thing and store in sample matrix_dot_vector(W, const_n_hidden, const_n_visible, v0_sample, mean, false, handle); int num_blocks = ((const_n_hidden - 1) / SAMPLING_KERNEL_BLOCK_SIZE) + 1; finish_sampling_kernel<<<num_blocks, SAMPLING_KERNEL_BLOCK_SIZE>>> (mean, sample, hbias, const_n_hidden, curand_state_ptr); cudaDeviceSynchronize(); } //1: perform operation Transpose(W) . h0_sample //2: Run finish_sampling_kernel to do the rest of the operations. 
__device__ void sample_v_given_h(DTYPE *h0_sample, DTYPE *mean, DTYPE *sample, DTYPE * W, DTYPE * vbias, curandState_t * curand_state_ptr, cublasHandle_t * handle) { matrix_dot_vector(W, const_n_hidden, const_n_visible, h0_sample, mean, true, handle); // cudaDeviceSynchronize(); // if(blockIdx.x * blockDim.x + threadIdx.x == 0) { // printf("first 10 mean\n"); // for(int i = 0; i < 10; i++) { // printf("%f\n", mean[i]); // } // } int num_blocks = ((const_n_visible - 1) / SAMPLING_KERNEL_BLOCK_SIZE) + 1; finish_sampling_kernel<<<num_blocks, SAMPLING_KERNEL_BLOCK_SIZE>>> (mean, sample, vbias, const_n_visible, curand_state_ptr); } __device__ void gibbs_hvh(DTYPE *h0_sample, DTYPE *nv_means, DTYPE *nv_samples, DTYPE *nh_means, DTYPE *nh_samples, DTYPE * W, DTYPE * hbias, DTYPE * vbias, curandState_t * curand_state_ptr, cublasHandle_t * handle) { sample_v_given_h(h0_sample, nv_means, nv_samples, W, vbias, curand_state_ptr, handle); sample_h_given_v(nv_samples, nh_means, nh_samples, W, hbias, curand_state_ptr, handle); } __global__ void cd_gpu(DTYPE * data, int curr_i, int data_num_cols, DTYPE * W, DTYPE * hbias, DTYPE * vbias, curandState_t * curand_states, DTYPE * ph_mean_batch, DTYPE * nv_means_batch, DTYPE * nh_means_batch, DTYPE * ph_sample_batch, DTYPE * nv_samples_batch, DTYPE * nh_samples_batch, int curand_batch_width, cublasHandle_t * handle) { int batch_i = blockDim.x * blockIdx.x + threadIdx.x; if(batch_i < const_batch_size) { DTYPE * ph_mean = &ph_mean_batch[batch_i * const_n_hidden]; DTYPE * nv_means = &nv_means_batch[batch_i * const_n_visible]; DTYPE * nh_means = &nh_means_batch[batch_i * const_n_hidden]; DTYPE * ph_sample = &ph_sample_batch[batch_i * const_n_hidden]; DTYPE * nv_samples = &nv_samples_batch[batch_i * const_n_visible]; DTYPE * nh_samples = &nh_samples_batch[batch_i * const_n_hidden]; // if(batch_i == 0) // printf("hmean[0] = %f\n", nh_means[0]); curandState_t * curand_state_ptr = &curand_states[batch_i * curand_batch_width]; DTYPE * input = &data[data_num_cols * (curr_i*const_batch_size+batch_i)]; sample_h_given_v(input, ph_mean, ph_sample, W, hbias, curand_state_ptr, handle); // printf("ph_mean[0], ph_mean[1] = %f, %f\n", ph_mean[0], ph_mean[1]); //Gibbs hvh but with different parameters sample_v_given_h(ph_sample, nv_means, nv_samples, W, vbias, curand_state_ptr, handle); sample_h_given_v(nv_samples, nh_means, nh_samples, W, hbias, curand_state_ptr, handle); for(int step = 1; step < const_k; step++) {//Repeat as necessary for k gibbs_hvh(nh_samples, nv_means, nv_samples, nh_means, nh_samples, W, hbias, vbias, curand_state_ptr, handle); } } } __global__ void write_results_to_memory(DTYPE * data, DTYPE * W, DTYPE lr, DTYPE wc, DTYPE * ph_mean_batch, DTYPE * nv_means_batch, DTYPE * nh_means_batch, DTYPE * ph_sample_batch, DTYPE * nv_samples_batch, DTYPE * nh_samples_batch, DTYPE * hbias, DTYPE * vbias, DTYPE * dhbias, DTYPE * dvbias, int data_num_rows, int data_num_cols, int curr_i) { int j = blockDim.x * blockIdx.x + threadIdx.x; int i = blockDim.y * blockIdx.y + threadIdx.y; if((i < const_n_hidden) && (j < const_n_visible)) { //Assert: dW array should be 0. 
__shared__ DTYPE dW_shared[MAX_THREAD_SQUARE_EDGE][MAX_THREAD_SQUARE_EDGE]; // __shared__ DTYPE dhbias_shared[MAX_THREAD_SQUARE_EDGE]; // __shared__ DTYPE dvbias_shared[MAX_THREAD_SQUARE_EDGE]; int shared_i = i % MAX_THREAD_SQUARE_EDGE; int shared_j = j % MAX_THREAD_SQUARE_EDGE; dW_shared[shared_i][shared_j] = 0; // if(j == 0) dhbias_shared[shared_i] = 0; // if(i == 0) dvbias_shared[shared_j] = 0; for(int batch_i = 0; batch_i < const_batch_size; batch_i++) { DTYPE * ph_mean = &ph_mean_batch [batch_i * const_n_hidden]; DTYPE * nh_means = &nh_means_batch [batch_i * const_n_hidden]; DTYPE * nv_samples = &nv_samples_batch[batch_i * const_n_visible]; DTYPE * input = &data[data_num_cols * (curr_i*const_batch_size+batch_i)]; dW_shared[shared_i][shared_j] += ph_mean[i] * input[j] - nh_means[i] * nv_samples[j]; if(j == 0) dhbias[i] += ph_mean[i] - nh_means[i]; if(i == 0) dvbias[j] += input[j] - nv_samples[j]; } //Surprisingly enough, this is not a race condition, because //each thread only depends on itself for this computation DTYPE * W_row_i = &W[const_n_visible * i]; W_row_i[j] = W_row_i[j] + lr * (dW_shared[shared_i][shared_j] / const_batch_size - wc * W_row_i[j]); if(j == 0) hbias[i] += lr * dhbias[i] / const_batch_size; if(i == 0) vbias[j] += lr * dvbias[j] / const_batch_size; } } void RBM_dynamic_cublas::contrastive_divergence(int curr_i, DTYPE lr, DTYPE wc, DTYPE * dev_data) { reset_d_arrays(); if(batch_size > MAX_THREADS) { cerr << "ERROR: batch_size cannot exceed 1024" << endl; } // GET_TIME(k1_t1); // cerr << "time: " << k1_t1 << endl; int cd_blocks = 1 + (batch_size - 1) / NUM_BATCH_THREADS_PER_BLOCK; cd_gpu <<< cd_blocks, NUM_BATCH_THREADS_PER_BLOCK, 0, stream>>> (dev_data, curr_i, data_num_cols, dev_W, dev_hbias, dev_vbias, dev_curand_states, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, curand_batch_width, dev_handle); cudaDeviceSynchronize(); // GET_TIME(k1_t2); // cerr << "k1 time: " << get_duration(k1_t1, k1_t2) << endl; // CUDA_CHECK(cudaGetLastError()); DTYPE * array = new DTYPE[n_hidden * batch_size]; cudaMemcpy(array, dev_ph_sample_batch, sizeof(DTYPE) * n_hidden * batch_size, cudaMemcpyDeviceToHost); string filename = "array2.dat"; saveArray(array, n_hidden * batch_size, filename); delete[] array; // cerr << "Initiating write\n"; dim3 num_blocks, num_threads; dims_to_num_threads_and_blocks(n_visible, n_hidden, num_blocks, num_threads); // GET_TIME(k2_t1); write_results_to_memory <<< num_blocks, num_threads, 0, stream>>> (dev_data, dev_W, lr, wc, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, dev_hbias, dev_vbias, dev_dhbias, dev_dvbias, data_num_rows, data_num_cols, curr_i); // cudaDeviceSynchronize(); // GET_TIME(k2_t2); // cerr << "k2 time: " << get_duration(k2_t1, k2_t2) << endl; // CUDA_CHECK(cudaGetLastError()); } //Can the need for this function be removed by using that weird //type checking mechanism I saw in that CUDA sample code? 
void RBM_dynamic_cublas::allocate_special_memory() { // data = new DTYPE[data_num_rows * data_num_cols]; // for(int i = 0; i < data_num_rows * data_num_cols; i++) { // data[i] = (DTYPE) int_data[i]; // } CUDA_CHECK(cudaMalloc((void**)&dev_ph_sample_batch , sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(cudaMalloc((void**)&dev_nv_samples_batch, sizeof(DTYPE) * n_visible * batch_size)); CUDA_CHECK(cudaMalloc((void**)&dev_nh_samples_batch, sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(cudaMalloc((void**)&dev_W, n_hidden * n_visible * sizeof(DTYPE))); matrixToArray (W, WArray, n_hidden, n_visible); CUDA_CHECK(cudaMemcpy(dev_W, WArray, n_hidden * n_visible * sizeof(DTYPE), cudaMemcpyHostToDevice)); // CUDA_CHECK(cudaMalloc((void**)&dev_data, // data_num_rows * data_num_cols * sizeof(DTYPE))); // CUDA_CHECK(cudaMemcpy(dev_data, data, data_num_rows * data_num_cols * sizeof(DTYPE), // cudaMemcpyHostToDevice)); } void RBM_dynamic_cublas::reset_d_arrays() { //Since dW_pitch is the width of the dev_dW array rows, we //multiply by the number of rows (n_hidden) to get the number of //bytes to reset: CUDA_CHECK(cudaMemset(dev_dhbias, 0, n_hidden * sizeof(DTYPE))); CUDA_CHECK(cudaMemset(dev_dvbias, 0, n_visible * sizeof(DTYPE))); // CUDA_CHECK(cudaMemset(dev_ph_mean_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(cudaMemset(dev_nv_means_batch , 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(cudaMemset(dev_nh_means_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(cudaMemset(dev_ph_sample_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(cudaMemset(dev_nv_samples_batch, 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(cudaMemset(dev_nh_samples_batch, 0, sizeof(DTYPE) * n_hidden * batch_size)); } void RBM_dynamic_cublas::copy_matrices_to_host() { CUDA_CHECK(cudaMemcpy(WArray, dev_W, n_hidden * n_visible * sizeof(DTYPE), cudaMemcpyDeviceToHost)); arrayToMatrix(WArray, W, n_hidden, n_visible); CUDA_CHECK(cudaMemcpy(vbias, dev_vbias, n_visible * sizeof(DTYPE), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(hbias, dev_hbias, n_hidden * sizeof(DTYPE), cudaMemcpyDeviceToHost)); } // void RBM_dynamic_cublas::saveWeightMatrix() { // cout << "RBM_dynamic_cublas saveWeightMatrix" << endl; // copy_matrices_to_host(); // matrixToArray (W, WArray, n_hidden, n_visible); // string wFilename(MATRIX_FILENAME); // saveMatrix(WArray, (size_t) n_hidden, (size_t) n_visible, wFilename); // string hbiasFilename("hbias.dat"); // saveArray(hbias, (size_t) n_hidden, hbiasFilename); // string vbiasFilename("vbias.dat"); // saveArray(vbias, (size_t) n_visible, vbiasFilename); // } RBM_dynamic_cublas::RBM_dynamic_cublas(int size, int n_v, int n_h, int b_size, int k, DTYPE **w, DTYPE *hb, DTYPE *vb, int data_num_rows, int data_num_cols) : baseline::RBM(size, n_v, n_h, b_size, k, w, hb, vb, data_num_rows, data_num_cols) { cudaMalloc((void**) &dev_handle, sizeof(cublasHandle_t)); init_cublas_handle<<<1,1>>>(dev_handle); // if(stream != NULL) { // init_cublas_handle_stream<<<1,1>>>(dev_handle, *stream); // } } RBM_dynamic_cublas::~RBM_dynamic_cublas() { destroy_cublas_handle<<<1,1>>>(dev_handle); cudaFree(dev_handle); } }
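matrix_dot_vector() in the pair above keeps W in row-major order and compensates for cuBLAS's column-major convention by swapping the dimensions and the transpose flag: a row-major m x n matrix with leading dimension n is exactly what cuBLAS sees as its transpose, so CUBLAS_OP_T with dimensions (n, m) yields W.v. A small host-side check of that convention (illustration only, not part of either file):

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <stdio.h>

int main(void) {
  const int m = 2, n = 3;
  double hW[6] = {1, 2, 3,                    /* row-major W = [[1,2,3],   */
                  4, 5, 6};                   /*                [4,5,6]]   */
  double hv[3] = {1, 1, 1}, hy[2];
  double *dW, *dv, *dy, alpha = 1.0, beta = 0.0;
  cudaMalloc(&dW, sizeof(hW)); cudaMalloc(&dv, sizeof(hv)); cudaMalloc(&dy, sizeof(hy));
  cudaMemcpy(dW, hW, sizeof(hW), cudaMemcpyHostToDevice);
  cudaMemcpy(dv, hv, sizeof(hv), cudaMemcpyHostToDevice);
  cublasHandle_t h;
  cublasCreate(&h);
  /* column-major view of dW (lda = n) is W^T, so OP_T recovers W.v */
  cublasDgemv(h, CUBLAS_OP_T, n, m, &alpha, dW, n, dv, 1, &beta, dy, 1);
  cudaMemcpy(hy, dy, sizeof(hy), cudaMemcpyDeviceToHost);
  printf("W.v = (%g, %g)\n", hy[0], hy[1]);   /* expected: (6, 15) */
  cublasDestroy(h);
  cudaFree(dW); cudaFree(dv); cudaFree(dy);
  return 0;
}

The device-side code in the pair issues the same call through a cublasHandle_t created inside a one-thread kernel; the host-side version here only isolates the row-major/column-major bookkeeping.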
d9078899e00d41b9805324574f9675e1c2f93555.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Program by Arthur Alves Araujo Ferreira - All rights reserved // ITESM ID: A01022593 #include <iostream> #include <chrono> const bool CPU_AND_COMPARE = true; // Function that multiplies 2 matrixes with cuda __global__ void matrixMultiplyGPU(int *A, int *B, int *C, const int n) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; if (ix < n && iy < n) { for(int k = 0; k < n; k++) { C[iy * n + ix] += A[iy * n + k] * B[k * n + ix]; } } } // Function that multiplies 2 matrixes with cpu void matrixMultiply(int *A, int *B, int *C, const int n) { for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { for(int k = 0; k < n; k++) { C[i * n + j] += A[i * n + k] * B[j + k * n]; } } } } // Compares two matrices bool checkEquals(int *hostRef,int *gpuRef, const int n) { double ep = 1.0E-8; bool same = true; for (int i = 0; i < n*n; i++) { if (abs(hostRef[i] - gpuRef[i]) > ep) { same = false; printf("[%d] host %d gpu %d\n", i, hostRef[i], gpuRef[i]); return same; } } return same; } int main(int argc, char* argv[]) { // Device setup int dev = 0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("Using Device %d: %s\n", dev, deviceProp.name); hipSetDevice(dev); // Code configuration int repetitions = 20; int n = 50; int nBytes = n*n * sizeof(int*); // Input matrix initialization and fill int *h_A = (int*)malloc(nBytes); int *h_B = (int*)malloc(nBytes); for(int i = 0; i < n*n; i++) { h_A[i] = i+1; h_B[i] = i+1; } // Result matrixes initialization and zero fill int *gpuRef = (int*)malloc(nBytes); int *hostRef = (int*)malloc(nBytes); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // Device matrix global memory int *d_A, *d_B, *d_C; hipMalloc((void**)&d_A, nBytes); hipMalloc((void**)&d_B, nBytes); hipMalloc((void**)&d_C, nBytes); // Transfer data from host to device hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); hipMemset(d_C, 0, nBytes); // Initialize matrix with 0s // Kernel execution configuration int dimx = 128; dim3 block(dimx, 1); dim3 grid((n + block.x - 1) / block.x, n); printf("grid.x %d grid.y %d block.x %d block.y %d\n", grid.x, grid.y, block.x, block.y); // Variable initialization for repetitions double totalTimeGPU = 0; double totalTimeCPU = 0; std::chrono::duration<float, std::milli> duration_ms; // Repeat however may times was configured for (int i = 0; i < repetitions; i++) { // Multiply on GPU auto start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( matrixMultiplyGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, n); hipDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); duration_ms = end - start; totalTimeGPU += duration_ms.count(); if (CPU_AND_COMPARE) { // Multiply on CPU start = std::chrono::high_resolution_clock::now(); matrixMultiply(h_A, h_B, hostRef, n); end = std::chrono::high_resolution_clock::now(); duration_ms = end - start; totalTimeCPU += duration_ms.count(); } // Copy result from device to host hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost); // Check if equals if (CPU_AND_COMPARE) { if (checkEquals(hostRef, gpuRef, n)) { printf("Matrix equal %d\n", i); } else { printf("Matrixes not equal %d\n", i); break; } } } // Print results printf("GPU matrix multiplication done in %f ms\n", totalTimeGPU / repetitions); if (CPU_AND_COMPARE) printf("CPU matrix multiplication done in %f ms\n", totalTimeCPU / repetitions); // 
Free memory hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(hostRef); free(gpuRef); hipDeviceReset(); return 0; }
d9078899e00d41b9805324574f9675e1c2f93555.cu
// Program by Arthur Alves Araujo Ferreira - All rights reserved // ITESM ID: A01022593 #include <iostream> #include <chrono> const bool CPU_AND_COMPARE = true; // Function that multiplies 2 matrixes with cuda __global__ void matrixMultiplyGPU(int *A, int *B, int *C, const int n) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; if (ix < n && iy < n) { for(int k = 0; k < n; k++) { C[iy * n + ix] += A[iy * n + k] * B[k * n + ix]; } } } // Function that multiplies 2 matrixes with cpu void matrixMultiply(int *A, int *B, int *C, const int n) { for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { for(int k = 0; k < n; k++) { C[i * n + j] += A[i * n + k] * B[j + k * n]; } } } } // Compares two matrices bool checkEquals(int *hostRef,int *gpuRef, const int n) { double ep = 1.0E-8; bool same = true; for (int i = 0; i < n*n; i++) { if (abs(hostRef[i] - gpuRef[i]) > ep) { same = false; printf("[%d] host %d gpu %d\n", i, hostRef[i], gpuRef[i]); return same; } } return same; } int main(int argc, char* argv[]) { // Device setup int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("Using Device %d: %s\n", dev, deviceProp.name); cudaSetDevice(dev); // Code configuration int repetitions = 20; int n = 50; int nBytes = n*n * sizeof(int*); // Input matrix initialization and fill int *h_A = (int*)malloc(nBytes); int *h_B = (int*)malloc(nBytes); for(int i = 0; i < n*n; i++) { h_A[i] = i+1; h_B[i] = i+1; } // Result matrixes initialization and zero fill int *gpuRef = (int*)malloc(nBytes); int *hostRef = (int*)malloc(nBytes); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // Device matrix global memory int *d_A, *d_B, *d_C; cudaMalloc((void**)&d_A, nBytes); cudaMalloc((void**)&d_B, nBytes); cudaMalloc((void**)&d_C, nBytes); // Transfer data from host to device cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); cudaMemset(d_C, 0, nBytes); // Initialize matrix with 0s // Kernel execution configuration int dimx = 128; dim3 block(dimx, 1); dim3 grid((n + block.x - 1) / block.x, n); printf("grid.x %d grid.y %d block.x %d block.y %d\n", grid.x, grid.y, block.x, block.y); // Variable initialization for repetitions double totalTimeGPU = 0; double totalTimeCPU = 0; std::chrono::duration<float, std::milli> duration_ms; // Repeat however may times was configured for (int i = 0; i < repetitions; i++) { // Multiply on GPU auto start = std::chrono::high_resolution_clock::now(); matrixMultiplyGPU<<<grid, block>>>(d_A, d_B, d_C, n); cudaDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); duration_ms = end - start; totalTimeGPU += duration_ms.count(); if (CPU_AND_COMPARE) { // Multiply on CPU start = std::chrono::high_resolution_clock::now(); matrixMultiply(h_A, h_B, hostRef, n); end = std::chrono::high_resolution_clock::now(); duration_ms = end - start; totalTimeCPU += duration_ms.count(); } // Copy result from device to host cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); // Check if equals if (CPU_AND_COMPARE) { if (checkEquals(hostRef, gpuRef, n)) { printf("Matrix equal %d\n", i); } else { printf("Matrixes not equal %d\n", i); break; } } } // Print results printf("GPU matrix multiplication done in %f ms\n", totalTimeGPU / repetitions); if (CPU_AND_COMPARE) printf("CPU matrix multiplication done in %f ms\n", totalTimeCPU / repetitions); // Free memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(hostRef); free(gpuRef); 
cudaDeviceReset(); return 0; }
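The launch geometry in this pair is one-dimensional in x over the columns and uses blockIdx.y as the row index, so grid.x only has to cover n columns per row. Evaluated for the values hard-coded in main() (n = 50, dimx = 128), that is a deliberately oversized launch; the guard ix < n && iy < n in the kernel retires the excess threads. A tiny host-only sketch of that arithmetic (illustration only):

#include <stdio.h>

int main(void) {
  const int n = 50, dimx = 128;                /* values from the pair above */
  const int gx = (n + dimx - 1) / dimx;        /* = 1: one block covers all 50 columns */
  const int gy = n;                            /* = 50: one block row per matrix row */
  printf("grid = (%d, %d), block = (%d, 1)\n", gx, gy, dimx);
  printf("threads launched = %d, threads doing work = %d\n",
         gx * gy * dimx, n * n);               /* 6400 launched, 2500 useful */
  return 0;
}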
9c38aba713e0a4e68dd781c02968fceeda5217be.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_b2;
int xdim0_update_halo_kernel1_b2_h = -1;
__constant__ int ydim0_update_halo_kernel1_b2;
int ydim0_update_halo_kernel1_b2_h = -1;
__constant__ int xdim1_update_halo_kernel1_b2;
int xdim1_update_halo_kernel1_b2_h = -1;
__constant__ int ydim1_update_halo_kernel1_b2;
int ydim1_update_halo_kernel1_b2_h = -1;
__constant__ int xdim2_update_halo_kernel1_b2;
int xdim2_update_halo_kernel1_b2_h = -1;
__constant__ int ydim2_update_halo_kernel1_b2;
int ydim2_update_halo_kernel1_b2_h = -1;
__constant__ int xdim3_update_halo_kernel1_b2;
int xdim3_update_halo_kernel1_b2_h = -1;
__constant__ int ydim3_update_halo_kernel1_b2;
int ydim3_update_halo_kernel1_b2_h = -1;
__constant__ int xdim4_update_halo_kernel1_b2;
int xdim4_update_halo_kernel1_b2_h = -1;
__constant__ int ydim4_update_halo_kernel1_b2;
int ydim4_update_halo_kernel1_b2_h = -1;
__constant__ int xdim5_update_halo_kernel1_b2;
int xdim5_update_halo_kernel1_b2_h = -1;
__constant__ int ydim5_update_halo_kernel1_b2;
int ydim5_update_halo_kernel1_b2_h = -1;
__constant__ int xdim6_update_halo_kernel1_b2;
int xdim6_update_halo_kernel1_b2_h = -1;
__constant__ int ydim6_update_halo_kernel1_b2;
int ydim6_update_halo_kernel1_b2_h = -1;

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6

#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_b2*(y)+xdim0_update_halo_kernel1_b2*ydim0_update_halo_kernel1_b2*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_b2*(y)+xdim1_update_halo_kernel1_b2*ydim1_update_halo_kernel1_b2*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_b2*(y)+xdim2_update_halo_kernel1_b2*ydim2_update_halo_kernel1_b2*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_b2*(y)+xdim3_update_halo_kernel1_b2*ydim3_update_halo_kernel1_b2*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_b2*(y)+xdim4_update_halo_kernel1_b2*ydim4_update_halo_kernel1_b2*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_b2*(y)+xdim5_update_halo_kernel1_b2*ydim5_update_halo_kernel1_b2*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_b2*(y)+xdim6_update_halo_kernel1_b2*ydim6_update_halo_kernel1_b2*(z))

//user function
__device__ inline void update_halo_kernel1_b2_gpu(double *density0, double *density1,
  double *energy0, double *energy1,
  double *pressure, double *viscosity,
  double *soundspeed, const int* fields) {
  if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,3,0)];
  if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,3,0)];
  if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,3,0)];
  if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,3,0)];
  if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,3,0)];
  if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,3,0)];
  if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,3,0)];
}

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6

__global__ void ops_update_halo_kernel1_b2(
  double* __restrict arg0,
  double* __restrict arg1,
  double* __restrict arg2,
  double* __restrict arg3,
  double* __restrict arg4,
  double* __restrict arg5,
  double* __restrict arg6,
  const int* __restrict arg7,
  int size0, int size1, int size2 ){

  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_b2 + idx_z * 1*1 * xdim0_update_halo_kernel1_b2 * ydim0_update_halo_kernel1_b2;
  arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_b2 + idx_z * 1*1 * xdim1_update_halo_kernel1_b2 * ydim1_update_halo_kernel1_b2;
  arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_b2 + idx_z * 1*1 * xdim2_update_halo_kernel1_b2 * ydim2_update_halo_kernel1_b2;
  arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_b2 + idx_z * 1*1 * xdim3_update_halo_kernel1_b2 * ydim3_update_halo_kernel1_b2;
  arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_b2 + idx_z * 1*1 * xdim4_update_halo_kernel1_b2 * ydim4_update_halo_kernel1_b2;
  arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_b2 + idx_z * 1*1 * xdim5_update_halo_kernel1_b2 * ydim5_update_halo_kernel1_b2;
  arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_b2 + idx_z * 1*1 * xdim6_update_halo_kernel1_b2 * ydim6_update_halo_kernel1_b2;

  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel1_b2_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
  }
}

// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int* range,
  ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
  ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_b2_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
  ops_arg arg7 = desc->args[7];
#endif

  //Timing
  double t1,t2,c1,c2;

  ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};

#if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,8,range,11)) return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(11,"update_halo_kernel1_b2");
    OPS_kernels[11].count++;
    ops_timers_core(&c1,&t1);
  }

  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
#else
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
#endif

  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);

  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0];
  int ydim5 = args[5].dat->size[1];
  int xdim6 = args[6].dat->size[0];
  int ydim6 = args[6].dat->size[1];

  if (xdim0 != xdim0_update_halo_kernel1_b2_h || ydim0 != ydim0_update_halo_kernel1_b2_h ||
      xdim1 != xdim1_update_halo_kernel1_b2_h || ydim1 != ydim1_update_halo_kernel1_b2_h ||
      xdim2 != xdim2_update_halo_kernel1_b2_h || ydim2 != ydim2_update_halo_kernel1_b2_h ||
      xdim3 != xdim3_update_halo_kernel1_b2_h || ydim3 != ydim3_update_halo_kernel1_b2_h ||
      xdim4 != xdim4_update_halo_kernel1_b2_h || ydim4 != ydim4_update_halo_kernel1_b2_h ||
      xdim5 != xdim5_update_halo_kernel1_b2_h || ydim5 != ydim5_update_halo_kernel1_b2_h ||
      xdim6 != xdim6_update_halo_kernel1_b2_h || ydim6 != ydim6_update_halo_kernel1_b2_h) {
    hipMemcpyToSymbol( xdim0_update_halo_kernel1_b2, &xdim0, sizeof(int) );
    xdim0_update_halo_kernel1_b2_h = xdim0;
    hipMemcpyToSymbol( ydim0_update_halo_kernel1_b2, &ydim0, sizeof(int) );
    ydim0_update_halo_kernel1_b2_h = ydim0;
    hipMemcpyToSymbol( xdim1_update_halo_kernel1_b2, &xdim1, sizeof(int) );
    xdim1_update_halo_kernel1_b2_h = xdim1;
    hipMemcpyToSymbol( ydim1_update_halo_kernel1_b2, &ydim1, sizeof(int) );
    ydim1_update_halo_kernel1_b2_h = ydim1;
    hipMemcpyToSymbol( xdim2_update_halo_kernel1_b2, &xdim2, sizeof(int) );
    xdim2_update_halo_kernel1_b2_h = xdim2;
    hipMemcpyToSymbol( ydim2_update_halo_kernel1_b2, &ydim2, sizeof(int) );
    ydim2_update_halo_kernel1_b2_h = ydim2;
    hipMemcpyToSymbol( xdim3_update_halo_kernel1_b2, &xdim3, sizeof(int) );
    xdim3_update_halo_kernel1_b2_h = xdim3;
    hipMemcpyToSymbol( ydim3_update_halo_kernel1_b2, &ydim3, sizeof(int) );
    ydim3_update_halo_kernel1_b2_h = ydim3;
    hipMemcpyToSymbol( xdim4_update_halo_kernel1_b2, &xdim4, sizeof(int) );
    xdim4_update_halo_kernel1_b2_h = xdim4;
    hipMemcpyToSymbol( ydim4_update_halo_kernel1_b2, &ydim4, sizeof(int) );
    ydim4_update_halo_kernel1_b2_h = ydim4;
    hipMemcpyToSymbol( xdim5_update_halo_kernel1_b2, &xdim5, sizeof(int) );
    xdim5_update_halo_kernel1_b2_h = xdim5;
    hipMemcpyToSymbol( ydim5_update_halo_kernel1_b2, &ydim5, sizeof(int) );
    ydim5_update_halo_kernel1_b2_h = ydim5;
    hipMemcpyToSymbol( xdim6_update_halo_kernel1_b2, &xdim6, sizeof(int) );
    xdim6_update_halo_kernel1_b2_h = xdim6;
    hipMemcpyToSymbol( ydim6_update_halo_kernel1_b2, &ydim6, sizeof(int) );
    ydim6_update_halo_kernel1_b2_h = ydim6;
  }

  int *arg7h = (int *)arg7.data;

  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);

  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg7.data = OPS_consts_h + consts_bytes;
  arg7.data_d = OPS_consts_d + consts_bytes;
  for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
  int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);

  char *p_a[8];

  //set up initial pointers
  int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
  base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;

  int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
  base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;

  int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
  base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;

  int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
  base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;

  int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
  base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]);
  p_a[4] = (char *)args[4].data_d + base4;

  int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
  base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]);
  p_a[5] = (char *)args[5].data_d + base5;

  int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
  base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]);
  base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]);
  p_a[6] = (char *)args[6].data_d + base6;

#ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 8);
  ops_halo_exchanges(args,8,range);
#endif

  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[11].mpi_time += t2-t1;
  }

  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0 && z_size > 0)
    hipLaunchKernelGGL(( ops_update_halo_kernel1_b2), dim3(grid), dim3(tblock) , 0, 0,
      (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
      (double *)p_a[4], (double *)p_a[5], (double *)p_a[6],
      (int *)arg7.data_d,x_size, y_size, z_size);

  cutilSafeCall(hipGetLastError());

  if (OPS_diags>1) {
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[11].time += t1-t2;
  }

#ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 8);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  ops_set_halo_dirtybit3(&args[5],range);
  ops_set_halo_dirtybit3(&args[6],range);
#endif

  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[11].mpi_time += t2-t1;
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg5);
    OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg6);
  }
}

#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int* range,
  ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
  ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 11;
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 11;
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 8;
  desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
  desc->args[7] = arg7;
  char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
  memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
  desc->args[7].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel1_b2_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(11,"update_halo_kernel1_b2");
  }
  ops_enqueue_kernel(desc);
}
#endif
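Note: the host stub above keeps a host-side *_h shadow of every dat dimension and calls hipMemcpyToSymbol (cudaMemcpyToSymbol in the CUDA twin below) only when a dimension actually changes, so repeated launches over same-sized dats skip the constant-memory upload. A minimal sketch of that caching idiom follows; the names here are illustrative and are not part of the generated code.

__constant__ int xdim0_c;      // pitch read by the device kernel
static int xdim0_c_h = -1;     // host shadow of the value currently on the device

static void update_xdim0(int xdim0) {
  if (xdim0 != xdim0_c_h) {                            // upload only on change
    hipMemcpyToSymbol(xdim0_c, &xdim0, sizeof(int));   // assumes the HIP runtime, as used above
    xdim0_c_h = xdim0;
  }
}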
9c38aba713e0a4e68dd781c02968fceeda5217be.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_b2; int xdim0_update_halo_kernel1_b2_h = -1; __constant__ int ydim0_update_halo_kernel1_b2; int ydim0_update_halo_kernel1_b2_h = -1; __constant__ int xdim1_update_halo_kernel1_b2; int xdim1_update_halo_kernel1_b2_h = -1; __constant__ int ydim1_update_halo_kernel1_b2; int ydim1_update_halo_kernel1_b2_h = -1; __constant__ int xdim2_update_halo_kernel1_b2; int xdim2_update_halo_kernel1_b2_h = -1; __constant__ int ydim2_update_halo_kernel1_b2; int ydim2_update_halo_kernel1_b2_h = -1; __constant__ int xdim3_update_halo_kernel1_b2; int xdim3_update_halo_kernel1_b2_h = -1; __constant__ int ydim3_update_halo_kernel1_b2; int ydim3_update_halo_kernel1_b2_h = -1; __constant__ int xdim4_update_halo_kernel1_b2; int xdim4_update_halo_kernel1_b2_h = -1; __constant__ int ydim4_update_halo_kernel1_b2; int ydim4_update_halo_kernel1_b2_h = -1; __constant__ int xdim5_update_halo_kernel1_b2; int xdim5_update_halo_kernel1_b2_h = -1; __constant__ int ydim5_update_halo_kernel1_b2; int ydim5_update_halo_kernel1_b2_h = -1; __constant__ int xdim6_update_halo_kernel1_b2; int xdim6_update_halo_kernel1_b2_h = -1; __constant__ int ydim6_update_halo_kernel1_b2; int ydim6_update_halo_kernel1_b2_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_b2*(y)+xdim0_update_halo_kernel1_b2*ydim0_update_halo_kernel1_b2*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_b2*(y)+xdim1_update_halo_kernel1_b2*ydim1_update_halo_kernel1_b2*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_b2*(y)+xdim2_update_halo_kernel1_b2*ydim2_update_halo_kernel1_b2*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_b2*(y)+xdim3_update_halo_kernel1_b2*ydim3_update_halo_kernel1_b2*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_b2*(y)+xdim4_update_halo_kernel1_b2*ydim4_update_halo_kernel1_b2*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_b2*(y)+xdim5_update_halo_kernel1_b2*ydim5_update_halo_kernel1_b2*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_b2*(y)+xdim6_update_halo_kernel1_b2*ydim6_update_halo_kernel1_b2*(z)) //user function __device__ inline void update_halo_kernel1_b2_gpu(double *density0, double *density1, double *energy0, double *energy1, double *pressure, double *viscosity, double *soundspeed, const int* fields) { if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,3,0)]; if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,3,0)]; if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,3,0)]; if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,3,0)]; if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,3,0)]; if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,3,0)]; if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,3,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_update_halo_kernel1_b2( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, double* __restrict arg6, const int* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + 
threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_b2 + idx_z * 1*1 * xdim0_update_halo_kernel1_b2 * ydim0_update_halo_kernel1_b2; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_b2 + idx_z * 1*1 * xdim1_update_halo_kernel1_b2 * ydim1_update_halo_kernel1_b2; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_b2 + idx_z * 1*1 * xdim2_update_halo_kernel1_b2 * ydim2_update_halo_kernel1_b2; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_b2 + idx_z * 1*1 * xdim3_update_halo_kernel1_b2 * ydim3_update_halo_kernel1_b2; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_b2 + idx_z * 1*1 * xdim4_update_halo_kernel1_b2 * ydim4_update_halo_kernel1_b2; arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_b2 + idx_z * 1*1 * xdim5_update_halo_kernel1_b2 * ydim5_update_halo_kernel1_b2; arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_b2 + idx_z * 1*1 * xdim6_update_halo_kernel1_b2 * ydim6_update_halo_kernel1_b2; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel1_b2_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_update_halo_kernel1_b2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif //Timing double t1,t2,c1,c2; ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,8,range,11)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(11,"update_halo_kernel1_b2"); OPS_kernels[11].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != 
xdim0_update_halo_kernel1_b2_h || ydim0 != ydim0_update_halo_kernel1_b2_h || xdim1 != xdim1_update_halo_kernel1_b2_h || ydim1 != ydim1_update_halo_kernel1_b2_h || xdim2 != xdim2_update_halo_kernel1_b2_h || ydim2 != ydim2_update_halo_kernel1_b2_h || xdim3 != xdim3_update_halo_kernel1_b2_h || ydim3 != ydim3_update_halo_kernel1_b2_h || xdim4 != xdim4_update_halo_kernel1_b2_h || ydim4 != ydim4_update_halo_kernel1_b2_h || xdim5 != xdim5_update_halo_kernel1_b2_h || ydim5 != ydim5_update_halo_kernel1_b2_h || xdim6 != xdim6_update_halo_kernel1_b2_h || ydim6 != ydim6_update_halo_kernel1_b2_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel1_b2, &xdim0, sizeof(int) ); xdim0_update_halo_kernel1_b2_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel1_b2, &ydim0, sizeof(int) ); ydim0_update_halo_kernel1_b2_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel1_b2, &xdim1, sizeof(int) ); xdim1_update_halo_kernel1_b2_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel1_b2, &ydim1, sizeof(int) ); ydim1_update_halo_kernel1_b2_h = ydim1; cudaMemcpyToSymbol( xdim2_update_halo_kernel1_b2, &xdim2, sizeof(int) ); xdim2_update_halo_kernel1_b2_h = xdim2; cudaMemcpyToSymbol( ydim2_update_halo_kernel1_b2, &ydim2, sizeof(int) ); ydim2_update_halo_kernel1_b2_h = ydim2; cudaMemcpyToSymbol( xdim3_update_halo_kernel1_b2, &xdim3, sizeof(int) ); xdim3_update_halo_kernel1_b2_h = xdim3; cudaMemcpyToSymbol( ydim3_update_halo_kernel1_b2, &ydim3, sizeof(int) ); ydim3_update_halo_kernel1_b2_h = ydim3; cudaMemcpyToSymbol( xdim4_update_halo_kernel1_b2, &xdim4, sizeof(int) ); xdim4_update_halo_kernel1_b2_h = xdim4; cudaMemcpyToSymbol( ydim4_update_halo_kernel1_b2, &ydim4, sizeof(int) ); ydim4_update_halo_kernel1_b2_h = ydim4; cudaMemcpyToSymbol( xdim5_update_halo_kernel1_b2, &xdim5, sizeof(int) ); xdim5_update_halo_kernel1_b2_h = xdim5; cudaMemcpyToSymbol( ydim5_update_halo_kernel1_b2, &ydim5, sizeof(int) ); ydim5_update_halo_kernel1_b2_h = ydim5; cudaMemcpyToSymbol( xdim6_update_halo_kernel1_b2, &xdim6, sizeof(int) ); xdim6_update_halo_kernel1_b2_h = xdim6; cudaMemcpyToSymbol( ydim6_update_halo_kernel1_b2, &ydim6, sizeof(int) ); ydim6_update_halo_kernel1_b2_h = ydim6; } int *arg7h = (int *)arg7.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? 
args[6].dat->type_size : args[6].dat->elem_size); char *p_a[8]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[11].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel1_b2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[11].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); ops_set_halo_dirtybit3(&args[6],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[11].mpi_time += t2-t1; OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0); 
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg6); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 11; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 11; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg*)malloc(8*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int)); desc->args[7].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_b2_execute; if (OPS_diags > 1) { ops_timing_realloc(11,"update_halo_kernel1_b2"); } ops_enqueue_kernel(desc); } #endif
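Both the HIP and CUDA variants of this file address each field through the generated OPS_ACC macros, which flatten a relative (x,y,z) stencil offset into a one-dimensional element offset using the per-argument x and y pitches held in constant memory; the kernel body then copies the interior plane at y+3 into the bottom halo plane at y+0 for every enabled field. A small self-contained illustration of the same offset arithmetic, using hypothetical names rather than the generated ones:

// Mirror of the OPS_ACC flattening: offset = x + xdim*y + xdim*ydim*z,
// where xdim and ydim are the padded x and y extents of the block.
__host__ __device__ inline int ops_acc(int x, int y, int z, int xdim, int ydim) {
  return x + xdim * y + xdim * ydim * z;
}

// Usage sketch: copy the value three rows inside the domain into the halo row.
// field[ops_acc(0, 0, 0, xdim, ydim)] = field[ops_acc(0, 3, 0, xdim, ydim)];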
fa364f0e3b05227456b720765af202574cc51948.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * cuBLASTP - Fine-Grained Parallelization of Protein Sequence Search on CPU+GPU * Version 0.1 (beta) * * (c) 2015 Virginia Polytechnic Institute & State University (Virginia Tech) * This version of cuBLASTP is licensed for non-commercial use only, * as specified in LICENSE files in licensing directory. For all other use * contact vtiplicensing@vtip.org * * Developer: Jing Zhang * */ #include "blast.h" #include "wordLookupDFA.h" #include <math.h> #include <limits.h> #include <sys/time.h> #include <inttypes.h> #include "segsort.h" #ifdef SM_20 #define __ldg(x) (*(x)) #define NUM_BLOCK 112 #else #define NUM_BLOCK 260 #endif #define BLOCK_SIZE 128 #define BIN_X 4 // CHANGE BIN_SIZE #define BIN_POWER 7 // BIN_X #define BIN_MARK 127 // BIN_X extern unsigned char *wordLookupDFA; extern struct groupFP *wordLookupDFA_groupsFP; TIMERECORD timeRecord; struct parameters { char parameters_wordSize; unsigned char encoding_numCodes; char parameters_overlap; int4 wordLookupDFA_numCodes; uint4 additionalQueryPositionOffset; int4 statistics_ungappedNominalDropoff; int4 blast_ungappedNominalTrigger; int4 parameters_A; uint4 ungappedExtensionsPerThread; uint4 ungappedExtAdditionalStartLoc; }; #define TARGET_THREAD 0 #define UNGAPEXT_PER_THREAD 150 #define TOTAL_UNGAPPED_EXT 500000 __device__ __constant__ int2 scoreMatrixC[1640]; __device__ __constant__ unsigned char querySequenceC[40000]; int compare_ungappedextension(const void *a, const void *b) { return ((struct ungappedExtension *)a)->sequenceCount - ((struct ungappedExtension *)b)->sequenceCount; } int findStartLoc(struct ungappedExtension *ungappedExtensionsPtr, int threadNo, int itemNum); __device__ struct ungappedExtension *ungappedExtension_oneHitExtendD( unsigned char *, int4, unsigned char *, struct PSSMatrixFP, unsigned char *, uint4 *, unsigned char, int4, int4, struct ungappedExtension *, uint4 *, uint4, int4); __device__ uint4 global_numAdditionalTriggerExtensions; __device__ struct coordinate ungappedExtension_findProteinSeed( struct ungappedExtension *ungappedExtension, struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, unsigned char encoding_numCodes); __device__ struct coordinate ungappedExtension_findProteinSeed_sm( struct ungappedExtension *ungappedExtension, // struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, unsigned char encoding_numCodes, unsigned char *querySequence, int2 *scoreMatrix); __global__ void ungappedExtension_twoHitExtendG_findProteinSeed( struct parameters *parametersFP_g, struct PSSMatrixFP *PSSMatrixFP_g, unsigned char *sequence, struct sequenceDataFP *sequenceDataFP, struct ungappedExtension *ungappedExtension_extensionsp, uint4 *blast_numTriggerExtensions); __global__ void ungappedExtension_twoHitExtendG_bin_sorted_sm_s( struct parameters *parametersFP_g, // char *matrixBody_g, uint64_t *HitInfo_g, // uint2 *num_hits, uint4 *blast_numUngappedExtensions, unsigned char *sequence, // unsigned char **hitMatrix_furthestp, // uint4 *hitMatrix_offsetp, struct PSSMatrixFP *PSSMatrixFP_g, struct sequenceDataFP *sequenceDataFP, struct ungappedExtension *ungappedExtension_extensionsp, uint4 *blast_numTriggerExtensions, int *numOneHitsD, int *numExtD, int *binOffset_g); __device__ void ungappedExtension_twoHitExtendD_sm( unsigned char *sequenceStart, int4 queryOffset, unsigned char *subjectHit, unsigned char *lastHitFP, // struct PSSMatrixFP *PSSMatrixFP, unsigned char *subject, unsigned char **sequenceHitEnd, unsigned char 
encoding_numCodes, int4 statistics_ungappedNominalDropoff, int4 blast_ungappedNominalTrigger, int4 ungappedExtensionsPerThread, struct ungappedExtension *ungappedExtension_extensions, struct ungappedExtension *ungappedExtension_additonal, uint4 *numOfTriggerExtensions, uint4 sequenceCount, int2 *scoreMatrix, unsigned char *querySequence) { // int queryPosition; unsigned char *subjectPosition, *subjectStart, *subjectEnd; int4 changeSinceBest = 0; int4 dropoff, originalDropoff; int4 ungappedExtension_bestScore; originalDropoff = dropoff = -statistics_ungappedNominalDropoff; ungappedExtension_bestScore = 0; // Start at queryEnd,subjectEnd (right/last hit position) // queryPosition = matrix + queryOffset * encoding_numCodes; subjectPosition = subjectStart = subjectHit; int2 queryPosition = queryOffset + 1; while (changeSinceBest > dropoff) { // changeSinceBest += queryPosition[__ldg(subjectPosition)]; changeSinceBest += scoreMatrix [querySequence[queryPosition] * encoding_numCodes + (*subjectPosition)]; // If we have got a positive score if (changeSinceBest > 0) { // Keep updating best score and resetting change-since-best // whilst we are reading positive scores do { ungappedExtension_bestScore += changeSinceBest; // queryPosition = queryPosition - encoding_numCodes; queryPosition = queryPosition - 1; subjectPosition--; // changeSinceBest = queryPosition[__ldg(subjectPosition)]; changeSinceBest = scoreMatrix[querySequence[queryPosition] * encoding_numCodes + (*subjectPosition)]; } while (changeSinceBest > 0); subjectStart = subjectPosition; } // queryPosition = queryPosition - encoding_numCodes; queryPosition = queryPosition - 1; subjectPosition--; } // Correct for extra decrement subjectStart++; if (subjectStart > lastHitFP) { *sequenceHitEnd = subjectHit; return; } // Starting at right/last hit position again // queryPosition = matrix + (queryOffset + 1) * encoding_numCodes; queryPosition = (queryOffset + 2); subjectPosition = subjectHit + 1; subjectEnd = subjectHit; changeSinceBest = 0; // May need to alter dropoff so we also dropoff if below zero if (-ungappedExtension_bestScore > originalDropoff) { dropoff = -ungappedExtension_bestScore; } // Extend end of alignment until dropoff while (changeSinceBest > dropoff) { // Shucai // changeSinceBest += queryPosition[__ldg(subjectPosition)]; changeSinceBest += scoreMatrix [querySequence[queryPosition] * encoding_numCodes + (*subjectPosition)]; // If we have got a positive score if (changeSinceBest > 0) { // Keep updating best score and resetting change-since-best // whilst we are reading positive scores do { ungappedExtension_bestScore += changeSinceBest; // queryPosition = queryPosition + encoding_numCodes; queryPosition = queryPosition + 1; subjectPosition++; // changeSinceBest = queryPosition[__ldg(subjectPosition)]; changeSinceBest = scoreMatrix[querySequence[queryPosition] * encoding_numCodes + (*subjectPosition)]; } while (changeSinceBest > 0); subjectEnd = subjectPosition; // Check need for change in dropoff if ((dropoff = -ungappedExtension_bestScore) < originalDropoff) { dropoff = originalDropoff; } } // queryPosition = queryPosition + encoding_numCodes; queryPosition = queryPosition + 1; subjectPosition++; } subjectEnd--; *sequenceHitEnd = subjectEnd; if (ungappedExtension_bestScore >= blast_ungappedNominalTrigger) { int2 diagonal; struct ungappedExtension *newUngappedExtension = NULL; newUngappedExtension = *numOfTriggerExtensions >= ungappedExtensionsPerThread ? 
&ungappedExtension_additonal [atomicAdd(&global_numAdditionalTriggerExtensions, 1)] : &ungappedExtension_extensions [atomicAdd(numOfTriggerExtensions, 1)]; // newUngappedExtension = // &ungappedExtension_extensions[*numOfTriggerExtensions]; // Calculate diagonal diagonal = (subjectHit - subject) - queryOffset; // Determine offsets from pointers newUngappedExtension->start.subjectOffset = subjectStart - subject; newUngappedExtension->end.subjectOffset = subjectEnd - subject; newUngappedExtension->start.queryOffset = newUngappedExtension->start.subjectOffset - diagonal; newUngappedExtension->end.queryOffset = newUngappedExtension->end.subjectOffset - diagonal; newUngappedExtension->seed = ungappedExtension_findProteinSeed_sm( newUngappedExtension, subject, encoding_numCodes, querySequence, scoreMatrix); newUngappedExtension->next = NULL; newUngappedExtension->nominalScore = ungappedExtension_bestScore; newUngappedExtension->status = ungappedExtension_UNGAPPED; newUngappedExtension->sequenceCount = sequenceCount; // newUngappedExtension->tid = tid; // Shucai // Record the number of hits satisfying the next step //(*numOfTriggerExtensions)++; } } __global__ void search_protein_hit_detection_prescan( struct sequenceDataFP *sequenceDataFP, unsigned char *sequence, struct parameters *parametersFP_g, struct groupFP *wordLookupDFA_groupsFP, unsigned char *wordLookupDFAFP, int *blast_numHits_g, uint4 nTotalSequenceNum, unsigned int groupNum) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int tt = threadIdx.x; unsigned int warpId = tid >> 5; unsigned char laneId = tt & 31; extern __shared__ unsigned char DFA_groupFP_array[]; __shared__ unsigned int bin_numHits_share[BLOCK_SIZE * BIN_X]; unsigned int *bin_numHits = bin_numHits_share + (tt >> 5 << BIN_POWER); for (int ii = 0; ii < BIN_X; ii++) bin_numHits[laneId + ii * 32] = 0; int *blast_numHits = blast_numHits_g + (warpId << BIN_POWER); unsigned char *subject, *sequenceEnd, *address, *start; int4 subjectOffset, count; unsigned char currentWord; const unsigned char *currentBlock; uint2 *queryOffsets, queryOffset; struct parameters parametersFP = *parametersFP_g; extern __shared__ struct groupFP DFA_groupFP_s[]; for (unsigned int ii = tt; ii < groupNum; ii += blockDim.x) { DFA_groupFP_s[ii] = wordLookupDFA_groupsFP[ii]; } __syncthreads(); uint2 *wordLookupDFA_AddPositions = (uint2 *)((char *)wordLookupDFAFP + parametersFP.additionalQueryPositionOffset); uint4 sequenceCount = warpId; while (sequenceCount < nTotalSequenceNum) { if (sequenceDataFP[sequenceCount].sequenceLength >= parametersFP.parameters_wordSize) { start = subject = sequence + sequenceDataFP[sequenceCount].offset; sequenceEnd = subject + sequenceDataFP[sequenceCount].sequenceLength; address = start + laneId; while (address + parametersFP.parameters_wordSize - 1 < sequenceEnd) { struct groupFP *currentGroupFP = DFA_groupFP_s; unsigned char letter = *address; for (count = 1; count < parametersFP.parameters_wordSize; count++) { currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? &DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; address++; letter = *address; } currentBlock = &wordLookupDFAFP[currentGroupFP->nextWords]; // If current code is a regular letter currentWord = letter < parametersFP.wordLookupDFA_numCodes ? __ldg(currentBlock + letter) : __ldg(currentBlock); currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? 
&DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; if (currentWord) { subjectOffset = address - subject; // At least one query position, stored at an extenal address queryOffsets = ((uint2 *)currentBlock) - currentWord; if (!__ldg(queryOffsets)) { // Go to an outside address for additional positions queryOffsets = wordLookupDFA_AddPositions + (__ldg(queryOffsets + 1) * constants_max_int2) + __ldg(queryOffsets + 2); } do { #ifndef NO_STAGE2 queryOffset = __ldg(queryOffsets); int2 diagonal = subjectOffset - queryOffset; unsigned char bin_id = (uint2)diagonal & BIN_MARK; unsigned int nHits = atomicAdd(&(bin_numHits[bin_id]), 1); #endif queryOffsets++; } while (__ldg(queryOffsets)); } start += 32; address = start + laneId; } } sequenceCount += gridDim.x * 4; } for (int ii = 0; ii < BIN_X; ii++) blast_numHits[laneId + ii * 32] = bin_numHits[laneId + ii * 32]; } __global__ void search_protein_hit_detection( struct sequenceDataFP *sequenceDataFP, unsigned char *sequence, struct parameters *parametersFP_g, struct groupFP *wordLookupDFA_groupsFP, const unsigned char *__restrict wordLookupDFAFP, int *blast_numHits_g, uint4 nTotalSequenceNum, uint64_t *HitInfo_g, unsigned int groupNum, int *binOffset_g) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int tt = threadIdx.x; unsigned int warpId = tid >> 5; unsigned char laneId = tt & 31; extern __shared__ unsigned char DFA_groupFP_array[]; __shared__ unsigned int hitsDia_share[BLOCK_SIZE * BIN_X]; __shared__ int binOffset_share[BLOCK_SIZE * BIN_X]; unsigned int *hitsDia = hitsDia_share + (tt >> 5 << BIN_POWER); int *binOffset_t = binOffset_share + (tt >> 5 << BIN_POWER); // uint2 hitslast = 0; for (int ii = 0; ii < BIN_X; ii++) { int bin_id = (warpId << BIN_POWER) + laneId + ii * 32; binOffset_t[laneId + ii * 32] = binOffset_g[bin_id] - blast_numHits_g[bin_id]; hitsDia[laneId + ii * 32] = 0; } unsigned char *subject, *sequenceEnd, *address, *start; int4 subjectOffset, count; unsigned char currentWord; const unsigned char *currentBlock; uint2 *queryOffsets, queryOffset; struct parameters parametersFP = *parametersFP_g; extern __shared__ struct groupFP DFA_groupFP_s[]; for (unsigned int ii = tt; ii < groupNum; ii += blockDim.x) { DFA_groupFP_s[ii] = wordLookupDFA_groupsFP[ii]; } __syncthreads(); uint2 *wordLookupDFA_AddiPositions = (uint2 *)((char *)wordLookupDFAFP + parametersFP.additionalQueryPositionOffset); uint64_t sequenceCount = warpId; while (sequenceCount < nTotalSequenceNum) { if (sequenceDataFP[sequenceCount].sequenceLength >= parametersFP.parameters_wordSize) { start = subject = sequence + sequenceDataFP[sequenceCount].offset; sequenceEnd = subject + sequenceDataFP[sequenceCount].sequenceLength; address = start + laneId; while (address + parametersFP.parameters_wordSize - 1 < sequenceEnd) { struct groupFP *currentGroupFP = DFA_groupFP_s; unsigned char letter = *address; for (count = 1; count < parametersFP.parameters_wordSize; count++) { currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? &DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; address++; letter = *address; } currentBlock = &wordLookupDFAFP[currentGroupFP->nextWords]; // If current code is a regular letter currentWord = letter < parametersFP.wordLookupDFA_numCodes ? __ldg(currentBlock + letter) : __ldg(currentBlock); currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? 
&DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; if (currentWord) { subjectOffset = address - subject; // At least one query position, stored at an extenal address queryOffsets = ((uint2 *)currentBlock) - currentWord; if (!__ldg(queryOffsets)) { // Go to an outside address for additional positions queryOffsets = wordLookupDFA_AddiPositions + (__ldg(queryOffsets + 1) * constants_max_int2) + __ldg(queryOffsets + 2); } do { #ifndef NO_STAGE2 queryOffset = __ldg(queryOffsets); int2 diagonal = subjectOffset - queryOffset; unsigned char bin_id = (uint2)diagonal & BIN_MARK; unsigned int nHits = atomicAdd(&(hitsDia[bin_id]), 1); unsigned int bin_p = binOffset_t[bin_id] + nHits; HitInfo_g[bin_p] = (sequenceCount << 32) + ((diagonal + 0x3fff) << 16) + subjectOffset; queryOffsets++; #endif } while (__ldg(queryOffsets)); } start += 32; address = start + laneId; } } sequenceCount += gridDim.x * 4; } } __global__ void search_protein_hit_detection_warp( struct sequenceDataFP *sequenceDataFP, unsigned char *sequence, struct parameters *parametersFP_g, struct groupFP *wordLookupDFA_groupsFP, const unsigned char *__restrict wordLookupDFAFP, int *blast_numHits_g, uint4 nTotalSequenceNum, uint64_t *HitInfo_g, unsigned int groupNum, unsigned int num_hits) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int tt = threadIdx.x; unsigned int warpId = tid >> 5; unsigned char laneId = tt & 31; extern __shared__ unsigned char DFA_groupFP_array[]; __shared__ unsigned int hitsDia_share[BLOCK_SIZE * BIN_X]; __shared__ unsigned int hitsLast_share[BLOCK_SIZE * BIN_X]; unsigned int *hitsDia = hitsDia_share + (tt >> 5 << BIN_POWER); unsigned int *hitsLast = hitsLast_share + (tt >> 5 << BIN_POWER); uint64_t *HitInfo_t = HitInfo_g + num_hits * (warpId << BIN_POWER); // uint64_t *HitInfo_b = HitInfo_g + num_hits * (warpId << 7); // int *hitsoffset_warp = hitsoffset_g + NUM_SEQS * (warpId << 5); // int *hitsoffset_b = hitsoffset_g + NUM_SEQS * (warpId << 7); // int *hitsoffset = hitsoffset_warp + laneId * NUM_SEQS; // hitsoffset[0] = 0; // hitsoffset++; // uint2 hitslast = 0; for (int ii = 0; ii < BIN_X; ii++) hitsDia[laneId + ii * 32] = 0; int *blast_numHits = blast_numHits_g + (warpId << BIN_POWER); // int *blast_numSeqs = blast_numSeqs_g + (warpId << 5); unsigned char *subject, *sequenceEnd, *address, *start; int4 subjectOffset, count; unsigned char currentWord; const unsigned char *currentBlock; uint2 *queryOffsets, queryOffset; struct parameters parametersFP = *parametersFP_g; extern __shared__ struct groupFP DFA_groupFP_s[]; for (unsigned int ii = tt; ii < groupNum; ii += blockDim.x) { DFA_groupFP_s[ii] = wordLookupDFA_groupsFP[ii]; } __syncthreads(); uint2 *wordLookupDFA_AddiPositions = (uint2 *)((char *)wordLookupDFAFP + parametersFP.additionalQueryPositionOffset); uint64_t sequenceCount = warpId; while (sequenceCount < nTotalSequenceNum) { if (sequenceDataFP[sequenceCount].sequenceLength >= parametersFP.parameters_wordSize) { start = subject = sequence + sequenceDataFP[sequenceCount].offset; sequenceEnd = subject + sequenceDataFP[sequenceCount].sequenceLength; address = start + laneId; while (address + parametersFP.parameters_wordSize - 1 < sequenceEnd) { struct groupFP *currentGroupFP = DFA_groupFP_s; unsigned char letter = *address; for (count = 1; count < parametersFP.parameters_wordSize; count++) { currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? 
&DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; address++; letter = *address; } currentBlock = &wordLookupDFAFP[currentGroupFP->nextWords]; // If current code is a regular letter currentWord = letter < parametersFP.wordLookupDFA_numCodes ? __ldg(currentBlock + letter) : __ldg(currentBlock); currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? &DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; if (currentWord) { subjectOffset = address - subject; // At least one query position, stored at an extenal address queryOffsets = ((uint2 *)currentBlock) - currentWord; if (!__ldg(queryOffsets)) { // Go to an outside address for additional positions queryOffsets = wordLookupDFA_AddiPositions + (__ldg(queryOffsets + 1) * constants_max_int2) + __ldg(queryOffsets + 2); } do { #ifndef NO_STAGE2 queryOffset = __ldg(queryOffsets); int2 diagonal = subjectOffset - queryOffset; unsigned char bin_id = (uint2)diagonal & BIN_MARK; unsigned int nHits = atomicAdd(&(hitsDia[bin_id]), 1); unsigned int bin_p = bin_id * num_hits + nHits; HitInfo_t[bin_p] = (sequenceCount << 32) + ((diagonal + 0x3fff) << 16) + subjectOffset; // uint32_t *hit2 = (uint32_t *)(&HitInfo_t[bin_p]); // uint16_t *hit4 = (uint16_t *)(&HitInfo_t[bin_p]); // hit2[1] = sequenceCount; // hit4[1] = diagonal + 0x3fff; // hit4[0] = subjectOffset; queryOffsets++; #endif } while (__ldg(queryOffsets)); } start += 32; address = start + laneId; } } sequenceCount += gridDim.x * 4; for (int ii = 0; ii < BIN_X; ii++) { if ((hitsDia[laneId + ii * 32] - hitsLast[laneId + ii * 32]) < 2) { hitsDia[laneId + ii * 32] = hitsLast[laneId + ii * 32]; } hitsLast[laneId + ii * 32] = hitsDia[laneId + ii * 32]; } } for (int ii = 0; ii < BIN_X; ii++) blast_numHits[laneId + ii * 32] = hitsDia[laneId + ii * 32]; } void search_protein2hitParallel(struct scoreMatrix *scoreMatrixp, struct PSSMatrix PSSMatrix, struct PSSMatrixFP PSSMatrixFP, struct sequenceData *sequenceData_host, uint4 numSequences, uint4 tickFrequency) { // Shucai uint4 i, j, sequenceCount = 0; uint4 nRoundOffset; // PSSMatrix pointers struct PSSMatrixFP *PSSMatrixFPD; int2 *matrixBodyD; // Input database sequence struct sequenceDataFP *sequenceDataFP; struct sequenceDataFP *sequenceDataFPD; unsigned char *sequencesD; // unsigned char *sequencesH; unsigned char *roundStartAddress; // ungapped extension struct ungappedExtension *ungappedExtensionsD; struct ungappedExtension *ungappedExtension; struct ungappedExtension *ungappedExtensionCur, *newUngappedExtension, *additionalUngappedExtension; struct sequenceData *sequenceData; hipHostMalloc((void **)&sequenceData, sizeof(struct sequenceData) * numSequences); memcpy(sequenceData, sequenceData_host, sizeof(struct sequenceData) * numSequences); // ungapped extension numbers uint4 *blast_numUngappedExtensionsD, *blast_numUngappedExtensionsH; uint4 *blast_numTriggerExtensionsD, *blast_numTriggerExtensionsH; uint4 numAdditionalTriggerExtensions, numExtensions; int *blast_numHitsD, *blast_numHitsH; int *blast_numExtD; int *binOffsetD; int4 preSequenceCount; // For time record struct timeval t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; int4 wordNum, groupNum; // parameters struct parameters strParameters; struct parameters *parametersD; // word lookup table struct groupFP *wordLookupDFA_groupD; unsigned char *wordLookupDFAD; uint4 wordLookupDFA_size; // grid and block dimensions int nBlockNum = NUM_BLOCK; int nBlockSize = BLOCK_SIZE; int nTotalThreadNum = nBlockNum * 
nBlockSize; // get t0 gettimeofday(&t0, NULL); wordNum = wordLookupDFA_numWords; groupNum = wordLookupDFA_numGroups; // printf("\n"); // Allocate GPU buffer for PSSMatrix hipMalloc((void **)&PSSMatrixFPD, sizeof(struct PSSMatrixFP)); hipMalloc((void **)&matrixBodyD, sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes); // printf("matrixBody: %d\n", (PSSMatrixFP.length + 2) * encoding_numCodes); char *matrixBodyH = (char *)malloc(sizeof(char) * (PSSMatrixFP.length + 2) * encoding_numCodes); for (int ii = 0; ii < PSSMatrixFP.length + 2; ii++) { for (int jj = 0; jj < encoding_numCodes; jj++) { // printf(".%d.", (PSSMatrixFP.matrix - encoding_numCodes)[ii * // encoding_numCodes + jj]); matrixBodyH[ii * encoding_numCodes + jj] = (PSSMatrixFP.matrix - encoding_numCodes)[ii * encoding_numCodes + jj]; if (ii == 0 || ii == PSSMatrixFP.length + 1 || jj == encoding_numCodes - 1) matrixBodyH[ii * encoding_numCodes + jj] = -127; // printf(".%d.", matrixBodyH[ii * encoding_numCodes + jj]); } // printf("\n"); } // Copy PSSMatrix to device memory hipMemcpy(PSSMatrixFPD, &PSSMatrixFP, sizeof(struct PSSMatrixFP), hipMemcpyHostToDevice); hipMemcpy(matrixBodyD, matrixBodyH, sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes, hipMemcpyHostToDevice); free(matrixBodyH); // hipMemcpyToSymbol(matrixBody_c, (PSSMatrixFP.matrix - encoding_numCodes), // sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes); // Each thread is for align of one database sequence // sequenceDataFP = (struct sequenceDataFP *)global_malloc(numSequences * // sizeof(struct sequenceDataFP)); hipHostMalloc((void **)&sequenceDataFP, sizeof(struct sequenceDataFP) * numSequences); hipMalloc((void **)&sequenceDataFPD, numSequences * sizeof(struct sequenceDataFP)); // Allocate buffer for hit matrix offset // hitMatrix_offsetH = (uint4 *)global_malloc((nBlockNum + 1) * // sizeof(uint4)); // hipMalloc((void **)&hitMatrix_offsetD, (nBlockNum + 1) * sizeof(uint4)); // Allocate ungapped extension buffer on device // int4 nUngappedExtensionNum = UNGAPEXT_PER_THREAD * nTotalThreadNum; int4 nUngappedExtensionNum = TOTAL_UNGAPPED_EXT; strParameters.ungappedExtensionsPerThread = nUngappedExtensionNum / nTotalThreadNum - 1; strParameters.ungappedExtAdditionalStartLoc = strParameters.ungappedExtensionsPerThread * nTotalThreadNum; hipMalloc((void **)&ungappedExtensionsD, nUngappedExtensionNum * sizeof(struct ungappedExtension)); ungappedExtension = (struct ungappedExtension *)global_malloc( nUngappedExtensionNum * sizeof(struct ungappedExtension)); // Allocate numbers for ungapped extensions blast_numUngappedExtensionsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); blast_numTriggerExtensionsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); blast_numHitsH = (int *)global_malloc(sizeof(int) * nTotalThreadNum * BIN_X); hipMalloc((void **)&blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum); hipMalloc((void **)&blast_numTriggerExtensionsD, sizeof(uint4) * nTotalThreadNum); hipMalloc((void **)&blast_numHitsD, sizeof(int) * nTotalThreadNum * BIN_X); hipMalloc((void **)&blast_numExtD, sizeof(int) * nTotalThreadNum * BIN_X); hipMalloc((void **)&binOffsetD, sizeof(int) * nTotalThreadNum * BIN_X); // Allocate device memory, about 132Mbytes (according to texture limit) hipMalloc((void **)&sequencesD, sizeof(unsigned char) * (parameters_batchSize + 50000)); // Allocate parameters buffer on device hipMalloc((void **)&parametersD, sizeof(struct parameters)); strParameters.parameters_wordSize = parameters_wordSize; 
strParameters.encoding_numCodes = encoding_numCodes; strParameters.wordLookupDFA_numCodes = wordLookupDFA_numCodes; strParameters.additionalQueryPositionOffset = wordNum * sizeof(char) + sizeof(int2) * wordLookupDFA_numExtPositions; strParameters.blast_ungappedNominalTrigger = blast_ungappedNominalTrigger; strParameters.statistics_ungappedNominalDropoff = statistics_ungappedNominalDropoff; strParameters.parameters_A = parameters_A; strParameters.parameters_overlap = parameters_overlap; // printf("parameters_A: %d parameters_overlap: %d\n", parameters_A, // parameters_overlap); hipMemcpy(parametersD, &strParameters, sizeof(struct parameters), hipMemcpyHostToDevice); // printf("parameters_Size: %d\n", sizeof(struct parameters)); // Allocate word lookup table wordLookupDFA_size = sizeof(char) * wordNum + 2 * sizeof(int2) * wordLookupDFA_numExtPositions; hipMalloc((void **)&wordLookupDFA_groupD, sizeof(struct groupFP) * groupNum); hipMalloc((void **)&wordLookupDFAD, wordLookupDFA_size); hipMemcpy(wordLookupDFAD, wordLookupDFA, wordLookupDFA_size, hipMemcpyHostToDevice); hipMemcpy(wordLookupDFA_groupD, wordLookupDFA_groupsFP, sizeof(struct groupFP) * groupNum, hipMemcpyHostToDevice); // printf("numDFAGroup: %d DFA_group_size: %u wordLookupDFA_size: %u\n", // groupNum, sizeof(struct groupFP) * groupNum, wordLookupDFA_size); hipMemset(blast_numUngappedExtensionsD, 0, sizeof(uint4) * nTotalThreadNum); hipMemset(blast_numHitsD, 0, sizeof(uint4) * nBlockNum); // //Use constant memory for the word lookup table group // hipMemcpyToSymbol(wordLookupDFA_groupsC, wordLookupDFA_groupsFP, //sizeof(struct groupFP) * groupNum); // // Use constant memory to store score matrix int scoreMatrixSize = encoding_numCodes * encoding_numCodes; hipMemcpyToSymbol(scoreMatrixC, ((char *)scoreMatrixp->matrix) + sizeof(int2 *) * encoding_numCodes, sizeof(int2) * scoreMatrixSize); // Use constant memory to store query sequence unsigned char *tempQueryCode; tempQueryCode = (unsigned char *)global_malloc(sizeof(unsigned char) * (PSSMatrixFP.length + 2)); memcpy(&tempQueryCode[1], PSSMatrixFP.queryCodes, sizeof(unsigned char) * PSSMatrixFP.length); tempQueryCode[0] = encoding_sentinalCode; tempQueryCode[PSSMatrixFP.length + 1] = encoding_sentinalCode; hipMemcpyToSymbol(querySequenceC, tempQueryCode, sizeof(unsigned char) * (PSSMatrixFP.length + 2)); free(tempQueryCode); // uint4 iniVal = nTotalThreadNum; // printf("PSSMatrixSize: %d scoreMatrixSize: %d querySize: %d DFA_group: %d // DFA_qp: %d\n", sizeof(char) * (PSSMatrixFP.length + 2) * encoding_numCodes // >> 10, sizeof(int2) * scoreMatrixSize >> 10, (PSSMatrixFP.length + 2) * // sizeof(unsigned char) >> 10, (sizeof(struct groupFP) * groupNum) >> 10, // wordLookupDFA_size >> 10); // get t1 gettimeofday(&t1, NULL); int4 numSequencesRound, numSequenceProcessed; numSequenceProcessed = 0; // int totalSeqLength = 0; size_t dmem_tot = 0, dmem_free = 0; hipMemGetInfo(&dmem_free, &dmem_tot); printf("Dmem total: %d (MB) Batch size: %d (MB) Dmem free: %d (MB)\n", dmem_tot >> 20, parameters_batchSize >> 20, dmem_free >> 20); // int *HitNumD; while (numSequenceProcessed < numSequences) { // get t2 gettimeofday(&t2, NULL); hipMemset(blast_numTriggerExtensionsD, 0, sizeof(uint4) * nTotalThreadNum); roundStartAddress = sequenceData[sequenceCount].sequence - 1; for (i = 0; sequenceCount < numSequences; i++, sequenceCount++) { sequenceDataFP[i].descriptionLength = sequenceData[sequenceCount].descriptionLength; sequenceDataFP[i].descriptionStart = 
sequenceData[sequenceCount].descriptionStart; sequenceDataFP[i].sequenceLength = sequenceData[sequenceCount].sequenceLength; sequenceDataFP[i].encodedLength = sequenceData[sequenceCount].encodedLength; sequenceDataFP[i].offset = sequenceData[sequenceCount].sequence - roundStartAddress; // about 130MB if (sequenceDataFP[i].offset + sequenceData[sequenceCount].encodedLength > parameters_batchSize) { i++; sequenceCount++; break; } } nRoundOffset = sequenceDataFP[i - 1].offset + sequenceDataFP[i - 1].encodedLength; numSequencesRound = i; hipMemcpy(sequencesD, roundStartAddress, sizeof(unsigned char) * (nRoundOffset + 2), hipMemcpyHostToDevice); hipMemcpy(sequenceDataFPD, sequenceDataFP, sizeof(struct sequenceDataFP) * numSequencesRound, hipMemcpyHostToDevice); gettimeofday(&t3, NULL); // hipMemcpyToSymbol(global_sequenceCount, &iniVal, sizeof(uint4)); numAdditionalTriggerExtensions = 0; hipMemcpyToSymbol(global_numAdditionalTriggerExtensions, &numAdditionalTriggerExtensions, sizeof(uint4)); // get t4 gettimeofday(&t4, NULL); struct timeval s0, s1; hipError_t cudaRes; dim3 dimGrid(nBlockNum, 1); dim3 dimBlock(nBlockSize, 1); int kernel_time = 0; gettimeofday(&s0, NULL); hipLaunchKernelGGL(( search_protein_hit_detection_prescan) , dim3(dimGrid), dim3(dimBlock), sizeof(struct groupFP) * groupNum, 0, sequenceDataFPD, sequencesD, parametersD, wordLookupDFA_groupD, wordLookupDFAD, blast_numHitsD, numSequencesRound, groupNum); hipDeviceSynchronize(); cudaRes = hipGetLastError(); if (cudaRes != hipSuccess) { printf("CUDA error: %s in %d\n", hipGetErrorString(cudaRes), __LINE__); exit(-1); } int total_numHits = get_total_numHits(blast_numHitsD, nTotalThreadNum * BIN_X); get_bin_offset(blast_numHitsD, binOffsetD, nTotalThreadNum * BIN_X); hipMemGetInfo(&dmem_free, &dmem_tot); size_t est_mem_usage = total_numHits * sizeof(uint64_t) * 2 + (parameters_batchSize + 50000) * sizeof(unsigned char); printf("Est mem usage: %d (MB) numSeqProc: %d percentProc: %d\n", est_mem_usage >> 20, numSequenceProcessed, 100 * numSequenceProcessed/numSequences); // printf("total_numHits: %d hit buffer memory size: %d (MB)\n", // total_numHits, sizeof(uint64_t) * total_numHits >> 20); // printf("Estimated memory usage: %d (MB)\n", (sizeof(uint64_t) * // total_numHits * 2 + BATCH_SIZE + 2000000) >> 20); uint64_t *HitInfoD; hipMalloc((void **)&HitInfoD, sizeof(uint64_t) * total_numHits); gettimeofday(&s1, NULL); kernel_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); #ifdef VERBOSE printf( "Hit Detection Prescan Time: %f\n", (float)(1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec) / 1000000); #endif gettimeofday(&s0, NULL); hipLaunchKernelGGL(( search_protein_hit_detection) , dim3(dimGrid), dim3(dimBlock), sizeof(struct groupFP) * groupNum, 0, sequenceDataFPD, sequencesD, parametersD, wordLookupDFA_groupD, wordLookupDFAD, blast_numHitsD, numSequencesRound, HitInfoD, groupNum, binOffsetD); hipDeviceSynchronize(); cudaRes = hipGetLastError(); if (cudaRes != hipSuccess) { printf("CUDA error: %s in %d\n", hipGetErrorString(cudaRes), __LINE__); exit(-1); } gettimeofday(&s1, NULL); kernel_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); #ifdef VERBOSE printf( "Hit Detection Time: %f\n", (float)(1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec) / 1000000); #endif hit_sort_filter(blast_numHitsD, blast_numExtD, binOffsetD, HitInfoD, BLOCK_SIZE, NUM_BLOCK, BIN_X, total_numHits, strParameters.parameters_A, strParameters.parameters_overlap); gettimeofday(&s1, NULL); 
kernel_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); // printf("Hit Sort Time: %d\n", (1000000 * (s1.tv_sec - s0.tv_sec) + // s1.tv_usec - s0.tv_usec)); gettimeofday(&s0, NULL); hipLaunchKernelGGL(( ungappedExtension_twoHitExtendG_bin_sorted_sm_s) , dim3(dimGrid), dim3(dimBlock), sizeof(unsigned char) * (PSSMatrixFP.length + 2), 0, parametersD, HitInfoD, blast_numUngappedExtensionsD, sequencesD, PSSMatrixFPD, sequenceDataFPD, ungappedExtensionsD, blast_numTriggerExtensionsD, blast_numHitsD, blast_numExtD, binOffsetD); hipDeviceSynchronize(); gettimeofday(&s1, NULL); cudaRes = hipGetLastError(); if (cudaRes != hipSuccess) { printf("CUDA error: %s in %d\n", hipGetErrorString(cudaRes), __LINE__); exit(-1); } kernel_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); #ifdef VERBOSE printf( "Hit Extension Time: %f\n", (float)(1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec) / 1000000); #endif // get t5 gettimeofday(&t5, NULL); // Post processing // copy hit results back hipMemcpy(blast_numTriggerExtensionsH, blast_numTriggerExtensionsD, sizeof(uint4) * nTotalThreadNum, hipMemcpyDeviceToHost); hipMemcpy(ungappedExtension, ungappedExtensionsD, nUngappedExtensionNum * sizeof(struct ungappedExtension), hipMemcpyDeviceToHost); hipMemcpyFromSymbol(&numAdditionalTriggerExtensions, global_numAdditionalTriggerExtensions, sizeof(uint4)); // get t6 gettimeofday(&t6, NULL); // Add hits to the alignment list // Additional buffer is used, sort ungapped extensions // according to sequence index long qsort_time = 0; if (numAdditionalTriggerExtensions > 0) { additionalUngappedExtension = ungappedExtension + strParameters.ungappedExtAdditionalStartLoc; gettimeofday(&s0, NULL); qsort(additionalUngappedExtension, numAdditionalTriggerExtensions, sizeof(struct ungappedExtension), compare_ungappedextension); gettimeofday(&s1, NULL); qsort_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); } for (i = 0; i < nTotalThreadNum; i++) { // printf("%d %d\n", i, blast_numTriggerExtensionsH[i]); if (blast_numTriggerExtensionsH[i] > 0) { // ungappedExtensionCur = ungappedExtension + i * UNGAPEXT_PER_THREAD; ungappedExtensionCur = ungappedExtension + i * strParameters.ungappedExtensionsPerThread; preSequenceCount = -1; numExtensions = (blast_numTriggerExtensionsH[i] > strParameters.ungappedExtensionsPerThread) ? 
                strParameters.ungappedExtensionsPerThread :
                blast_numTriggerExtensionsH[i];

        gettimeofday(&s0, NULL);
        qsort(ungappedExtensionCur, numExtensions,
              sizeof(struct ungappedExtension), compare_ungappedextension);
        gettimeofday(&s1, NULL);
        qsort_time +=
            (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec);

        for (j = 0; j < numExtensions; j++) {
          // printf("seq: %d sub: %d - %d qury: %d - %d seed: %d - %d\n",
          //        //i,
          //        ungappedExtensionCur[j].sequenceCount + numSequenceProcessed,
          //        ungappedExtensionCur[j].start.subjectOffset,
          //        ungappedExtensionCur[j].end.subjectOffset,
          //        ungappedExtensionCur[j].start.queryOffset,
          //        ungappedExtensionCur[j].end.queryOffset,
          //        ungappedExtensionCur[j].seed.queryOffset,
          //        ungappedExtensionCur[j].seed.subjectOffset
          //        );
          if (ungappedExtensionCur[j].sequenceCount != preSequenceCount) {
            alignments_createNew(
                sequenceData[ungappedExtensionCur[j].sequenceCount +
                             numSequenceProcessed].descriptionStart,
                sequenceData[ungappedExtensionCur[j].sequenceCount +
                             numSequenceProcessed].descriptionLength,
                sequenceData[ungappedExtensionCur[j].sequenceCount +
                             numSequenceProcessed].sequence,
                sequenceData[ungappedExtensionCur[j].sequenceCount +
                             numSequenceProcessed].sequenceLength,
                sequenceData[ungappedExtensionCur[j].sequenceCount +
                             numSequenceProcessed].encodedLength);
            preSequenceCount = ungappedExtensionCur[j].sequenceCount;
          }
          newUngappedExtension =
              (struct ungappedExtension *)memBlocks_newEntry(
                  ungappedExtension_extensions);
          memcpy(newUngappedExtension, &ungappedExtensionCur[j],
                 sizeof(struct ungappedExtension));
          alignments_addUngappedExtension(newUngappedExtension);
        }

        // Add additional extensions
        if (blast_numTriggerExtensionsH[i] >
            strParameters.ungappedExtensionsPerThread) {
          int tempStartLoc = findStartLoc(additionalUngappedExtension, i,
                                          numAdditionalTriggerExtensions);
          numExtensions = blast_numTriggerExtensionsH[i] -
                          strParameters.ungappedExtensionsPerThread;
          for (j = tempStartLoc; j < numExtensions + tempStartLoc; j++) {
            if (additionalUngappedExtension[j].sequenceCount !=
                preSequenceCount) {
              alignments_createNew(
                  sequenceData[additionalUngappedExtension[j].sequenceCount +
                               numSequenceProcessed].descriptionStart,
                  sequenceData[additionalUngappedExtension[j].sequenceCount +
                               numSequenceProcessed].descriptionLength,
                  sequenceData[additionalUngappedExtension[j].sequenceCount +
                               numSequenceProcessed].sequence,
                  sequenceData[additionalUngappedExtension[j].sequenceCount +
                               numSequenceProcessed].sequenceLength,
                  sequenceData[additionalUngappedExtension[j].sequenceCount +
                               numSequenceProcessed].encodedLength);
              preSequenceCount = additionalUngappedExtension[j].sequenceCount;
            }
            newUngappedExtension =
                (struct ungappedExtension *)memBlocks_newEntry(
                    ungappedExtension_extensions);
            memcpy(newUngappedExtension, &additionalUngappedExtension[j],
                   sizeof(struct ungappedExtension));
            alignments_addUngappedExtension(newUngappedExtension);
          }
        }
        blast_numTriggerExtensions += blast_numTriggerExtensionsH[i];
      }
    }

#ifdef VERBOSE
    printf("Extension Sort Time: %f\n", (float)qsort_time / 1000000);
#endif

    numSequenceProcessed += numSequencesRound;
    // hipFree(hitMatrix_furthestD);

    // get t7
    gettimeofday(&t7, NULL);

    // gapped extension for the current chunk of sequences on the GPU
    // alignments_fingGoodAlignmentsGPU(&PSSMatrixFPD,      //GPU buffer
    //                                  PSSMatrixFP,
    //                                  scoreMatrixp,
    //                                  &matrixBodyD,        //GPU buffer
    //                                  &sequenceDataFPD[w], //GPU buffer
    //                                  &sequencesD[w],      //GPU buffer
    //                                  nRoundOffset);

    // use cpu for gapped extension
    // alignments_findGoodAlignments(PSSMatrix, PSSMatrixFP);

    // get t9
    gettimeofday(&t9, NULL);
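
    /*
     * The per-chunk timing aggregation below repeats the same
     * microsecond-difference expression for every (start, end) pair of
     * gettimeofday() samples. A small helper macro (hypothetical, not used by
     * the original code) shows the intended computation:
     *
     *   #define ELAPSED_USEC(start, end)                                \
     *     (1000000L * ((end).tv_sec - (start).tv_sec) +                 \
     *      ((end).tv_usec - (start).tv_usec))
     *
     *   // e.g. timeRecord.searchTime += ELAPSED_USEC(t4, t5);
     */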
timeRecord.gappedAlignmentTime += (1000000 * (t9.tv_sec - t7.tv_sec) + t9.tv_usec - t7.tv_usec); // aggregate execution time timeRecord.preProcessTime += (1000000 * (t3.tv_sec - t2.tv_sec) + t3.tv_usec - t2.tv_usec); timeRecord.dataCopyTimeH2D += (1000000 * (t4.tv_sec - t3.tv_sec) + t4.tv_usec - t3.tv_usec); timeRecord.searchTime += (1000000 * (t5.tv_sec - t4.tv_sec) + t5.tv_usec - t4.tv_usec); timeRecord.dataCopyTimeD2H += (1000000 * (t6.tv_sec - t5.tv_sec) + t6.tv_usec - t5.tv_usec); timeRecord.addUngappedExtensionTime += (1000000 * (t7.tv_sec - t6.tv_sec) + t7.tv_usec - t6.tv_usec); blast_numHits += total_numHits; hipFree(HitInfoD); } hipMemcpy(blast_numUngappedExtensionsH, blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum, hipMemcpyDeviceToHost); for (j = 0; j < nTotalThreadNum; j++) blast_numUngappedExtensions += blast_numUngappedExtensionsH[j]; hipFree(PSSMatrixFPD); hipFree(matrixBodyD); hipFree(ungappedExtensionsD); hipFree(blast_numUngappedExtensionsD); hipFree(blast_numTriggerExtensionsD); hipFree(blast_numHitsD); hipFree(blast_numExtD); hipFree(binOffsetD); hipFree(parametersD); hipFree(wordLookupDFA_groupD); hipFree(wordLookupDFAD); hipFree(sequenceDataFPD); hipFree(sequencesD); hipFree(sequenceDataFP); free(ungappedExtension); free(blast_numUngappedExtensionsH); free(blast_numTriggerExtensionsH); free(blast_numHitsH); hipHostFree(sequenceData); // get t8 gettimeofday(&t8, NULL); // Record time timeRecord.iniTime = 1000000 * (t1.tv_sec - t0.tv_sec) + t1.tv_usec - t0.tv_usec; timeRecord.postProcessTime = 1000000 * (t8.tv_sec - t7.tv_sec) + t8.tv_usec - t7.tv_usec; timeRecord.hitUngappedExtTime = 1000000 * (t8.tv_sec - t1.tv_sec) + t8.tv_usec - t1.tv_usec; } // Embarrassingly parallel approach is used. One thread is used for // the hit detection of one sequence __global__ void search_protein1hitKernel( struct PSSMatrixFP *PSSMatrixFP, int2 *matrixBody, struct sequenceDataFP *sequenceDataFP, unsigned char *sequence, struct parameters *parametersFP, struct groupFP *wordLookupDFA_groupsFP, unsigned char *wordLookupDFAFP, uint4 *blast_numUngappedExtensions, uint4 *blast_numTriggerExtensions, uint4 *blast_numHits, uint4 *hitMatrix_furthestp, uint4 *hitMatrix_offsetp, struct ungappedExtension *ungappedExtension_extensionsp, uint4 nTotalSequenceNum) { int bid = blockIdx.x * gridDim.y + blockIdx.y; int tid = bid * blockDim.x * blockDim.y + threadIdx.x * blockDim.y + threadIdx.y; unsigned char *subject, *sequenceEnd, *address; int4 subjectOffset, count; unsigned char currentWord, *currentBlock; struct groupFP *currentGroupFP; uint2 *wordLookupDFA_AddiPositions; uint4 numOfTriggerExtensions = 0; uint2 *queryOffsets, queryOffset; struct ungappedExtension *ungappedExtension_current; int4 diagonal; uint4 *lastHitFP; uint4 ungappedExtension_subjectEndReachedFP; uint4 *hitMatrix_Local; uint4 sequenceCount; hitMatrix_Local = hitMatrix_furthestp + hitMatrix_offsetp[tid] + PSSMatrixFP->length; ungappedExtension_extensionsp->start.subjectOffset = 0; ungappedExtension_current = ungappedExtension_extensionsp + tid * UNGAPEXT_PER_THREAD; wordLookupDFA_AddiPositions = (uint2 *)((char *)wordLookupDFAFP + parametersFP->additionalQueryPositionOffset); // Set the PSSMatrix body PSSMatrixFP->matrix = matrixBody + parametersFP->encoding_numCodes; sequenceCount = tid; while (sequenceCount < nTotalSequenceNum) { subject = address = sequence + sequenceDataFP[sequenceCount].offset; if (sequenceDataFP[sequenceCount].sequenceLength >= parametersFP->parameters_wordSize) { currentGroupFP = 
wordLookupDFA_groupsFP; // currentGroupFP = wordLookupDFA_groupsC; count = 1; while (count < parametersFP->parameters_wordSize) { if (*address < parametersFP->wordLookupDFA_numCodes) { currentGroupFP = &wordLookupDFA_groupsFP[currentGroupFP->nextGroups + *address]; // currentGroupFP = &wordLookupDFA_groupsC[currentGroupFP->nextGroups // + *address]; } else { currentGroupFP = &wordLookupDFA_groupsFP[currentGroupFP->nextGroups]; // currentGroupFP = // &wordLookupDFA_groupsC[currentGroupFP->nextGroups]; } address++; count++; } sequenceEnd = subject + sequenceDataFP[sequenceCount].sequenceLength; while (address < sequenceEnd) { currentBlock = &wordLookupDFAFP[currentGroupFP->nextWords]; // If current code is a regular letter if (*address < parametersFP->wordLookupDFA_numCodes) { currentWord = currentBlock[*address]; currentGroupFP = &wordLookupDFA_groupsFP[currentGroupFP->nextGroups + *address]; // currentGroupFP = &wordLookupDFA_groupsC[currentGroupFP->nextGroups // + *address]; } else { if (address >= sequenceEnd) break; currentWord = *currentBlock; currentGroupFP = &wordLookupDFA_groupsFP[currentGroupFP->nextGroups]; // currentGroupFP = // &wordLookupDFA_groupsC[currentGroupFP->nextGroups]; } if (currentWord) { subjectOffset = address - subject; // At least one query position, stored at an extenal address queryOffsets = ((uint2 *)currentBlock) - currentWord; if (!*queryOffsets) { // Go to an outside address for additional positions queryOffsets = wordLookupDFA_AddiPositions + (*(queryOffsets + 1) * constants_max_int2) + *(queryOffsets + 2); } do { queryOffset = *queryOffsets; #ifndef NO_STAGE2 // Calculate the diagonal this hit is on diagonal = subjectOffset - queryOffset; // If we have not extended past this point on this diagonal lastHitFP = hitMatrix_Local + diagonal; if (*lastHitFP < address - sequence) { // Number of extensions for each subject sequence blast_numUngappedExtensions[tid]++; // If only one hit triggered this extension ungappedExtension_oneHitExtendD( sequence, queryOffset, address, *PSSMatrixFP, subject, &ungappedExtension_subjectEndReachedFP, parametersFP->encoding_numCodes, parametersFP->statistics_ungappedNominalDropoff, parametersFP->blast_ungappedNominalTrigger, ungappedExtension_current, &numOfTriggerExtensions, sequenceCount, tid); // Update furthest reached value for the diagonal *lastHitFP = ungappedExtension_subjectEndReachedFP; } #endif queryOffsets++; blast_numHits[tid]++; } while (*queryOffsets); } address++; } } // option======================================================= // sequenceCount = atomicAdd(&global_sequenceCount, 1); sequenceCount += gridDim.x * blockDim.x; //============================================================ } blast_numTriggerExtensions[tid] = (uint4)numOfTriggerExtensions; return; } __device__ struct ungappedExtension *ungappedExtension_oneHitExtendD( unsigned char *sequenceStart, int4 queryOffset, unsigned char *subjectHit, struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, uint4 *sequenceHitEnd, unsigned char encoding_numCodes, int4 statistics_ungappedNominalDropoff, int4 blast_ungappedNominalTrigger, struct ungappedExtension *ungappedExtension_extensions, uint4 *numOfTriggerExtensions, uint4 sequenceCount, int4 tid) { int2 *queryPosition; // int4 queryPosition; unsigned char *subjectPosition, *subjectStart, *subjectEnd; int4 changeSinceBest = 0; int4 dropoff, originalDropoff; int4 ungappedExtension_bestScore; originalDropoff = dropoff = -statistics_ungappedNominalDropoff; ungappedExtension_bestScore = 0; // Start at 
queryEnd,subjectEnd (right/last hit position) queryPosition = PSSMatrixFP.matrix + queryOffset * encoding_numCodes; // queryPosition = queryOffset + 1; subjectPosition = subjectStart = subjectHit; while (changeSinceBest > dropoff) { changeSinceBest += queryPosition[*subjectPosition]; // changeSinceBest += scoreMatrixC[querySequenceC[queryPosition] * // encoding_numCodes + (*subjectPosition)]; // If we have got a positive score if (changeSinceBest > 0) { // Keep updating best score and resetting change-since-best // whilst we are reading positive scores do { ungappedExtension_bestScore += changeSinceBest; queryPosition = queryPosition - encoding_numCodes; // queryPosition = queryPosition - 1; subjectPosition--; changeSinceBest = queryPosition[*subjectPosition]; // changeSinceBest = scoreMatrixC[querySequenceC[queryPosition] * // encoding_numCodes + (*subjectPosition)]; } while (changeSinceBest > 0); subjectStart = subjectPosition; } queryPosition = queryPosition - encoding_numCodes; // queryPosition = queryPosition - 1; subjectPosition--; } // Correct for extra decrement subjectStart++; // Starting at right/last hit position again queryPosition = PSSMatrixFP.matrix + (queryOffset + 1) * encoding_numCodes; // queryPosition = (queryOffset + 2); subjectPosition = subjectEnd = subjectHit + 1; changeSinceBest = 0; // May need to alter dropoff so we also dropoff if below zero if (-ungappedExtension_bestScore > originalDropoff) { dropoff = -ungappedExtension_bestScore; } // Extend end of alignment until dropoff while (changeSinceBest > dropoff) { // Shucai changeSinceBest += queryPosition[*subjectPosition]; // changeSinceBest += scoreMatrixC[querySequenceC[queryPosition] * // encoding_numCodes + (*subjectPosition)]; // If we have got a positive score if (changeSinceBest > 0) { // Keep updating best score and resetting change-since-best // whilst we are reading positive scores do { ungappedExtension_bestScore += changeSinceBest; queryPosition = queryPosition + encoding_numCodes; // queryPosition = queryPosition + 1; subjectPosition++; changeSinceBest = queryPosition[*subjectPosition]; // changeSinceBest = scoreMatrixC[querySequenceC[queryPosition] * // encoding_numCodes + (*subjectPosition)]; } while (changeSinceBest > 0); subjectEnd = subjectPosition; // Check need for change in dropoff if ((dropoff = -ungappedExtension_bestScore) < originalDropoff) { dropoff = originalDropoff; } } queryPosition = queryPosition + encoding_numCodes; // queryPosition = queryPosition + 1; subjectPosition++; } subjectEnd--; //*sequenceHitEnd = subjectPosition - subject; *sequenceHitEnd = subjectPosition - sequenceStart; if (ungappedExtension_bestScore >= blast_ungappedNominalTrigger) { int4 diagonal; struct ungappedExtension *newUngappedExtension = NULL; newUngappedExtension = &ungappedExtension_extensions[*numOfTriggerExtensions]; // Calculate diagonal diagonal = (subjectHit - subject) - queryOffset; // Determine offsets from pointers newUngappedExtension->start.subjectOffset = subjectStart - subject; newUngappedExtension->end.subjectOffset = subjectEnd - subject; newUngappedExtension->start.queryOffset = newUngappedExtension->start.subjectOffset - diagonal; newUngappedExtension->end.queryOffset = newUngappedExtension->end.subjectOffset - diagonal; // newUngappedExtension->seed = // ungappedExtension_findProteinSeed(newUngappedExtension, // PSSMatrixFP, subject, encoding_numCodes); newUngappedExtension->next = NULL; newUngappedExtension->nominalScore = ungappedExtension_bestScore; newUngappedExtension->status = 
ungappedExtension_UNGAPPED; newUngappedExtension->sequenceCount = sequenceCount; // Shucai // Record the number of hits satisfying the next step (*numOfTriggerExtensions)++; return newUngappedExtension; } else { return NULL; } } // Shucai // Search a protein database using 1-hit extension mode void search_protein1hitParallel(struct scoreMatrix *scoreMatrixp, struct PSSMatrixFP PSSMatrixFP, struct sequenceData *sequenceData, uint4 numSequences, uint4 tickFrequency) { // Shucai uint4 i, j, sequenceCount = 0; uint4 nRoundOffset; // PSSMatrix pointers struct PSSMatrixFP *PSSMatrixFPD; int2 *matrixBodyD; // Input database sequence struct sequenceDataFP *sequenceDataFP; struct sequenceDataFP *sequenceDataFPD; unsigned char *sequencesD; unsigned char *roundStartAddress; // ungapped extension struct ungappedExtension *ungappedExtensionsD; struct ungappedExtension *ungappedExtension; struct ungappedExtension *ungappedExtensionCur, *newUngappedExtension; // ungapped extension numbers uint4 *blast_numUngappedExtensionsD, *blast_numUngappedExtensionsH; uint4 *blast_numTriggerExtensionsD, *blast_numTriggerExtensionsH; uint4 *blast_numHitsD, *blast_numHitsH; uint4 *hitMatrix_furthestD; uint4 *hitMatrix_offsetH; uint4 *hitMatrix_offsetD; uint4 preSequenceCount; // For time record struct timeval t0, t1, t2, t3, t4, t5, t6, t7, t8; int4 wordNum, groupNum; // parameters struct parameters strParameters; struct parameters *parametersD; // word lookup table struct groupFP *wordLookupDFA_groupD; unsigned char *wordLookupDFAD; uint4 wordLookupDFA_size; // grid and block dimensions int nBlockNum = NUM_BLOCK; int nBlockSize = BLOCK_SIZE; int nTotalThreadNum = nBlockNum * nBlockSize; dim3 dimGrid(nBlockNum, 1); dim3 dimBlock(nBlockSize, 1); // get t0 gettimeofday(&t0, NULL); wordNum = wordLookupDFA_numWords; groupNum = wordLookupDFA_numGroups; // Allocate GPU buffer for PSSMatrix hipMalloc((void **)&PSSMatrixFPD, sizeof(struct PSSMatrixFP)); hipMalloc((void **)&matrixBodyD, sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes); // Copy PSSMatrix to device memory hipMemcpy(PSSMatrixFPD, &PSSMatrixFP, sizeof(struct PSSMatrixFP), hipMemcpyHostToDevice); hipMemcpy(matrixBodyD, (PSSMatrixFP.matrix - encoding_numCodes), sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes, hipMemcpyHostToDevice); // Each thread is for align of one database sequence sequenceDataFP = (struct sequenceDataFP *)global_malloc( numSequences * sizeof(struct sequenceDataFP)); hipMalloc((void **)&sequenceDataFPD, numSequences * sizeof(struct sequenceDataFP)); // Allocate buffer for hit matrix offset hitMatrix_offsetH = (uint4 *)global_malloc((nTotalThreadNum + 1) * sizeof(uint4)); hipMalloc((void **)&hitMatrix_offsetD, (nTotalThreadNum + 1) * sizeof(uint4)); // Allocate ungapped extension buffer on device int4 nUngappedExtensionNum = UNGAPEXT_PER_THREAD * nTotalThreadNum; hipMalloc((void **)&ungappedExtensionsD, nUngappedExtensionNum * sizeof(struct ungappedExtension)); ungappedExtension = (struct ungappedExtension *)global_malloc( nUngappedExtensionNum * sizeof(struct ungappedExtension)); // Allocate numbers for ungapped extensions blast_numUngappedExtensionsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); blast_numTriggerExtensionsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); blast_numHitsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); hipMalloc((void **)&blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum); hipMalloc((void **)&blast_numTriggerExtensionsD, sizeof(uint4) * 
nTotalThreadNum); hipMalloc((void **)&blast_numHitsD, sizeof(uint4) * nTotalThreadNum); // Allocate device memory, about 132Mbytes (according to texture limit) hipMalloc((void **)&sequencesD, sizeof(unsigned char) * 132000000); // Allocate parameters buffer on device hipMalloc((void **)&parametersD, sizeof(struct parameters)); strParameters.parameters_wordSize = parameters_wordSize; strParameters.encoding_numCodes = encoding_numCodes; strParameters.wordLookupDFA_numCodes = wordLookupDFA_numCodes; strParameters.additionalQueryPositionOffset = wordNum * sizeof(char) + sizeof(int2) * wordLookupDFA_numExtPositions; strParameters.blast_ungappedNominalTrigger = blast_ungappedNominalTrigger; strParameters.statistics_ungappedNominalDropoff = statistics_ungappedNominalDropoff; hipMemcpy(parametersD, &strParameters, sizeof(struct parameters), hipMemcpyHostToDevice); // Allocate word lookup table wordLookupDFA_size = sizeof(char) * wordNum + 2 * sizeof(int2) * wordLookupDFA_numExtPositions; hipMalloc((void **)&wordLookupDFA_groupD, sizeof(struct groupFP) * groupNum); hipMalloc((void **)&wordLookupDFAD, wordLookupDFA_size); hipMemset(blast_numUngappedExtensionsD, 0, sizeof(uint4) * nTotalThreadNum); hipMemset(blast_numHitsD, 0, sizeof(uint4) * nTotalThreadNum); hipMemcpy(wordLookupDFA_groupD, wordLookupDFA_groupsFP, sizeof(struct groupFP) * groupNum, hipMemcpyHostToDevice); // //Use constant memory for the word lookup table group // hipMemcpyToSymbol(wordLookupDFA_groupsC, wordLookupDFA_groupsFP, //sizeof(struct groupFP) * groupNum); // // //Use constant memory to store score matrix // int scoreMatrixSize = encoding_numCodes * encoding_numCodes; // hipMemcpyToSymbol(scoreMatrixC, // ((char *)scoreMatrixp->matrix) + sizeof(int2 *) * //encoding_numCodes, // sizeof(int2) * scoreMatrixSize); // //Use constant memory to store query sequence // unsigned char *tempQueryCode; // tempQueryCode = (unsigned char *)global_malloc(sizeof(unsigned char) * //(PSSMatrixFP.length + 2)); // memcpy(&tempQueryCode[1], PSSMatrixFP.queryCodes, sizeof(unsigned char) //* PSSMatrixFP.length); // tempQueryCode[0] = encoding_sentinalCode; // tempQueryCode[PSSMatrixFP.length + 1] = encoding_sentinalCode; // hipMemcpyToSymbol(querySequenceC, tempQueryCode, sizeof(unsigned char) //* (PSSMatrixFP.length + 2)); // free(tempQueryCode); hipMemcpy(wordLookupDFAD, wordLookupDFA, wordLookupDFA_size, hipMemcpyHostToDevice); // uint4 iniVal = nTotalThreadNum; // get t1 gettimeofday(&t1, NULL); int4 numSequencesRound, numSequenceProcessed; numSequenceProcessed = 0; while (sequenceCount < numSequences) { // get t2 gettimeofday(&t2, NULL); memset(hitMatrix_offsetH, 0, sizeof(int4) * (nTotalThreadNum + 1)); roundStartAddress = sequenceData[sequenceCount].sequence - 1; for (i = 0; sequenceCount < numSequences; i++, sequenceCount++) { sequenceDataFP[i].descriptionLength = sequenceData[sequenceCount].descriptionLength; sequenceDataFP[i].descriptionStart = sequenceData[sequenceCount].descriptionStart; sequenceDataFP[i].sequenceLength = sequenceData[sequenceCount].sequenceLength; sequenceDataFP[i].encodedLength = sequenceData[sequenceCount].encodedLength; sequenceDataFP[i].offset = sequenceData[sequenceCount].sequence - roundStartAddress; // Calculate the longest sequence size aligned by the current thread if (sequenceDataFP[i].sequenceLength > hitMatrix_offsetH[(i % nTotalThreadNum) + 1]) { hitMatrix_offsetH[(i % nTotalThreadNum) + 1] = sequenceDataFP[i].sequenceLength; } // about 130MB if (sequenceDataFP[i].offset + 
sequenceData[sequenceCount].encodedLength > 130000000) { i++; sequenceCount++; break; } } nRoundOffset = sequenceDataFP[i - 1].offset + sequenceDataFP[i - 1].encodedLength; numSequencesRound = i; // Calculate the offset of each thread for (i = 1; i < nTotalThreadNum + 1; i++) { hitMatrix_offsetH[i] += hitMatrix_offsetH[i - 1] + (PSSMatrixFP.length - parameters_wordSize + 1); } // copy offset info to device hipMemcpy(hitMatrix_offsetD, hitMatrix_offsetH, (nTotalThreadNum + 1) * sizeof(int4), hipMemcpyHostToDevice); // get t3 gettimeofday(&t3, NULL); // Allocate device memory // hipMalloc((void **)&sequencesD, sizeof(unsigned char) * (nRoundOffset //+ 2)); // Allocate diagonal buffers int nElemNum = hitMatrix_offsetH[nTotalThreadNum]; hipMalloc((void **)&hitMatrix_furthestD, sizeof(uint4) * nElemNum); hipMemset(hitMatrix_furthestD, 0, sizeof(uint4) * nElemNum); hipMemset(blast_numTriggerExtensionsD, 0, sizeof(uint4) * nTotalThreadNum); // Copy data to device hipMemcpy(sequenceDataFPD, sequenceDataFP, sizeof(struct sequenceDataFP) * numSequencesRound, hipMemcpyHostToDevice); hipMemcpy(sequencesD, roundStartAddress, sizeof(unsigned char) * (nRoundOffset + 2), hipMemcpyHostToDevice); // hipMemcpyToSymbol(global_sequenceCount, &iniVal, sizeof(uint4)); // get t4 gettimeofday(&t4, NULL); // all the required data are copied to device, launch the kernel search_protein1hitKernel << <dimGrid, dimBlock>>> (PSSMatrixFPD, matrixBodyD, sequenceDataFPD, sequencesD, parametersD, wordLookupDFA_groupD, wordLookupDFAD, blast_numUngappedExtensionsD, blast_numTriggerExtensionsD, blast_numHitsD, hitMatrix_furthestD, hitMatrix_offsetD, ungappedExtensionsD, numSequencesRound); hipDeviceSynchronize(); // get t5 gettimeofday(&t5, NULL); // Post processing // copy hit results back hipMemcpy(blast_numTriggerExtensionsH, blast_numTriggerExtensionsD, sizeof(uint4) * nTotalThreadNum, hipMemcpyDeviceToHost); hipMemcpy(ungappedExtension, ungappedExtensionsD, nUngappedExtensionNum * sizeof(struct ungappedExtension), hipMemcpyDeviceToHost); hipMemcpy(blast_numUngappedExtensionsH, blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum, hipMemcpyDeviceToHost); // get t6 gettimeofday(&t6, NULL); // Add hits to the alignment list for (i = 0; i < nTotalThreadNum; i++) { if (blast_numTriggerExtensionsH[i] > 0) { ungappedExtensionCur = ungappedExtension + i * UNGAPEXT_PER_THREAD; preSequenceCount = INT_MAX; for (j = 0; j < blast_numTriggerExtensionsH[i]; j++) { if (ungappedExtensionCur[j].sequenceCount != preSequenceCount) { alignments_createNew( sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].descriptionStart, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].descriptionLength, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].sequence, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].sequenceLength, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].encodedLength); preSequenceCount = ungappedExtensionCur[j].sequenceCount; } newUngappedExtension = (struct ungappedExtension *)memBlocks_newEntry( ungappedExtension_extensions); memcpy(newUngappedExtension, &ungappedExtensionCur[j], sizeof(struct ungappedExtension)); alignments_addUngappedExtension(newUngappedExtension); } blast_numTriggerExtensions += blast_numTriggerExtensionsH[i]; } } numSequenceProcessed += numSequencesRound; hipFree(hitMatrix_furthestD); // hipFree(sequencesD); // get t7 gettimeofday(&t7, NULL); // aggregate execution time 
timeRecord.preProcessTime += (1000000 * (t3.tv_sec - t2.tv_sec) + t3.tv_usec - t2.tv_usec); timeRecord.dataCopyTimeH2D += (1000000 * (t4.tv_sec - t3.tv_sec) + t4.tv_usec - t3.tv_usec); timeRecord.searchTime += (1000000 * (t5.tv_sec - t4.tv_sec) + t5.tv_usec - t4.tv_usec); timeRecord.dataCopyTimeD2H += (1000000 * (t6.tv_sec - t5.tv_sec) + t6.tv_usec - t5.tv_usec); timeRecord.addUngappedExtensionTime += (1000000 * (t7.tv_sec - t6.tv_sec) + t7.tv_usec - t6.tv_usec); } // After all sequences are processed hipMemcpy(blast_numUngappedExtensionsH, blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum, hipMemcpyDeviceToHost); hipMemcpy(blast_numHitsH, blast_numHitsD, sizeof(uint4) * nTotalThreadNum, hipMemcpyDeviceToHost); for (j = 0; j < nTotalThreadNum; j++) { blast_numUngappedExtensions += blast_numUngappedExtensionsH[j]; blast_numHits += blast_numHitsH[j]; } hipFree(PSSMatrixFPD); hipFree(matrixBodyD); hipFree(sequenceDataFPD); hipFree(ungappedExtensionsD); hipFree(blast_numUngappedExtensionsD); hipFree(blast_numTriggerExtensionsD); hipFree(blast_numHitsD); hipFree(parametersD); hipFree(wordLookupDFA_groupD); hipFree(wordLookupDFAD); hipFree(hitMatrix_offsetD); hipFree(sequencesD); free(sequenceDataFP); free(ungappedExtension); free(blast_numUngappedExtensionsH); free(blast_numTriggerExtensionsH); free(blast_numHitsH); free(hitMatrix_offsetH); // get t8 gettimeofday(&t8, NULL); // Record time timeRecord.iniTime = 1000000 * (t1.tv_sec - t0.tv_sec) + t1.tv_usec - t0.tv_usec; timeRecord.postProcessTime = 1000000 * (t8.tv_sec - t7.tv_sec) + t8.tv_usec - t7.tv_usec; timeRecord.hitUngappedExtTime = 1000000 * (t8.tv_sec - t1.tv_sec) + t8.tv_usec - t1.tv_usec; } int findStartLoc(struct ungappedExtension *ungappedExtensionsPtr, int threadNo, int itemNum) { int i; for (i = 0; i < itemNum; i++) { if (ungappedExtensionsPtr[i].tid == threadNo) { return i; } } return -1; } __device__ struct coordinate ungappedExtension_findProteinSeed_sm( struct ungappedExtension *ungappedExtension, // struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, unsigned char encoding_numCodes, // char *matrix unsigned char *querySequence, int2 *scoreMatrix) { // char *queryWindowStart, *queryWindowEnd; int2 queryWindowStart, queryWindowEnd; unsigned char *subjectWindowStart, *subjectWindowEnd; int2 bestQueryPosition; unsigned char *bestSubjectPosition; int4 bestSegmentScore; int4 nominalScore, count; struct coordinate seed; if (ungappedExtension->end.queryOffset - ungappedExtension->start.queryOffset < 11) { // The seed point is the middle of the extension seed.queryOffset = (ungappedExtension->end.queryOffset + ungappedExtension->start.queryOffset) / 2; seed.subjectOffset = (ungappedExtension->end.subjectOffset + ungappedExtension->start.subjectOffset) / 2; } else { // Else find the highest scoring length-11 segment of the ungapped extension // queryWindowStart = queryWindowEnd = matrix + // ungappedExtension->start.queryOffset * encoding_numCodes; queryWindowStart = queryWindowEnd = ungappedExtension->start.queryOffset + 1; // subjectWindowStart = subjectWindowEnd = subject + // ungappedExtension->start.subjectOffset; subjectWindowStart = subjectWindowEnd = subject + ungappedExtension->start.subjectOffset; // Find initial score for first 11 positions nominalScore = 0; count = 0; while (count < 11) { // nominalScore += queryWindowEnd[*subjectWindowEnd]; // queryWindowEnd += encoding_numCodes; nominalScore += scoreMatrix[querySequence[queryWindowEnd] * encoding_numCodes + (*subjectWindowEnd)]; queryWindowEnd++; 
subjectWindowEnd++; count++; } // queryWindowEnd -= encoding_numCodes; queryWindowEnd--; subjectWindowEnd--; // By default first-11 positions gives best position and score bestQueryPosition = queryWindowStart; bestSubjectPosition = subjectWindowStart; bestSegmentScore = nominalScore; // Now slide the window across and record the better scores/positions // while (queryWindowEnd < matrix + ungappedExtension->end.queryOffset * // encoding_numCodes) while (queryWindowEnd < ungappedExtension->end.queryOffset + 1) { // Advance window end, add new position value // queryWindowEnd += encoding_numCodes; queryWindowEnd++; subjectWindowEnd++; // nominalScore += queryWindowEnd[*subjectWindowEnd]; nominalScore += scoreMatrix[querySequence[queryWindowEnd] * encoding_numCodes + (*subjectWindowEnd)]; // Remove position that we will leave behind // nominalScore -= queryWindowStart[*subjectWindowStart]; nominalScore -= scoreMatrix[querySequence[queryWindowStart] * encoding_numCodes + (*subjectWindowStart)]; // queryWindowStart += encoding_numCodes; queryWindowStart++; subjectWindowStart++; // Check if best window position yet if (nominalScore > bestSegmentScore) { bestSegmentScore = nominalScore; bestQueryPosition = queryWindowStart; bestSubjectPosition = subjectWindowStart; } } // Middle of the best window is the seed position seed.queryOffset = (bestQueryPosition - 1) + 5; seed.subjectOffset = bestSubjectPosition + 5 - subject; } return seed; } __global__ void ungappedExtension_twoHitExtendG_findProteinSeed( struct parameters *parametersFP_g, struct PSSMatrixFP *PSSMatrixFP_g, unsigned char *sequence, struct sequenceDataFP *sequenceDataFP, struct ungappedExtension *ungappedExtension_extensionsp, uint4 *blast_numTriggerExtensions) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int tt = threadIdx.x; struct parameters *parametersFP = parametersFP_g; __shared__ int2 scoreMatrix_s[1024]; extern __shared__ unsigned char querySequence_s[]; unsigned encoding_numCodes = parametersFP->encoding_numCodes; for (unsigned int ii = tt; ii < encoding_numCodes * encoding_numCodes; ii += blockDim.x) { scoreMatrix_s[ii] = scoreMatrixC[ii]; } for (unsigned int ii = tt; ii < PSSMatrixFP_g->length + 2; ii += blockDim.x) { querySequence_s[ii] = querySequenceC[ii]; } __syncthreads(); struct ungappedExtension *ungappedExtension_current = ungappedExtension_extensionsp + tid * parametersFP->ungappedExtensionsPerThread; // unsigned int tBins = BLOCK_SIZE * NUM_BLOCK; // for( unsigned int bb = tid; bb < tBins; bb += gridDim.x * blockDim.x) { unsigned int numExtensions = blast_numTriggerExtensions[tid]; for (unsigned int w_id = 0; w_id < numExtensions; w_id++) { int4 sequenceCount = ungappedExtension_current[w_id].sequenceCount; unsigned char *subject = sequence + sequenceDataFP[sequenceCount].offset; ungappedExtension_current[w_id].seed = ungappedExtension_findProteinSeed_sm( &(ungappedExtension_current[w_id]), subject, encoding_numCodes, querySequence_s, scoreMatrix_s); } } } __global__ void ungappedExtension_twoHitExtendG_bin_sorted_sm_s( struct parameters *parametersFP, uint64_t *HitInfo_g, uint4 *blast_numUngappedExtensions, unsigned char *sequence, struct PSSMatrixFP *PSSMatrixFP, struct sequenceDataFP *sequenceDataFP, struct ungappedExtension *ungappedExtension_extensionsp, uint4 *blast_numTriggerExtensions, int *numOneHitsD, int *numExtD, int *binOffset_g) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int tt = threadIdx.x; int laneId = tt & 31; // int warpId = tid >> 5; struct ungappedExtension 
*ungappedExtension_additional = ungappedExtension_extensionsp + parametersFP->ungappedExtAdditionalStartLoc; unsigned char *ungappedExtension_subjectEndReachedFP = 0; __shared__ int2 scoreMatrix_s[1024]; extern __shared__ unsigned char querySequence_s[]; unsigned encoding_numCodes = parametersFP->encoding_numCodes; for (unsigned int ii = tt; ii < encoding_numCodes * encoding_numCodes; ii += blockDim.x) { scoreMatrix_s[ii] = scoreMatrixC[ii]; } for (unsigned int ii = tt; ii < PSSMatrixFP->length + 2; ii += blockDim.x) { querySequence_s[ii] = querySequenceC[ii]; } __syncthreads(); // uint4 numOfTriggerExtensions_s = 0; __shared__ uint4 numOfTriggerExtensions_s[BLOCK_SIZE]; uint4 *numOfTriggerExtensions_w = numOfTriggerExtensions_s + (tt >> 5 << 5); // uint4 *numOfTriggerExtensions_w = numOfTriggerExtensions_s; numOfTriggerExtensions_w[laneId] = 0; // numOfTriggerExtensions_w[tt] = 0; uint4 blast_numUngappedExtensions_s = 0; // ungappedExtension_extensionsp->start.subjectOffset = 0; struct ungappedExtension *ungappedExtension_w = ungappedExtension_extensionsp + (tid >> 5 << 5) * parametersFP->ungappedExtensionsPerThread; // struct ungappedExtension *ungappedExtension_w = // ungappedExtension_extensionsp + // blockIdx.x * BLOCK_SIZE * parametersFP->ungappedExtensionsPerThread; unsigned int num_bins = BLOCK_SIZE * NUM_BLOCK * BIN_X; for (unsigned int bb = tid; bb < num_bins; bb += gridDim.x * blockDim.x) // unsigned int b_start = warpId << BIN_POWER; // unsigned int b_end = (warpId + 1) << BIN_POWER; // for(unsigned int bb = b_start + laneId; bb < b_end; bb += 32) { uint64_t *HitInfo_t = HitInfo_g + binOffset_g[bb] - numOneHitsD[bb]; unsigned int numHits_t = numExtD[bb]; uint64_t prev_ext = 0; for (unsigned int w_id = 0; w_id < numHits_t; w_id += 2) { uint64_t prev = HitInfo_t[w_id] > prev_ext ? 
HitInfo_t[w_id] : prev_ext; uint64_t curr = HitInfo_t[w_id + 1]; if (prev < curr) { blast_numUngappedExtensions_s++; uint4 sequenceCount = (uint4)(curr >> 32); int2 diagonal = (int2)((curr >> 16) & 0xffff) - 0x3fff; uint2 subjectOffset = (uint2)(curr & 0xffff); uint2 queryOffset = subjectOffset - diagonal; unsigned char *subject = sequence + sequenceDataFP[sequenceCount].offset; unsigned char *address = subject + subjectOffset; unsigned char *lastHit_addr = subject + (uint2)(prev & 0xffff); // int bin_id = sequenceCount & 127; int bin_id = sequenceCount & 31; struct ungappedExtension *ungappedExtension_current = ungappedExtension_w + bin_id * parametersFP->ungappedExtensionsPerThread; // If only one hit triggered this extension ungappedExtension_twoHitExtendD_sm( sequence, queryOffset, address, lastHit_addr, subject, &ungappedExtension_subjectEndReachedFP, parametersFP->encoding_numCodes, parametersFP->statistics_ungappedNominalDropoff, parametersFP->blast_ungappedNominalTrigger, parametersFP->ungappedExtensionsPerThread, ungappedExtension_current, ungappedExtension_additional, &(numOfTriggerExtensions_w[bin_id]), sequenceCount, scoreMatrix_s, querySequence_s); prev_ext = (curr & 0xffffffffffff0000) + (ungappedExtension_subjectEndReachedFP - subject); } } } blast_numTriggerExtensions[tid] = numOfTriggerExtensions_w[laneId]; blast_numUngappedExtensions[tid] += blast_numUngappedExtensions_s; } __device__ struct coordinate ungappedExtension_findProteinSeed( struct ungappedExtension *ungappedExtension, struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, unsigned char encoding_numCodes) { int2 *queryWindowStart, *queryWindowEnd; unsigned char *subjectWindowStart, *subjectWindowEnd; int2 *bestQueryPosition; unsigned char *bestSubjectPosition; int4 bestSegmentScore; int4 nominalScore, count; struct coordinate seed; if (ungappedExtension->end.queryOffset - ungappedExtension->start.queryOffset < 11) { // The seed point is the middle of the extension seed.queryOffset = (ungappedExtension->end.queryOffset + ungappedExtension->start.queryOffset) / 2; seed.subjectOffset = (ungappedExtension->end.subjectOffset + ungappedExtension->start.subjectOffset) / 2; } else { // Else find the highest scoring length-11 segment of the ungapped extension queryWindowStart = queryWindowEnd = PSSMatrixFP.matrix + ungappedExtension->start.queryOffset * encoding_numCodes; subjectWindowStart = subjectWindowEnd = subject + ungappedExtension->start.subjectOffset; // Find initial score for first 11 positions nominalScore = 0; count = 0; while (count < 11) { nominalScore += queryWindowEnd[*subjectWindowEnd]; queryWindowEnd += encoding_numCodes; subjectWindowEnd++; count++; } queryWindowEnd -= encoding_numCodes; subjectWindowEnd--; // By default first-11 positions gives best position and score bestQueryPosition = queryWindowStart; bestSubjectPosition = subjectWindowStart; bestSegmentScore = nominalScore; // Now slide the window across and record the better scores/positions while (queryWindowEnd < PSSMatrixFP.matrix + ungappedExtension->end.queryOffset * encoding_numCodes) { // Advance window end, add new position value queryWindowEnd += encoding_numCodes; subjectWindowEnd++; nominalScore += queryWindowEnd[*subjectWindowEnd]; // Remove position that we will leave behind nominalScore -= queryWindowStart[*subjectWindowStart]; queryWindowStart += encoding_numCodes; subjectWindowStart++; // Check if best window position yet if (nominalScore > bestSegmentScore) { bestSegmentScore = nominalScore; bestQueryPosition = 
            queryWindowStart;
        bestSubjectPosition = subjectWindowStart;
      }
    }

    // Middle of the best window is the seed position
    seed.queryOffset =
        (bestQueryPosition - PSSMatrixFP.matrix) / encoding_numCodes + 5;
    seed.subjectOffset = bestSubjectPosition + 5 - subject;
  }
  return seed;
}
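
/*
 * Worked example (illustrative only) for ungappedExtension_findProteinSeed
 * above: when an ungapped extension spans fewer than 11 query positions, e.g.
 *
 *   start.queryOffset   = 10,  end.queryOffset   = 18,
 *   start.subjectOffset = 110, end.subjectOffset = 118,
 *
 * the seed is simply the midpoint of the extension, (14, 114). For longer
 * extensions the highest-scoring length-11 window of aligned positions is
 * found with a sliding window:
 *
 *   score = sum of the first 11 matrix cells;
 *   best  = score; bestStart = windowStart;
 *   while (the window can still move right) {
 *     score += cell entering on the right;
 *     score -= cell leaving on the left;
 *     if (score > best) { best = score; bestStart = windowStart; }
 *   }
 *
 * and the seed is placed 5 positions into that best window, which is what the
 * "+ 5" offsets in the code above compute.
 */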
fa364f0e3b05227456b720765af202574cc51948.cu
/* * cuBLASTP - Fine-Grained Parallelization of Protein Sequence Search on CPU+GPU * Version 0.1 (beta) * * (c) 2015 Virginia Polytechnic Institute & State University (Virginia Tech) * This version of cuBLASTP is licensed for non-commercial use only, * as specified in LICENSE files in licensing directory. For all other use * contact vtiplicensing@vtip.org * * Developer: Jing Zhang * */ #include "blast.h" #include "wordLookupDFA.h" #include <math.h> #include <limits.h> #include <sys/time.h> #include <inttypes.h> #include "segsort.h" #ifdef SM_20 #define __ldg(x) (*(x)) #define NUM_BLOCK 112 #else #define NUM_BLOCK 260 #endif #define BLOCK_SIZE 128 #define BIN_X 4 // CHANGE BIN_SIZE #define BIN_POWER 7 // BIN_X #define BIN_MARK 127 // BIN_X extern unsigned char *wordLookupDFA; extern struct groupFP *wordLookupDFA_groupsFP; TIMERECORD timeRecord; struct parameters { char parameters_wordSize; unsigned char encoding_numCodes; char parameters_overlap; int4 wordLookupDFA_numCodes; uint4 additionalQueryPositionOffset; int4 statistics_ungappedNominalDropoff; int4 blast_ungappedNominalTrigger; int4 parameters_A; uint4 ungappedExtensionsPerThread; uint4 ungappedExtAdditionalStartLoc; }; #define TARGET_THREAD 0 #define UNGAPEXT_PER_THREAD 150 #define TOTAL_UNGAPPED_EXT 500000 __device__ __constant__ int2 scoreMatrixC[1640]; __device__ __constant__ unsigned char querySequenceC[40000]; int compare_ungappedextension(const void *a, const void *b) { return ((struct ungappedExtension *)a)->sequenceCount - ((struct ungappedExtension *)b)->sequenceCount; } int findStartLoc(struct ungappedExtension *ungappedExtensionsPtr, int threadNo, int itemNum); __device__ struct ungappedExtension *ungappedExtension_oneHitExtendD( unsigned char *, int4, unsigned char *, struct PSSMatrixFP, unsigned char *, uint4 *, unsigned char, int4, int4, struct ungappedExtension *, uint4 *, uint4, int4); __device__ uint4 global_numAdditionalTriggerExtensions; __device__ struct coordinate ungappedExtension_findProteinSeed( struct ungappedExtension *ungappedExtension, struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, unsigned char encoding_numCodes); __device__ struct coordinate ungappedExtension_findProteinSeed_sm( struct ungappedExtension *ungappedExtension, // struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, unsigned char encoding_numCodes, unsigned char *querySequence, int2 *scoreMatrix); __global__ void ungappedExtension_twoHitExtendG_findProteinSeed( struct parameters *parametersFP_g, struct PSSMatrixFP *PSSMatrixFP_g, unsigned char *sequence, struct sequenceDataFP *sequenceDataFP, struct ungappedExtension *ungappedExtension_extensionsp, uint4 *blast_numTriggerExtensions); __global__ void ungappedExtension_twoHitExtendG_bin_sorted_sm_s( struct parameters *parametersFP_g, // char *matrixBody_g, uint64_t *HitInfo_g, // uint2 *num_hits, uint4 *blast_numUngappedExtensions, unsigned char *sequence, // unsigned char **hitMatrix_furthestp, // uint4 *hitMatrix_offsetp, struct PSSMatrixFP *PSSMatrixFP_g, struct sequenceDataFP *sequenceDataFP, struct ungappedExtension *ungappedExtension_extensionsp, uint4 *blast_numTriggerExtensions, int *numOneHitsD, int *numExtD, int *binOffset_g); __device__ void ungappedExtension_twoHitExtendD_sm( unsigned char *sequenceStart, int4 queryOffset, unsigned char *subjectHit, unsigned char *lastHitFP, // struct PSSMatrixFP *PSSMatrixFP, unsigned char *subject, unsigned char **sequenceHitEnd, unsigned char encoding_numCodes, int4 statistics_ungappedNominalDropoff, int4 
blast_ungappedNominalTrigger, int4 ungappedExtensionsPerThread, struct ungappedExtension *ungappedExtension_extensions, struct ungappedExtension *ungappedExtension_additonal, uint4 *numOfTriggerExtensions, uint4 sequenceCount, int2 *scoreMatrix, unsigned char *querySequence) { // int queryPosition; unsigned char *subjectPosition, *subjectStart, *subjectEnd; int4 changeSinceBest = 0; int4 dropoff, originalDropoff; int4 ungappedExtension_bestScore; originalDropoff = dropoff = -statistics_ungappedNominalDropoff; ungappedExtension_bestScore = 0; // Start at queryEnd,subjectEnd (right/last hit position) // queryPosition = matrix + queryOffset * encoding_numCodes; subjectPosition = subjectStart = subjectHit; int2 queryPosition = queryOffset + 1; while (changeSinceBest > dropoff) { // changeSinceBest += queryPosition[__ldg(subjectPosition)]; changeSinceBest += scoreMatrix [querySequence[queryPosition] * encoding_numCodes + (*subjectPosition)]; // If we have got a positive score if (changeSinceBest > 0) { // Keep updating best score and resetting change-since-best // whilst we are reading positive scores do { ungappedExtension_bestScore += changeSinceBest; // queryPosition = queryPosition - encoding_numCodes; queryPosition = queryPosition - 1; subjectPosition--; // changeSinceBest = queryPosition[__ldg(subjectPosition)]; changeSinceBest = scoreMatrix[querySequence[queryPosition] * encoding_numCodes + (*subjectPosition)]; } while (changeSinceBest > 0); subjectStart = subjectPosition; } // queryPosition = queryPosition - encoding_numCodes; queryPosition = queryPosition - 1; subjectPosition--; } // Correct for extra decrement subjectStart++; if (subjectStart > lastHitFP) { *sequenceHitEnd = subjectHit; return; } // Starting at right/last hit position again // queryPosition = matrix + (queryOffset + 1) * encoding_numCodes; queryPosition = (queryOffset + 2); subjectPosition = subjectHit + 1; subjectEnd = subjectHit; changeSinceBest = 0; // May need to alter dropoff so we also dropoff if below zero if (-ungappedExtension_bestScore > originalDropoff) { dropoff = -ungappedExtension_bestScore; } // Extend end of alignment until dropoff while (changeSinceBest > dropoff) { // Shucai // changeSinceBest += queryPosition[__ldg(subjectPosition)]; changeSinceBest += scoreMatrix [querySequence[queryPosition] * encoding_numCodes + (*subjectPosition)]; // If we have got a positive score if (changeSinceBest > 0) { // Keep updating best score and resetting change-since-best // whilst we are reading positive scores do { ungappedExtension_bestScore += changeSinceBest; // queryPosition = queryPosition + encoding_numCodes; queryPosition = queryPosition + 1; subjectPosition++; // changeSinceBest = queryPosition[__ldg(subjectPosition)]; changeSinceBest = scoreMatrix[querySequence[queryPosition] * encoding_numCodes + (*subjectPosition)]; } while (changeSinceBest > 0); subjectEnd = subjectPosition; // Check need for change in dropoff if ((dropoff = -ungappedExtension_bestScore) < originalDropoff) { dropoff = originalDropoff; } } // queryPosition = queryPosition + encoding_numCodes; queryPosition = queryPosition + 1; subjectPosition++; } subjectEnd--; *sequenceHitEnd = subjectEnd; if (ungappedExtension_bestScore >= blast_ungappedNominalTrigger) { int2 diagonal; struct ungappedExtension *newUngappedExtension = NULL; newUngappedExtension = *numOfTriggerExtensions >= ungappedExtensionsPerThread ? 
&ungappedExtension_additonal [atomicAdd(&global_numAdditionalTriggerExtensions, 1)] : &ungappedExtension_extensions [atomicAdd(numOfTriggerExtensions, 1)]; // newUngappedExtension = // &ungappedExtension_extensions[*numOfTriggerExtensions]; // Calculate diagonal diagonal = (subjectHit - subject) - queryOffset; // Determine offsets from pointers newUngappedExtension->start.subjectOffset = subjectStart - subject; newUngappedExtension->end.subjectOffset = subjectEnd - subject; newUngappedExtension->start.queryOffset = newUngappedExtension->start.subjectOffset - diagonal; newUngappedExtension->end.queryOffset = newUngappedExtension->end.subjectOffset - diagonal; newUngappedExtension->seed = ungappedExtension_findProteinSeed_sm( newUngappedExtension, subject, encoding_numCodes, querySequence, scoreMatrix); newUngappedExtension->next = NULL; newUngappedExtension->nominalScore = ungappedExtension_bestScore; newUngappedExtension->status = ungappedExtension_UNGAPPED; newUngappedExtension->sequenceCount = sequenceCount; // newUngappedExtension->tid = tid; // Shucai // Record the number of hits satisfying the next step //(*numOfTriggerExtensions)++; } } __global__ void search_protein_hit_detection_prescan( struct sequenceDataFP *sequenceDataFP, unsigned char *sequence, struct parameters *parametersFP_g, struct groupFP *wordLookupDFA_groupsFP, unsigned char *wordLookupDFAFP, int *blast_numHits_g, uint4 nTotalSequenceNum, unsigned int groupNum) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int tt = threadIdx.x; unsigned int warpId = tid >> 5; unsigned char laneId = tt & 31; extern __shared__ unsigned char DFA_groupFP_array[]; __shared__ unsigned int bin_numHits_share[BLOCK_SIZE * BIN_X]; unsigned int *bin_numHits = bin_numHits_share + (tt >> 5 << BIN_POWER); for (int ii = 0; ii < BIN_X; ii++) bin_numHits[laneId + ii * 32] = 0; int *blast_numHits = blast_numHits_g + (warpId << BIN_POWER); unsigned char *subject, *sequenceEnd, *address, *start; int4 subjectOffset, count; unsigned char currentWord; const unsigned char *currentBlock; uint2 *queryOffsets, queryOffset; struct parameters parametersFP = *parametersFP_g; extern __shared__ struct groupFP DFA_groupFP_s[]; for (unsigned int ii = tt; ii < groupNum; ii += blockDim.x) { DFA_groupFP_s[ii] = wordLookupDFA_groupsFP[ii]; } __syncthreads(); uint2 *wordLookupDFA_AddPositions = (uint2 *)((char *)wordLookupDFAFP + parametersFP.additionalQueryPositionOffset); uint4 sequenceCount = warpId; while (sequenceCount < nTotalSequenceNum) { if (sequenceDataFP[sequenceCount].sequenceLength >= parametersFP.parameters_wordSize) { start = subject = sequence + sequenceDataFP[sequenceCount].offset; sequenceEnd = subject + sequenceDataFP[sequenceCount].sequenceLength; address = start + laneId; while (address + parametersFP.parameters_wordSize - 1 < sequenceEnd) { struct groupFP *currentGroupFP = DFA_groupFP_s; unsigned char letter = *address; for (count = 1; count < parametersFP.parameters_wordSize; count++) { currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? &DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; address++; letter = *address; } currentBlock = &wordLookupDFAFP[currentGroupFP->nextWords]; // If current code is a regular letter currentWord = letter < parametersFP.wordLookupDFA_numCodes ? __ldg(currentBlock + letter) : __ldg(currentBlock); currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? 
&DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; if (currentWord) { subjectOffset = address - subject; // At least one query position, stored at an extenal address queryOffsets = ((uint2 *)currentBlock) - currentWord; if (!__ldg(queryOffsets)) { // Go to an outside address for additional positions queryOffsets = wordLookupDFA_AddPositions + (__ldg(queryOffsets + 1) * constants_max_int2) + __ldg(queryOffsets + 2); } do { #ifndef NO_STAGE2 queryOffset = __ldg(queryOffsets); int2 diagonal = subjectOffset - queryOffset; unsigned char bin_id = (uint2)diagonal & BIN_MARK; unsigned int nHits = atomicAdd(&(bin_numHits[bin_id]), 1); #endif queryOffsets++; } while (__ldg(queryOffsets)); } start += 32; address = start + laneId; } } sequenceCount += gridDim.x * 4; } for (int ii = 0; ii < BIN_X; ii++) blast_numHits[laneId + ii * 32] = bin_numHits[laneId + ii * 32]; } __global__ void search_protein_hit_detection( struct sequenceDataFP *sequenceDataFP, unsigned char *sequence, struct parameters *parametersFP_g, struct groupFP *wordLookupDFA_groupsFP, const unsigned char *__restrict wordLookupDFAFP, int *blast_numHits_g, uint4 nTotalSequenceNum, uint64_t *HitInfo_g, unsigned int groupNum, int *binOffset_g) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int tt = threadIdx.x; unsigned int warpId = tid >> 5; unsigned char laneId = tt & 31; extern __shared__ unsigned char DFA_groupFP_array[]; __shared__ unsigned int hitsDia_share[BLOCK_SIZE * BIN_X]; __shared__ int binOffset_share[BLOCK_SIZE * BIN_X]; unsigned int *hitsDia = hitsDia_share + (tt >> 5 << BIN_POWER); int *binOffset_t = binOffset_share + (tt >> 5 << BIN_POWER); // uint2 hitslast = 0; for (int ii = 0; ii < BIN_X; ii++) { int bin_id = (warpId << BIN_POWER) + laneId + ii * 32; binOffset_t[laneId + ii * 32] = binOffset_g[bin_id] - blast_numHits_g[bin_id]; hitsDia[laneId + ii * 32] = 0; } unsigned char *subject, *sequenceEnd, *address, *start; int4 subjectOffset, count; unsigned char currentWord; const unsigned char *currentBlock; uint2 *queryOffsets, queryOffset; struct parameters parametersFP = *parametersFP_g; extern __shared__ struct groupFP DFA_groupFP_s[]; for (unsigned int ii = tt; ii < groupNum; ii += blockDim.x) { DFA_groupFP_s[ii] = wordLookupDFA_groupsFP[ii]; } __syncthreads(); uint2 *wordLookupDFA_AddiPositions = (uint2 *)((char *)wordLookupDFAFP + parametersFP.additionalQueryPositionOffset); uint64_t sequenceCount = warpId; while (sequenceCount < nTotalSequenceNum) { if (sequenceDataFP[sequenceCount].sequenceLength >= parametersFP.parameters_wordSize) { start = subject = sequence + sequenceDataFP[sequenceCount].offset; sequenceEnd = subject + sequenceDataFP[sequenceCount].sequenceLength; address = start + laneId; while (address + parametersFP.parameters_wordSize - 1 < sequenceEnd) { struct groupFP *currentGroupFP = DFA_groupFP_s; unsigned char letter = *address; for (count = 1; count < parametersFP.parameters_wordSize; count++) { currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? &DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; address++; letter = *address; } currentBlock = &wordLookupDFAFP[currentGroupFP->nextWords]; // If current code is a regular letter currentWord = letter < parametersFP.wordLookupDFA_numCodes ? __ldg(currentBlock + letter) : __ldg(currentBlock); currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? 
&DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; if (currentWord) { subjectOffset = address - subject; // At least one query position, stored at an extenal address queryOffsets = ((uint2 *)currentBlock) - currentWord; if (!__ldg(queryOffsets)) { // Go to an outside address for additional positions queryOffsets = wordLookupDFA_AddiPositions + (__ldg(queryOffsets + 1) * constants_max_int2) + __ldg(queryOffsets + 2); } do { #ifndef NO_STAGE2 queryOffset = __ldg(queryOffsets); int2 diagonal = subjectOffset - queryOffset; unsigned char bin_id = (uint2)diagonal & BIN_MARK; unsigned int nHits = atomicAdd(&(hitsDia[bin_id]), 1); unsigned int bin_p = binOffset_t[bin_id] + nHits; HitInfo_g[bin_p] = (sequenceCount << 32) + ((diagonal + 0x3fff) << 16) + subjectOffset; queryOffsets++; #endif } while (__ldg(queryOffsets)); } start += 32; address = start + laneId; } } sequenceCount += gridDim.x * 4; } } __global__ void search_protein_hit_detection_warp( struct sequenceDataFP *sequenceDataFP, unsigned char *sequence, struct parameters *parametersFP_g, struct groupFP *wordLookupDFA_groupsFP, const unsigned char *__restrict wordLookupDFAFP, int *blast_numHits_g, uint4 nTotalSequenceNum, uint64_t *HitInfo_g, unsigned int groupNum, unsigned int num_hits) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int tt = threadIdx.x; unsigned int warpId = tid >> 5; unsigned char laneId = tt & 31; extern __shared__ unsigned char DFA_groupFP_array[]; __shared__ unsigned int hitsDia_share[BLOCK_SIZE * BIN_X]; __shared__ unsigned int hitsLast_share[BLOCK_SIZE * BIN_X]; unsigned int *hitsDia = hitsDia_share + (tt >> 5 << BIN_POWER); unsigned int *hitsLast = hitsLast_share + (tt >> 5 << BIN_POWER); uint64_t *HitInfo_t = HitInfo_g + num_hits * (warpId << BIN_POWER); // uint64_t *HitInfo_b = HitInfo_g + num_hits * (warpId << 7); // int *hitsoffset_warp = hitsoffset_g + NUM_SEQS * (warpId << 5); // int *hitsoffset_b = hitsoffset_g + NUM_SEQS * (warpId << 7); // int *hitsoffset = hitsoffset_warp + laneId * NUM_SEQS; // hitsoffset[0] = 0; // hitsoffset++; // uint2 hitslast = 0; for (int ii = 0; ii < BIN_X; ii++) hitsDia[laneId + ii * 32] = 0; int *blast_numHits = blast_numHits_g + (warpId << BIN_POWER); // int *blast_numSeqs = blast_numSeqs_g + (warpId << 5); unsigned char *subject, *sequenceEnd, *address, *start; int4 subjectOffset, count; unsigned char currentWord; const unsigned char *currentBlock; uint2 *queryOffsets, queryOffset; struct parameters parametersFP = *parametersFP_g; extern __shared__ struct groupFP DFA_groupFP_s[]; for (unsigned int ii = tt; ii < groupNum; ii += blockDim.x) { DFA_groupFP_s[ii] = wordLookupDFA_groupsFP[ii]; } __syncthreads(); uint2 *wordLookupDFA_AddiPositions = (uint2 *)((char *)wordLookupDFAFP + parametersFP.additionalQueryPositionOffset); uint64_t sequenceCount = warpId; while (sequenceCount < nTotalSequenceNum) { if (sequenceDataFP[sequenceCount].sequenceLength >= parametersFP.parameters_wordSize) { start = subject = sequence + sequenceDataFP[sequenceCount].offset; sequenceEnd = subject + sequenceDataFP[sequenceCount].sequenceLength; address = start + laneId; while (address + parametersFP.parameters_wordSize - 1 < sequenceEnd) { struct groupFP *currentGroupFP = DFA_groupFP_s; unsigned char letter = *address; for (count = 1; count < parametersFP.parameters_wordSize; count++) { currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? 
&DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; address++; letter = *address; } currentBlock = &wordLookupDFAFP[currentGroupFP->nextWords]; // If current code is a regular letter currentWord = letter < parametersFP.wordLookupDFA_numCodes ? __ldg(currentBlock + letter) : __ldg(currentBlock); currentGroupFP = letter < parametersFP.wordLookupDFA_numCodes ? &DFA_groupFP_s[currentGroupFP->nextGroups + letter] : &DFA_groupFP_s[currentGroupFP->nextGroups]; if (currentWord) { subjectOffset = address - subject; // At least one query position, stored at an extenal address queryOffsets = ((uint2 *)currentBlock) - currentWord; if (!__ldg(queryOffsets)) { // Go to an outside address for additional positions queryOffsets = wordLookupDFA_AddiPositions + (__ldg(queryOffsets + 1) * constants_max_int2) + __ldg(queryOffsets + 2); } do { #ifndef NO_STAGE2 queryOffset = __ldg(queryOffsets); int2 diagonal = subjectOffset - queryOffset; unsigned char bin_id = (uint2)diagonal & BIN_MARK; unsigned int nHits = atomicAdd(&(hitsDia[bin_id]), 1); unsigned int bin_p = bin_id * num_hits + nHits; HitInfo_t[bin_p] = (sequenceCount << 32) + ((diagonal + 0x3fff) << 16) + subjectOffset; // uint32_t *hit2 = (uint32_t *)(&HitInfo_t[bin_p]); // uint16_t *hit4 = (uint16_t *)(&HitInfo_t[bin_p]); // hit2[1] = sequenceCount; // hit4[1] = diagonal + 0x3fff; // hit4[0] = subjectOffset; queryOffsets++; #endif } while (__ldg(queryOffsets)); } start += 32; address = start + laneId; } } sequenceCount += gridDim.x * 4; for (int ii = 0; ii < BIN_X; ii++) { if ((hitsDia[laneId + ii * 32] - hitsLast[laneId + ii * 32]) < 2) { hitsDia[laneId + ii * 32] = hitsLast[laneId + ii * 32]; } hitsLast[laneId + ii * 32] = hitsDia[laneId + ii * 32]; } } for (int ii = 0; ii < BIN_X; ii++) blast_numHits[laneId + ii * 32] = hitsDia[laneId + ii * 32]; } void search_protein2hitParallel(struct scoreMatrix *scoreMatrixp, struct PSSMatrix PSSMatrix, struct PSSMatrixFP PSSMatrixFP, struct sequenceData *sequenceData_host, uint4 numSequences, uint4 tickFrequency) { // Shucai uint4 i, j, sequenceCount = 0; uint4 nRoundOffset; // PSSMatrix pointers struct PSSMatrixFP *PSSMatrixFPD; int2 *matrixBodyD; // Input database sequence struct sequenceDataFP *sequenceDataFP; struct sequenceDataFP *sequenceDataFPD; unsigned char *sequencesD; // unsigned char *sequencesH; unsigned char *roundStartAddress; // ungapped extension struct ungappedExtension *ungappedExtensionsD; struct ungappedExtension *ungappedExtension; struct ungappedExtension *ungappedExtensionCur, *newUngappedExtension, *additionalUngappedExtension; struct sequenceData *sequenceData; cudaMallocHost((void **)&sequenceData, sizeof(struct sequenceData) * numSequences); memcpy(sequenceData, sequenceData_host, sizeof(struct sequenceData) * numSequences); // ungapped extension numbers uint4 *blast_numUngappedExtensionsD, *blast_numUngappedExtensionsH; uint4 *blast_numTriggerExtensionsD, *blast_numTriggerExtensionsH; uint4 numAdditionalTriggerExtensions, numExtensions; int *blast_numHitsD, *blast_numHitsH; int *blast_numExtD; int *binOffsetD; int4 preSequenceCount; // For time record struct timeval t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; int4 wordNum, groupNum; // parameters struct parameters strParameters; struct parameters *parametersD; // word lookup table struct groupFP *wordLookupDFA_groupD; unsigned char *wordLookupDFAD; uint4 wordLookupDFA_size; // grid and block dimensions int nBlockNum = NUM_BLOCK; int nBlockSize = BLOCK_SIZE; int nTotalThreadNum = nBlockNum * 
nBlockSize; // get t0 gettimeofday(&t0, NULL); wordNum = wordLookupDFA_numWords; groupNum = wordLookupDFA_numGroups; // printf("\n"); // Allocate GPU buffer for PSSMatrix cudaMalloc((void **)&PSSMatrixFPD, sizeof(struct PSSMatrixFP)); cudaMalloc((void **)&matrixBodyD, sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes); // printf("matrixBody: %d\n", (PSSMatrixFP.length + 2) * encoding_numCodes); char *matrixBodyH = (char *)malloc(sizeof(char) * (PSSMatrixFP.length + 2) * encoding_numCodes); for (int ii = 0; ii < PSSMatrixFP.length + 2; ii++) { for (int jj = 0; jj < encoding_numCodes; jj++) { // printf(".%d.", (PSSMatrixFP.matrix - encoding_numCodes)[ii * // encoding_numCodes + jj]); matrixBodyH[ii * encoding_numCodes + jj] = (PSSMatrixFP.matrix - encoding_numCodes)[ii * encoding_numCodes + jj]; if (ii == 0 || ii == PSSMatrixFP.length + 1 || jj == encoding_numCodes - 1) matrixBodyH[ii * encoding_numCodes + jj] = -127; // printf(".%d.", matrixBodyH[ii * encoding_numCodes + jj]); } // printf("\n"); } // Copy PSSMatrix to device memory cudaMemcpy(PSSMatrixFPD, &PSSMatrixFP, sizeof(struct PSSMatrixFP), cudaMemcpyHostToDevice); cudaMemcpy(matrixBodyD, matrixBodyH, sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes, cudaMemcpyHostToDevice); free(matrixBodyH); // cudaMemcpyToSymbol(matrixBody_c, (PSSMatrixFP.matrix - encoding_numCodes), // sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes); // Each thread is for align of one database sequence // sequenceDataFP = (struct sequenceDataFP *)global_malloc(numSequences * // sizeof(struct sequenceDataFP)); cudaMallocHost((void **)&sequenceDataFP, sizeof(struct sequenceDataFP) * numSequences); cudaMalloc((void **)&sequenceDataFPD, numSequences * sizeof(struct sequenceDataFP)); // Allocate buffer for hit matrix offset // hitMatrix_offsetH = (uint4 *)global_malloc((nBlockNum + 1) * // sizeof(uint4)); // cudaMalloc((void **)&hitMatrix_offsetD, (nBlockNum + 1) * sizeof(uint4)); // Allocate ungapped extension buffer on device // int4 nUngappedExtensionNum = UNGAPEXT_PER_THREAD * nTotalThreadNum; int4 nUngappedExtensionNum = TOTAL_UNGAPPED_EXT; strParameters.ungappedExtensionsPerThread = nUngappedExtensionNum / nTotalThreadNum - 1; strParameters.ungappedExtAdditionalStartLoc = strParameters.ungappedExtensionsPerThread * nTotalThreadNum; cudaMalloc((void **)&ungappedExtensionsD, nUngappedExtensionNum * sizeof(struct ungappedExtension)); ungappedExtension = (struct ungappedExtension *)global_malloc( nUngappedExtensionNum * sizeof(struct ungappedExtension)); // Allocate numbers for ungapped extensions blast_numUngappedExtensionsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); blast_numTriggerExtensionsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); blast_numHitsH = (int *)global_malloc(sizeof(int) * nTotalThreadNum * BIN_X); cudaMalloc((void **)&blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum); cudaMalloc((void **)&blast_numTriggerExtensionsD, sizeof(uint4) * nTotalThreadNum); cudaMalloc((void **)&blast_numHitsD, sizeof(int) * nTotalThreadNum * BIN_X); cudaMalloc((void **)&blast_numExtD, sizeof(int) * nTotalThreadNum * BIN_X); cudaMalloc((void **)&binOffsetD, sizeof(int) * nTotalThreadNum * BIN_X); // Allocate device memory, about 132Mbytes (according to texture limit) cudaMalloc((void **)&sequencesD, sizeof(unsigned char) * (parameters_batchSize + 50000)); // Allocate parameters buffer on device cudaMalloc((void **)&parametersD, sizeof(struct parameters)); strParameters.parameters_wordSize = 
parameters_wordSize; strParameters.encoding_numCodes = encoding_numCodes; strParameters.wordLookupDFA_numCodes = wordLookupDFA_numCodes; strParameters.additionalQueryPositionOffset = wordNum * sizeof(char) + sizeof(int2) * wordLookupDFA_numExtPositions; strParameters.blast_ungappedNominalTrigger = blast_ungappedNominalTrigger; strParameters.statistics_ungappedNominalDropoff = statistics_ungappedNominalDropoff; strParameters.parameters_A = parameters_A; strParameters.parameters_overlap = parameters_overlap; // printf("parameters_A: %d parameters_overlap: %d\n", parameters_A, // parameters_overlap); cudaMemcpy(parametersD, &strParameters, sizeof(struct parameters), cudaMemcpyHostToDevice); // printf("parameters_Size: %d\n", sizeof(struct parameters)); // Allocate word lookup table wordLookupDFA_size = sizeof(char) * wordNum + 2 * sizeof(int2) * wordLookupDFA_numExtPositions; cudaMalloc((void **)&wordLookupDFA_groupD, sizeof(struct groupFP) * groupNum); cudaMalloc((void **)&wordLookupDFAD, wordLookupDFA_size); cudaMemcpy(wordLookupDFAD, wordLookupDFA, wordLookupDFA_size, cudaMemcpyHostToDevice); cudaMemcpy(wordLookupDFA_groupD, wordLookupDFA_groupsFP, sizeof(struct groupFP) * groupNum, cudaMemcpyHostToDevice); // printf("numDFAGroup: %d DFA_group_size: %u wordLookupDFA_size: %u\n", // groupNum, sizeof(struct groupFP) * groupNum, wordLookupDFA_size); cudaMemset(blast_numUngappedExtensionsD, 0, sizeof(uint4) * nTotalThreadNum); cudaMemset(blast_numHitsD, 0, sizeof(uint4) * nBlockNum); // //Use constant memory for the word lookup table group // cudaMemcpyToSymbol(wordLookupDFA_groupsC, wordLookupDFA_groupsFP, //sizeof(struct groupFP) * groupNum); // // Use constant memory to store score matrix int scoreMatrixSize = encoding_numCodes * encoding_numCodes; cudaMemcpyToSymbol(scoreMatrixC, ((char *)scoreMatrixp->matrix) + sizeof(int2 *) * encoding_numCodes, sizeof(int2) * scoreMatrixSize); // Use constant memory to store query sequence unsigned char *tempQueryCode; tempQueryCode = (unsigned char *)global_malloc(sizeof(unsigned char) * (PSSMatrixFP.length + 2)); memcpy(&tempQueryCode[1], PSSMatrixFP.queryCodes, sizeof(unsigned char) * PSSMatrixFP.length); tempQueryCode[0] = encoding_sentinalCode; tempQueryCode[PSSMatrixFP.length + 1] = encoding_sentinalCode; cudaMemcpyToSymbol(querySequenceC, tempQueryCode, sizeof(unsigned char) * (PSSMatrixFP.length + 2)); free(tempQueryCode); // uint4 iniVal = nTotalThreadNum; // printf("PSSMatrixSize: %d scoreMatrixSize: %d querySize: %d DFA_group: %d // DFA_qp: %d\n", sizeof(char) * (PSSMatrixFP.length + 2) * encoding_numCodes // >> 10, sizeof(int2) * scoreMatrixSize >> 10, (PSSMatrixFP.length + 2) * // sizeof(unsigned char) >> 10, (sizeof(struct groupFP) * groupNum) >> 10, // wordLookupDFA_size >> 10); // get t1 gettimeofday(&t1, NULL); int4 numSequencesRound, numSequenceProcessed; numSequenceProcessed = 0; // int totalSeqLength = 0; size_t dmem_tot = 0, dmem_free = 0; cudaMemGetInfo(&dmem_free, &dmem_tot); printf("Dmem total: %d (MB) Batch size: %d (MB) Dmem free: %d (MB)\n", dmem_tot >> 20, parameters_batchSize >> 20, dmem_free >> 20); // int *HitNumD; while (numSequenceProcessed < numSequences) { // get t2 gettimeofday(&t2, NULL); cudaMemset(blast_numTriggerExtensionsD, 0, sizeof(uint4) * nTotalThreadNum); roundStartAddress = sequenceData[sequenceCount].sequence - 1; for (i = 0; sequenceCount < numSequences; i++, sequenceCount++) { sequenceDataFP[i].descriptionLength = sequenceData[sequenceCount].descriptionLength; sequenceDataFP[i].descriptionStart = 
sequenceData[sequenceCount].descriptionStart; sequenceDataFP[i].sequenceLength = sequenceData[sequenceCount].sequenceLength; sequenceDataFP[i].encodedLength = sequenceData[sequenceCount].encodedLength; sequenceDataFP[i].offset = sequenceData[sequenceCount].sequence - roundStartAddress; // about 130MB if (sequenceDataFP[i].offset + sequenceData[sequenceCount].encodedLength > parameters_batchSize) { i++; sequenceCount++; break; } } nRoundOffset = sequenceDataFP[i - 1].offset + sequenceDataFP[i - 1].encodedLength; numSequencesRound = i; cudaMemcpy(sequencesD, roundStartAddress, sizeof(unsigned char) * (nRoundOffset + 2), cudaMemcpyHostToDevice); cudaMemcpy(sequenceDataFPD, sequenceDataFP, sizeof(struct sequenceDataFP) * numSequencesRound, cudaMemcpyHostToDevice); gettimeofday(&t3, NULL); // cudaMemcpyToSymbol(global_sequenceCount, &iniVal, sizeof(uint4)); numAdditionalTriggerExtensions = 0; cudaMemcpyToSymbol(global_numAdditionalTriggerExtensions, &numAdditionalTriggerExtensions, sizeof(uint4)); // get t4 gettimeofday(&t4, NULL); struct timeval s0, s1; cudaError_t cudaRes; dim3 dimGrid(nBlockNum, 1); dim3 dimBlock(nBlockSize, 1); int kernel_time = 0; gettimeofday(&s0, NULL); search_protein_hit_detection_prescan <<<dimGrid, dimBlock, sizeof(struct groupFP) * groupNum>>> (sequenceDataFPD, sequencesD, parametersD, wordLookupDFA_groupD, wordLookupDFAD, blast_numHitsD, numSequencesRound, groupNum); cudaThreadSynchronize(); cudaRes = cudaGetLastError(); if (cudaRes != cudaSuccess) { printf("CUDA error: %s in %d\n", cudaGetErrorString(cudaRes), __LINE__); exit(-1); } int total_numHits = get_total_numHits(blast_numHitsD, nTotalThreadNum * BIN_X); get_bin_offset(blast_numHitsD, binOffsetD, nTotalThreadNum * BIN_X); cudaMemGetInfo(&dmem_free, &dmem_tot); size_t est_mem_usage = total_numHits * sizeof(uint64_t) * 2 + (parameters_batchSize + 50000) * sizeof(unsigned char); printf("Est mem usage: %d (MB) numSeqProc: %d percentProc: %d\n", est_mem_usage >> 20, numSequenceProcessed, 100 * numSequenceProcessed/numSequences); // printf("total_numHits: %d hit buffer memory size: %d (MB)\n", // total_numHits, sizeof(uint64_t) * total_numHits >> 20); // printf("Estimated memory usage: %d (MB)\n", (sizeof(uint64_t) * // total_numHits * 2 + BATCH_SIZE + 2000000) >> 20); uint64_t *HitInfoD; cudaMalloc((void **)&HitInfoD, sizeof(uint64_t) * total_numHits); gettimeofday(&s1, NULL); kernel_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); #ifdef VERBOSE printf( "Hit Detection Prescan Time: %f\n", (float)(1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec) / 1000000); #endif gettimeofday(&s0, NULL); search_protein_hit_detection <<<dimGrid, dimBlock, sizeof(struct groupFP) * groupNum>>> (sequenceDataFPD, sequencesD, parametersD, wordLookupDFA_groupD, wordLookupDFAD, blast_numHitsD, numSequencesRound, HitInfoD, groupNum, binOffsetD); cudaThreadSynchronize(); cudaRes = cudaGetLastError(); if (cudaRes != cudaSuccess) { printf("CUDA error: %s in %d\n", cudaGetErrorString(cudaRes), __LINE__); exit(-1); } gettimeofday(&s1, NULL); kernel_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); #ifdef VERBOSE printf( "Hit Detection Time: %f\n", (float)(1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec) / 1000000); #endif hit_sort_filter(blast_numHitsD, blast_numExtD, binOffsetD, HitInfoD, BLOCK_SIZE, NUM_BLOCK, BIN_X, total_numHits, strParameters.parameters_A, strParameters.parameters_overlap); gettimeofday(&s1, NULL); kernel_time += (1000000 * (s1.tv_sec - s0.tv_sec) + 
s1.tv_usec - s0.tv_usec); // printf("Hit Sort Time: %d\n", (1000000 * (s1.tv_sec - s0.tv_sec) + // s1.tv_usec - s0.tv_usec)); gettimeofday(&s0, NULL); ungappedExtension_twoHitExtendG_bin_sorted_sm_s <<<dimGrid, dimBlock, sizeof(unsigned char) * (PSSMatrixFP.length + 2)>>> (parametersD, HitInfoD, blast_numUngappedExtensionsD, sequencesD, PSSMatrixFPD, sequenceDataFPD, ungappedExtensionsD, blast_numTriggerExtensionsD, blast_numHitsD, blast_numExtD, binOffsetD); cudaThreadSynchronize(); gettimeofday(&s1, NULL); cudaRes = cudaGetLastError(); if (cudaRes != cudaSuccess) { printf("CUDA error: %s in %d\n", cudaGetErrorString(cudaRes), __LINE__); exit(-1); } kernel_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); #ifdef VERBOSE printf( "Hit Extension Time: %f\n", (float)(1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec) / 1000000); #endif // get t5 gettimeofday(&t5, NULL); // Post processing // copy hit results back cudaMemcpy(blast_numTriggerExtensionsH, blast_numTriggerExtensionsD, sizeof(uint4) * nTotalThreadNum, cudaMemcpyDeviceToHost); cudaMemcpy(ungappedExtension, ungappedExtensionsD, nUngappedExtensionNum * sizeof(struct ungappedExtension), cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(&numAdditionalTriggerExtensions, global_numAdditionalTriggerExtensions, sizeof(uint4)); // get t6 gettimeofday(&t6, NULL); // Add hits to the alignment list // Additional buffer is used, sort ungapped extensions // according to sequence index long qsort_time = 0; if (numAdditionalTriggerExtensions > 0) { additionalUngappedExtension = ungappedExtension + strParameters.ungappedExtAdditionalStartLoc; gettimeofday(&s0, NULL); qsort(additionalUngappedExtension, numAdditionalTriggerExtensions, sizeof(struct ungappedExtension), compare_ungappedextension); gettimeofday(&s1, NULL); qsort_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); } for (i = 0; i < nTotalThreadNum; i++) { // printf("%d %d\n", i, blast_numTriggerExtensionsH[i]); if (blast_numTriggerExtensionsH[i] > 0) { // ungappedExtensionCur = ungappedExtension + i * UNGAPEXT_PER_THREAD; ungappedExtensionCur = ungappedExtension + i * strParameters.ungappedExtensionsPerThread; preSequenceCount = -1; numExtensions = (blast_numTriggerExtensionsH[i] > strParameters.ungappedExtensionsPerThread) ? 
strParameters.ungappedExtensionsPerThread : blast_numTriggerExtensionsH[i]; gettimeofday(&s0, NULL); qsort(ungappedExtensionCur, numExtensions, sizeof(struct ungappedExtension), compare_ungappedextension); gettimeofday(&s1, NULL); qsort_time += (1000000 * (s1.tv_sec - s0.tv_sec) + s1.tv_usec - s0.tv_usec); for (j = 0; j < numExtensions; j++) { // printf("seq: %d sub: %d - %d qury: %d - %d seed: %d - %d\n", ////i, // ungappedExtensionCur[j].sequenceCount + numSequenceProcessed, // ungappedExtensionCur[j].start.subjectOffset, // ungappedExtensionCur[j].end.subjectOffset, // ungappedExtensionCur[j].start.queryOffset, // ungappedExtensionCur[j].end.queryOffset, // ungappedExtensionCur[j].seed.queryOffset, // ungappedExtensionCur[j].seed.subjectOffset //); if (ungappedExtensionCur[j].sequenceCount != preSequenceCount) { alignments_createNew( sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].descriptionStart, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].descriptionLength, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].sequence, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].sequenceLength, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].encodedLength); preSequenceCount = ungappedExtensionCur[j].sequenceCount; } newUngappedExtension = (struct ungappedExtension *)memBlocks_newEntry( ungappedExtension_extensions); memcpy(newUngappedExtension, &ungappedExtensionCur[j], sizeof(struct ungappedExtension)); alignments_addUngappedExtension(newUngappedExtension); } // Add additional extensions if (blast_numTriggerExtensionsH[i] > strParameters.ungappedExtensionsPerThread) { int tempStartLoc = findStartLoc(additionalUngappedExtension, i, numAdditionalTriggerExtensions); numExtensions = blast_numTriggerExtensionsH[i] - strParameters.ungappedExtensionsPerThread; for (j = tempStartLoc; j < numExtensions + tempStartLoc; j++) { if (additionalUngappedExtension[j].sequenceCount != preSequenceCount) { alignments_createNew( sequenceData[additionalUngappedExtension[j].sequenceCount + numSequenceProcessed].descriptionStart, sequenceData[additionalUngappedExtension[j].sequenceCount + numSequenceProcessed].descriptionLength, sequenceData[additionalUngappedExtension[j].sequenceCount + numSequenceProcessed].sequence, sequenceData[additionalUngappedExtension[j].sequenceCount + numSequenceProcessed].sequenceLength, sequenceData[additionalUngappedExtension[j].sequenceCount + numSequenceProcessed].encodedLength); preSequenceCount = additionalUngappedExtension[j].sequenceCount; } newUngappedExtension = (struct ungappedExtension *)memBlocks_newEntry( ungappedExtension_extensions); memcpy(newUngappedExtension, &additionalUngappedExtension[j], sizeof(struct ungappedExtension)); alignments_addUngappedExtension(newUngappedExtension); } } blast_numTriggerExtensions += blast_numTriggerExtensionsH[i]; } } #ifdef VERBOSE printf("Extension Sort Time: %d\n", (float)qsort_time / 1000000); #endif numSequenceProcessed += numSequencesRound; // cudaFree(hitMatrix_furthestD); // get t7 gettimeofday(&t7, NULL); // gapped extension for the current chunk of sequences on the GPU // alignments_fingGoodAlignmentsGPU(&PSSMatrixFPD, //GPU buffer // PSSMatrixFP, // scoreMatrixp, //&matrixBodyD, //GPU buffer //&sequenceDataFPD[w], //GPU buffer //&sequencesD[w], //GPU buffer // nRoundOffset); // use cpu for gapped extension // alignments_findGoodAlignments(PSSMatrix, PSSMatrixFP); // get t9 gettimeofday(&t9, NULL); 
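    // Accumulate per-phase wall-clock times for this chunk into timeRecord
    // (gapped alignment interval, preprocessing, H2D copy, hit/extension
    // kernels, D2H copy, and ungapped-extension post-processing).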
timeRecord.gappedAlignmentTime += (1000000 * (t9.tv_sec - t7.tv_sec) + t9.tv_usec - t7.tv_usec); // aggregate execution time timeRecord.preProcessTime += (1000000 * (t3.tv_sec - t2.tv_sec) + t3.tv_usec - t2.tv_usec); timeRecord.dataCopyTimeH2D += (1000000 * (t4.tv_sec - t3.tv_sec) + t4.tv_usec - t3.tv_usec); timeRecord.searchTime += (1000000 * (t5.tv_sec - t4.tv_sec) + t5.tv_usec - t4.tv_usec); timeRecord.dataCopyTimeD2H += (1000000 * (t6.tv_sec - t5.tv_sec) + t6.tv_usec - t5.tv_usec); timeRecord.addUngappedExtensionTime += (1000000 * (t7.tv_sec - t6.tv_sec) + t7.tv_usec - t6.tv_usec); blast_numHits += total_numHits; cudaFree(HitInfoD); } cudaMemcpy(blast_numUngappedExtensionsH, blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum, cudaMemcpyDeviceToHost); for (j = 0; j < nTotalThreadNum; j++) blast_numUngappedExtensions += blast_numUngappedExtensionsH[j]; cudaFree(PSSMatrixFPD); cudaFree(matrixBodyD); cudaFree(ungappedExtensionsD); cudaFree(blast_numUngappedExtensionsD); cudaFree(blast_numTriggerExtensionsD); cudaFree(blast_numHitsD); cudaFree(blast_numExtD); cudaFree(binOffsetD); cudaFree(parametersD); cudaFree(wordLookupDFA_groupD); cudaFree(wordLookupDFAD); cudaFree(sequenceDataFPD); cudaFree(sequencesD); cudaFree(sequenceDataFP); free(ungappedExtension); free(blast_numUngappedExtensionsH); free(blast_numTriggerExtensionsH); free(blast_numHitsH); cudaFreeHost(sequenceData); // get t8 gettimeofday(&t8, NULL); // Record time timeRecord.iniTime = 1000000 * (t1.tv_sec - t0.tv_sec) + t1.tv_usec - t0.tv_usec; timeRecord.postProcessTime = 1000000 * (t8.tv_sec - t7.tv_sec) + t8.tv_usec - t7.tv_usec; timeRecord.hitUngappedExtTime = 1000000 * (t8.tv_sec - t1.tv_sec) + t8.tv_usec - t1.tv_usec; } // Embarrassingly parallel approach is used. One thread is used for // the hit detection of one sequence __global__ void search_protein1hitKernel( struct PSSMatrixFP *PSSMatrixFP, int2 *matrixBody, struct sequenceDataFP *sequenceDataFP, unsigned char *sequence, struct parameters *parametersFP, struct groupFP *wordLookupDFA_groupsFP, unsigned char *wordLookupDFAFP, uint4 *blast_numUngappedExtensions, uint4 *blast_numTriggerExtensions, uint4 *blast_numHits, uint4 *hitMatrix_furthestp, uint4 *hitMatrix_offsetp, struct ungappedExtension *ungappedExtension_extensionsp, uint4 nTotalSequenceNum) { int bid = blockIdx.x * gridDim.y + blockIdx.y; int tid = bid * blockDim.x * blockDim.y + threadIdx.x * blockDim.y + threadIdx.y; unsigned char *subject, *sequenceEnd, *address; int4 subjectOffset, count; unsigned char currentWord, *currentBlock; struct groupFP *currentGroupFP; uint2 *wordLookupDFA_AddiPositions; uint4 numOfTriggerExtensions = 0; uint2 *queryOffsets, queryOffset; struct ungappedExtension *ungappedExtension_current; int4 diagonal; uint4 *lastHitFP; uint4 ungappedExtension_subjectEndReachedFP; uint4 *hitMatrix_Local; uint4 sequenceCount; hitMatrix_Local = hitMatrix_furthestp + hitMatrix_offsetp[tid] + PSSMatrixFP->length; ungappedExtension_extensionsp->start.subjectOffset = 0; ungappedExtension_current = ungappedExtension_extensionsp + tid * UNGAPEXT_PER_THREAD; wordLookupDFA_AddiPositions = (uint2 *)((char *)wordLookupDFAFP + parametersFP->additionalQueryPositionOffset); // Set the PSSMatrix body PSSMatrixFP->matrix = matrixBody + parametersFP->encoding_numCodes; sequenceCount = tid; while (sequenceCount < nTotalSequenceNum) { subject = address = sequence + sequenceDataFP[sequenceCount].offset; if (sequenceDataFP[sequenceCount].sequenceLength >= parametersFP->parameters_wordSize) { 
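      // Prime the word-lookup DFA with the first (wordSize - 1) subject
      // codes: each regular code follows the matching child group, while
      // sentinel/irregular codes take the group's default transition, so
      // the scan below completes its first full word at the wordSize-th code.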
currentGroupFP = wordLookupDFA_groupsFP; // currentGroupFP = wordLookupDFA_groupsC; count = 1; while (count < parametersFP->parameters_wordSize) { if (*address < parametersFP->wordLookupDFA_numCodes) { currentGroupFP = &wordLookupDFA_groupsFP[currentGroupFP->nextGroups + *address]; // currentGroupFP = &wordLookupDFA_groupsC[currentGroupFP->nextGroups // + *address]; } else { currentGroupFP = &wordLookupDFA_groupsFP[currentGroupFP->nextGroups]; // currentGroupFP = // &wordLookupDFA_groupsC[currentGroupFP->nextGroups]; } address++; count++; } sequenceEnd = subject + sequenceDataFP[sequenceCount].sequenceLength; while (address < sequenceEnd) { currentBlock = &wordLookupDFAFP[currentGroupFP->nextWords]; // If current code is a regular letter if (*address < parametersFP->wordLookupDFA_numCodes) { currentWord = currentBlock[*address]; currentGroupFP = &wordLookupDFA_groupsFP[currentGroupFP->nextGroups + *address]; // currentGroupFP = &wordLookupDFA_groupsC[currentGroupFP->nextGroups // + *address]; } else { if (address >= sequenceEnd) break; currentWord = *currentBlock; currentGroupFP = &wordLookupDFA_groupsFP[currentGroupFP->nextGroups]; // currentGroupFP = // &wordLookupDFA_groupsC[currentGroupFP->nextGroups]; } if (currentWord) { subjectOffset = address - subject; // At least one query position, stored at an extenal address queryOffsets = ((uint2 *)currentBlock) - currentWord; if (!*queryOffsets) { // Go to an outside address for additional positions queryOffsets = wordLookupDFA_AddiPositions + (*(queryOffsets + 1) * constants_max_int2) + *(queryOffsets + 2); } do { queryOffset = *queryOffsets; #ifndef NO_STAGE2 // Calculate the diagonal this hit is on diagonal = subjectOffset - queryOffset; // If we have not extended past this point on this diagonal lastHitFP = hitMatrix_Local + diagonal; if (*lastHitFP < address - sequence) { // Number of extensions for each subject sequence blast_numUngappedExtensions[tid]++; // If only one hit triggered this extension ungappedExtension_oneHitExtendD( sequence, queryOffset, address, *PSSMatrixFP, subject, &ungappedExtension_subjectEndReachedFP, parametersFP->encoding_numCodes, parametersFP->statistics_ungappedNominalDropoff, parametersFP->blast_ungappedNominalTrigger, ungappedExtension_current, &numOfTriggerExtensions, sequenceCount, tid); // Update furthest reached value for the diagonal *lastHitFP = ungappedExtension_subjectEndReachedFP; } #endif queryOffsets++; blast_numHits[tid]++; } while (*queryOffsets); } address++; } } // option======================================================= // sequenceCount = atomicAdd(&global_sequenceCount, 1); sequenceCount += gridDim.x * blockDim.x; //============================================================ } blast_numTriggerExtensions[tid] = (uint4)numOfTriggerExtensions; return; } __device__ struct ungappedExtension *ungappedExtension_oneHitExtendD( unsigned char *sequenceStart, int4 queryOffset, unsigned char *subjectHit, struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, uint4 *sequenceHitEnd, unsigned char encoding_numCodes, int4 statistics_ungappedNominalDropoff, int4 blast_ungappedNominalTrigger, struct ungappedExtension *ungappedExtension_extensions, uint4 *numOfTriggerExtensions, uint4 sequenceCount, int4 tid) { int2 *queryPosition; // int4 queryPosition; unsigned char *subjectPosition, *subjectStart, *subjectEnd; int4 changeSinceBest = 0; int4 dropoff, originalDropoff; int4 ungappedExtension_bestScore; originalDropoff = dropoff = -statistics_ungappedNominalDropoff; ungappedExtension_bestScore = 0; // 
Start at queryEnd,subjectEnd (right/last hit position) queryPosition = PSSMatrixFP.matrix + queryOffset * encoding_numCodes; // queryPosition = queryOffset + 1; subjectPosition = subjectStart = subjectHit; while (changeSinceBest > dropoff) { changeSinceBest += queryPosition[*subjectPosition]; // changeSinceBest += scoreMatrixC[querySequenceC[queryPosition] * // encoding_numCodes + (*subjectPosition)]; // If we have got a positive score if (changeSinceBest > 0) { // Keep updating best score and resetting change-since-best // whilst we are reading positive scores do { ungappedExtension_bestScore += changeSinceBest; queryPosition = queryPosition - encoding_numCodes; // queryPosition = queryPosition - 1; subjectPosition--; changeSinceBest = queryPosition[*subjectPosition]; // changeSinceBest = scoreMatrixC[querySequenceC[queryPosition] * // encoding_numCodes + (*subjectPosition)]; } while (changeSinceBest > 0); subjectStart = subjectPosition; } queryPosition = queryPosition - encoding_numCodes; // queryPosition = queryPosition - 1; subjectPosition--; } // Correct for extra decrement subjectStart++; // Starting at right/last hit position again queryPosition = PSSMatrixFP.matrix + (queryOffset + 1) * encoding_numCodes; // queryPosition = (queryOffset + 2); subjectPosition = subjectEnd = subjectHit + 1; changeSinceBest = 0; // May need to alter dropoff so we also dropoff if below zero if (-ungappedExtension_bestScore > originalDropoff) { dropoff = -ungappedExtension_bestScore; } // Extend end of alignment until dropoff while (changeSinceBest > dropoff) { // Shucai changeSinceBest += queryPosition[*subjectPosition]; // changeSinceBest += scoreMatrixC[querySequenceC[queryPosition] * // encoding_numCodes + (*subjectPosition)]; // If we have got a positive score if (changeSinceBest > 0) { // Keep updating best score and resetting change-since-best // whilst we are reading positive scores do { ungappedExtension_bestScore += changeSinceBest; queryPosition = queryPosition + encoding_numCodes; // queryPosition = queryPosition + 1; subjectPosition++; changeSinceBest = queryPosition[*subjectPosition]; // changeSinceBest = scoreMatrixC[querySequenceC[queryPosition] * // encoding_numCodes + (*subjectPosition)]; } while (changeSinceBest > 0); subjectEnd = subjectPosition; // Check need for change in dropoff if ((dropoff = -ungappedExtension_bestScore) < originalDropoff) { dropoff = originalDropoff; } } queryPosition = queryPosition + encoding_numCodes; // queryPosition = queryPosition + 1; subjectPosition++; } subjectEnd--; //*sequenceHitEnd = subjectPosition - subject; *sequenceHitEnd = subjectPosition - sequenceStart; if (ungappedExtension_bestScore >= blast_ungappedNominalTrigger) { int4 diagonal; struct ungappedExtension *newUngappedExtension = NULL; newUngappedExtension = &ungappedExtension_extensions[*numOfTriggerExtensions]; // Calculate diagonal diagonal = (subjectHit - subject) - queryOffset; // Determine offsets from pointers newUngappedExtension->start.subjectOffset = subjectStart - subject; newUngappedExtension->end.subjectOffset = subjectEnd - subject; newUngappedExtension->start.queryOffset = newUngappedExtension->start.subjectOffset - diagonal; newUngappedExtension->end.queryOffset = newUngappedExtension->end.subjectOffset - diagonal; // newUngappedExtension->seed = // ungappedExtension_findProteinSeed(newUngappedExtension, // PSSMatrixFP, subject, encoding_numCodes); newUngappedExtension->next = NULL; newUngappedExtension->nominalScore = ungappedExtension_bestScore; 
newUngappedExtension->status = ungappedExtension_UNGAPPED; newUngappedExtension->sequenceCount = sequenceCount; // Shucai // Record the number of hits satisfying the next step (*numOfTriggerExtensions)++; return newUngappedExtension; } else { return NULL; } } // Shucai // Search a protein database using 1-hit extension mode void search_protein1hitParallel(struct scoreMatrix *scoreMatrixp, struct PSSMatrixFP PSSMatrixFP, struct sequenceData *sequenceData, uint4 numSequences, uint4 tickFrequency) { // Shucai uint4 i, j, sequenceCount = 0; uint4 nRoundOffset; // PSSMatrix pointers struct PSSMatrixFP *PSSMatrixFPD; int2 *matrixBodyD; // Input database sequence struct sequenceDataFP *sequenceDataFP; struct sequenceDataFP *sequenceDataFPD; unsigned char *sequencesD; unsigned char *roundStartAddress; // ungapped extension struct ungappedExtension *ungappedExtensionsD; struct ungappedExtension *ungappedExtension; struct ungappedExtension *ungappedExtensionCur, *newUngappedExtension; // ungapped extension numbers uint4 *blast_numUngappedExtensionsD, *blast_numUngappedExtensionsH; uint4 *blast_numTriggerExtensionsD, *blast_numTriggerExtensionsH; uint4 *blast_numHitsD, *blast_numHitsH; uint4 *hitMatrix_furthestD; uint4 *hitMatrix_offsetH; uint4 *hitMatrix_offsetD; uint4 preSequenceCount; // For time record struct timeval t0, t1, t2, t3, t4, t5, t6, t7, t8; int4 wordNum, groupNum; // parameters struct parameters strParameters; struct parameters *parametersD; // word lookup table struct groupFP *wordLookupDFA_groupD; unsigned char *wordLookupDFAD; uint4 wordLookupDFA_size; // grid and block dimensions int nBlockNum = NUM_BLOCK; int nBlockSize = BLOCK_SIZE; int nTotalThreadNum = nBlockNum * nBlockSize; dim3 dimGrid(nBlockNum, 1); dim3 dimBlock(nBlockSize, 1); // get t0 gettimeofday(&t0, NULL); wordNum = wordLookupDFA_numWords; groupNum = wordLookupDFA_numGroups; // Allocate GPU buffer for PSSMatrix cudaMalloc((void **)&PSSMatrixFPD, sizeof(struct PSSMatrixFP)); cudaMalloc((void **)&matrixBodyD, sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes); // Copy PSSMatrix to device memory cudaMemcpy(PSSMatrixFPD, &PSSMatrixFP, sizeof(struct PSSMatrixFP), cudaMemcpyHostToDevice); cudaMemcpy(matrixBodyD, (PSSMatrixFP.matrix - encoding_numCodes), sizeof(int2) * (PSSMatrixFP.length + 2) * encoding_numCodes, cudaMemcpyHostToDevice); // Each thread is for align of one database sequence sequenceDataFP = (struct sequenceDataFP *)global_malloc( numSequences * sizeof(struct sequenceDataFP)); cudaMalloc((void **)&sequenceDataFPD, numSequences * sizeof(struct sequenceDataFP)); // Allocate buffer for hit matrix offset hitMatrix_offsetH = (uint4 *)global_malloc((nTotalThreadNum + 1) * sizeof(uint4)); cudaMalloc((void **)&hitMatrix_offsetD, (nTotalThreadNum + 1) * sizeof(uint4)); // Allocate ungapped extension buffer on device int4 nUngappedExtensionNum = UNGAPEXT_PER_THREAD * nTotalThreadNum; cudaMalloc((void **)&ungappedExtensionsD, nUngappedExtensionNum * sizeof(struct ungappedExtension)); ungappedExtension = (struct ungappedExtension *)global_malloc( nUngappedExtensionNum * sizeof(struct ungappedExtension)); // Allocate numbers for ungapped extensions blast_numUngappedExtensionsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); blast_numTriggerExtensionsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); blast_numHitsH = (uint4 *)global_malloc(sizeof(uint4) * nTotalThreadNum); cudaMalloc((void **)&blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum); cudaMalloc((void 
**)&blast_numTriggerExtensionsD, sizeof(uint4) * nTotalThreadNum); cudaMalloc((void **)&blast_numHitsD, sizeof(uint4) * nTotalThreadNum); // Allocate device memory, about 132Mbytes (according to texture limit) cudaMalloc((void **)&sequencesD, sizeof(unsigned char) * 132000000); // Allocate parameters buffer on device cudaMalloc((void **)&parametersD, sizeof(struct parameters)); strParameters.parameters_wordSize = parameters_wordSize; strParameters.encoding_numCodes = encoding_numCodes; strParameters.wordLookupDFA_numCodes = wordLookupDFA_numCodes; strParameters.additionalQueryPositionOffset = wordNum * sizeof(char) + sizeof(int2) * wordLookupDFA_numExtPositions; strParameters.blast_ungappedNominalTrigger = blast_ungappedNominalTrigger; strParameters.statistics_ungappedNominalDropoff = statistics_ungappedNominalDropoff; cudaMemcpy(parametersD, &strParameters, sizeof(struct parameters), cudaMemcpyHostToDevice); // Allocate word lookup table wordLookupDFA_size = sizeof(char) * wordNum + 2 * sizeof(int2) * wordLookupDFA_numExtPositions; cudaMalloc((void **)&wordLookupDFA_groupD, sizeof(struct groupFP) * groupNum); cudaMalloc((void **)&wordLookupDFAD, wordLookupDFA_size); cudaMemset(blast_numUngappedExtensionsD, 0, sizeof(uint4) * nTotalThreadNum); cudaMemset(blast_numHitsD, 0, sizeof(uint4) * nTotalThreadNum); cudaMemcpy(wordLookupDFA_groupD, wordLookupDFA_groupsFP, sizeof(struct groupFP) * groupNum, cudaMemcpyHostToDevice); // //Use constant memory for the word lookup table group // cudaMemcpyToSymbol(wordLookupDFA_groupsC, wordLookupDFA_groupsFP, //sizeof(struct groupFP) * groupNum); // // //Use constant memory to store score matrix // int scoreMatrixSize = encoding_numCodes * encoding_numCodes; // cudaMemcpyToSymbol(scoreMatrixC, // ((char *)scoreMatrixp->matrix) + sizeof(int2 *) * //encoding_numCodes, // sizeof(int2) * scoreMatrixSize); // //Use constant memory to store query sequence // unsigned char *tempQueryCode; // tempQueryCode = (unsigned char *)global_malloc(sizeof(unsigned char) * //(PSSMatrixFP.length + 2)); // memcpy(&tempQueryCode[1], PSSMatrixFP.queryCodes, sizeof(unsigned char) //* PSSMatrixFP.length); // tempQueryCode[0] = encoding_sentinalCode; // tempQueryCode[PSSMatrixFP.length + 1] = encoding_sentinalCode; // cudaMemcpyToSymbol(querySequenceC, tempQueryCode, sizeof(unsigned char) //* (PSSMatrixFP.length + 2)); // free(tempQueryCode); cudaMemcpy(wordLookupDFAD, wordLookupDFA, wordLookupDFA_size, cudaMemcpyHostToDevice); // uint4 iniVal = nTotalThreadNum; // get t1 gettimeofday(&t1, NULL); int4 numSequencesRound, numSequenceProcessed; numSequenceProcessed = 0; while (sequenceCount < numSequences) { // get t2 gettimeofday(&t2, NULL); memset(hitMatrix_offsetH, 0, sizeof(int4) * (nTotalThreadNum + 1)); roundStartAddress = sequenceData[sequenceCount].sequence - 1; for (i = 0; sequenceCount < numSequences; i++, sequenceCount++) { sequenceDataFP[i].descriptionLength = sequenceData[sequenceCount].descriptionLength; sequenceDataFP[i].descriptionStart = sequenceData[sequenceCount].descriptionStart; sequenceDataFP[i].sequenceLength = sequenceData[sequenceCount].sequenceLength; sequenceDataFP[i].encodedLength = sequenceData[sequenceCount].encodedLength; sequenceDataFP[i].offset = sequenceData[sequenceCount].sequence - roundStartAddress; // Calculate the longest sequence size aligned by the current thread if (sequenceDataFP[i].sequenceLength > hitMatrix_offsetH[(i % nTotalThreadNum) + 1]) { hitMatrix_offsetH[(i % nTotalThreadNum) + 1] = sequenceDataFP[i].sequenceLength; } // about 
130MB if (sequenceDataFP[i].offset + sequenceData[sequenceCount].encodedLength > 130000000) { i++; sequenceCount++; break; } } nRoundOffset = sequenceDataFP[i - 1].offset + sequenceDataFP[i - 1].encodedLength; numSequencesRound = i; // Calculate the offset of each thread for (i = 1; i < nTotalThreadNum + 1; i++) { hitMatrix_offsetH[i] += hitMatrix_offsetH[i - 1] + (PSSMatrixFP.length - parameters_wordSize + 1); } // copy offset info to device cudaMemcpy(hitMatrix_offsetD, hitMatrix_offsetH, (nTotalThreadNum + 1) * sizeof(int4), cudaMemcpyHostToDevice); // get t3 gettimeofday(&t3, NULL); // Allocate device memory // cudaMalloc((void **)&sequencesD, sizeof(unsigned char) * (nRoundOffset //+ 2)); // Allocate diagonal buffers int nElemNum = hitMatrix_offsetH[nTotalThreadNum]; cudaMalloc((void **)&hitMatrix_furthestD, sizeof(uint4) * nElemNum); cudaMemset(hitMatrix_furthestD, 0, sizeof(uint4) * nElemNum); cudaMemset(blast_numTriggerExtensionsD, 0, sizeof(uint4) * nTotalThreadNum); // Copy data to device cudaMemcpy(sequenceDataFPD, sequenceDataFP, sizeof(struct sequenceDataFP) * numSequencesRound, cudaMemcpyHostToDevice); cudaMemcpy(sequencesD, roundStartAddress, sizeof(unsigned char) * (nRoundOffset + 2), cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(global_sequenceCount, &iniVal, sizeof(uint4)); // get t4 gettimeofday(&t4, NULL); // all the required data are copied to device, launch the kernel search_protein1hitKernel << <dimGrid, dimBlock>>> (PSSMatrixFPD, matrixBodyD, sequenceDataFPD, sequencesD, parametersD, wordLookupDFA_groupD, wordLookupDFAD, blast_numUngappedExtensionsD, blast_numTriggerExtensionsD, blast_numHitsD, hitMatrix_furthestD, hitMatrix_offsetD, ungappedExtensionsD, numSequencesRound); cudaThreadSynchronize(); // get t5 gettimeofday(&t5, NULL); // Post processing // copy hit results back cudaMemcpy(blast_numTriggerExtensionsH, blast_numTriggerExtensionsD, sizeof(uint4) * nTotalThreadNum, cudaMemcpyDeviceToHost); cudaMemcpy(ungappedExtension, ungappedExtensionsD, nUngappedExtensionNum * sizeof(struct ungappedExtension), cudaMemcpyDeviceToHost); cudaMemcpy(blast_numUngappedExtensionsH, blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum, cudaMemcpyDeviceToHost); // get t6 gettimeofday(&t6, NULL); // Add hits to the alignment list for (i = 0; i < nTotalThreadNum; i++) { if (blast_numTriggerExtensionsH[i] > 0) { ungappedExtensionCur = ungappedExtension + i * UNGAPEXT_PER_THREAD; preSequenceCount = INT_MAX; for (j = 0; j < blast_numTriggerExtensionsH[i]; j++) { if (ungappedExtensionCur[j].sequenceCount != preSequenceCount) { alignments_createNew( sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].descriptionStart, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].descriptionLength, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].sequence, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].sequenceLength, sequenceData[ungappedExtensionCur[j].sequenceCount + numSequenceProcessed].encodedLength); preSequenceCount = ungappedExtensionCur[j].sequenceCount; } newUngappedExtension = (struct ungappedExtension *)memBlocks_newEntry( ungappedExtension_extensions); memcpy(newUngappedExtension, &ungappedExtensionCur[j], sizeof(struct ungappedExtension)); alignments_addUngappedExtension(newUngappedExtension); } blast_numTriggerExtensions += blast_numTriggerExtensionsH[i]; } } numSequenceProcessed += numSequencesRound; cudaFree(hitMatrix_furthestD); // cudaFree(sequencesD); // get t7 
gettimeofday(&t7, NULL); // aggregate execution time timeRecord.preProcessTime += (1000000 * (t3.tv_sec - t2.tv_sec) + t3.tv_usec - t2.tv_usec); timeRecord.dataCopyTimeH2D += (1000000 * (t4.tv_sec - t3.tv_sec) + t4.tv_usec - t3.tv_usec); timeRecord.searchTime += (1000000 * (t5.tv_sec - t4.tv_sec) + t5.tv_usec - t4.tv_usec); timeRecord.dataCopyTimeD2H += (1000000 * (t6.tv_sec - t5.tv_sec) + t6.tv_usec - t5.tv_usec); timeRecord.addUngappedExtensionTime += (1000000 * (t7.tv_sec - t6.tv_sec) + t7.tv_usec - t6.tv_usec); } // After all sequences are processed cudaMemcpy(blast_numUngappedExtensionsH, blast_numUngappedExtensionsD, sizeof(uint4) * nTotalThreadNum, cudaMemcpyDeviceToHost); cudaMemcpy(blast_numHitsH, blast_numHitsD, sizeof(uint4) * nTotalThreadNum, cudaMemcpyDeviceToHost); for (j = 0; j < nTotalThreadNum; j++) { blast_numUngappedExtensions += blast_numUngappedExtensionsH[j]; blast_numHits += blast_numHitsH[j]; } cudaFree(PSSMatrixFPD); cudaFree(matrixBodyD); cudaFree(sequenceDataFPD); cudaFree(ungappedExtensionsD); cudaFree(blast_numUngappedExtensionsD); cudaFree(blast_numTriggerExtensionsD); cudaFree(blast_numHitsD); cudaFree(parametersD); cudaFree(wordLookupDFA_groupD); cudaFree(wordLookupDFAD); cudaFree(hitMatrix_offsetD); cudaFree(sequencesD); free(sequenceDataFP); free(ungappedExtension); free(blast_numUngappedExtensionsH); free(blast_numTriggerExtensionsH); free(blast_numHitsH); free(hitMatrix_offsetH); // get t8 gettimeofday(&t8, NULL); // Record time timeRecord.iniTime = 1000000 * (t1.tv_sec - t0.tv_sec) + t1.tv_usec - t0.tv_usec; timeRecord.postProcessTime = 1000000 * (t8.tv_sec - t7.tv_sec) + t8.tv_usec - t7.tv_usec; timeRecord.hitUngappedExtTime = 1000000 * (t8.tv_sec - t1.tv_sec) + t8.tv_usec - t1.tv_usec; } int findStartLoc(struct ungappedExtension *ungappedExtensionsPtr, int threadNo, int itemNum) { int i; for (i = 0; i < itemNum; i++) { if (ungappedExtensionsPtr[i].tid == threadNo) { return i; } } return -1; } __device__ struct coordinate ungappedExtension_findProteinSeed_sm( struct ungappedExtension *ungappedExtension, // struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, unsigned char encoding_numCodes, // char *matrix unsigned char *querySequence, int2 *scoreMatrix) { // char *queryWindowStart, *queryWindowEnd; int2 queryWindowStart, queryWindowEnd; unsigned char *subjectWindowStart, *subjectWindowEnd; int2 bestQueryPosition; unsigned char *bestSubjectPosition; int4 bestSegmentScore; int4 nominalScore, count; struct coordinate seed; if (ungappedExtension->end.queryOffset - ungappedExtension->start.queryOffset < 11) { // The seed point is the middle of the extension seed.queryOffset = (ungappedExtension->end.queryOffset + ungappedExtension->start.queryOffset) / 2; seed.subjectOffset = (ungappedExtension->end.subjectOffset + ungappedExtension->start.subjectOffset) / 2; } else { // Else find the highest scoring length-11 segment of the ungapped extension // queryWindowStart = queryWindowEnd = matrix + // ungappedExtension->start.queryOffset * encoding_numCodes; queryWindowStart = queryWindowEnd = ungappedExtension->start.queryOffset + 1; // subjectWindowStart = subjectWindowEnd = subject + // ungappedExtension->start.subjectOffset; subjectWindowStart = subjectWindowEnd = subject + ungappedExtension->start.subjectOffset; // Find initial score for first 11 positions nominalScore = 0; count = 0; while (count < 11) { // nominalScore += queryWindowEnd[*subjectWindowEnd]; // queryWindowEnd += encoding_numCodes; nominalScore += scoreMatrix[querySequence[queryWindowEnd] * 
encoding_numCodes + (*subjectWindowEnd)]; queryWindowEnd++; subjectWindowEnd++; count++; } // queryWindowEnd -= encoding_numCodes; queryWindowEnd--; subjectWindowEnd--; // By default first-11 positions gives best position and score bestQueryPosition = queryWindowStart; bestSubjectPosition = subjectWindowStart; bestSegmentScore = nominalScore; // Now slide the window across and record the better scores/positions // while (queryWindowEnd < matrix + ungappedExtension->end.queryOffset * // encoding_numCodes) while (queryWindowEnd < ungappedExtension->end.queryOffset + 1) { // Advance window end, add new position value // queryWindowEnd += encoding_numCodes; queryWindowEnd++; subjectWindowEnd++; // nominalScore += queryWindowEnd[*subjectWindowEnd]; nominalScore += scoreMatrix[querySequence[queryWindowEnd] * encoding_numCodes + (*subjectWindowEnd)]; // Remove position that we will leave behind // nominalScore -= queryWindowStart[*subjectWindowStart]; nominalScore -= scoreMatrix[querySequence[queryWindowStart] * encoding_numCodes + (*subjectWindowStart)]; // queryWindowStart += encoding_numCodes; queryWindowStart++; subjectWindowStart++; // Check if best window position yet if (nominalScore > bestSegmentScore) { bestSegmentScore = nominalScore; bestQueryPosition = queryWindowStart; bestSubjectPosition = subjectWindowStart; } } // Middle of the best window is the seed position seed.queryOffset = (bestQueryPosition - 1) + 5; seed.subjectOffset = bestSubjectPosition + 5 - subject; } return seed; } __global__ void ungappedExtension_twoHitExtendG_findProteinSeed( struct parameters *parametersFP_g, struct PSSMatrixFP *PSSMatrixFP_g, unsigned char *sequence, struct sequenceDataFP *sequenceDataFP, struct ungappedExtension *ungappedExtension_extensionsp, uint4 *blast_numTriggerExtensions) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int tt = threadIdx.x; struct parameters *parametersFP = parametersFP_g; __shared__ int2 scoreMatrix_s[1024]; extern __shared__ unsigned char querySequence_s[]; unsigned encoding_numCodes = parametersFP->encoding_numCodes; for (unsigned int ii = tt; ii < encoding_numCodes * encoding_numCodes; ii += blockDim.x) { scoreMatrix_s[ii] = scoreMatrixC[ii]; } for (unsigned int ii = tt; ii < PSSMatrixFP_g->length + 2; ii += blockDim.x) { querySequence_s[ii] = querySequenceC[ii]; } __syncthreads(); struct ungappedExtension *ungappedExtension_current = ungappedExtension_extensionsp + tid * parametersFP->ungappedExtensionsPerThread; // unsigned int tBins = BLOCK_SIZE * NUM_BLOCK; // for( unsigned int bb = tid; bb < tBins; bb += gridDim.x * blockDim.x) { unsigned int numExtensions = blast_numTriggerExtensions[tid]; for (unsigned int w_id = 0; w_id < numExtensions; w_id++) { int4 sequenceCount = ungappedExtension_current[w_id].sequenceCount; unsigned char *subject = sequence + sequenceDataFP[sequenceCount].offset; ungappedExtension_current[w_id].seed = ungappedExtension_findProteinSeed_sm( &(ungappedExtension_current[w_id]), subject, encoding_numCodes, querySequence_s, scoreMatrix_s); } } } __global__ void ungappedExtension_twoHitExtendG_bin_sorted_sm_s( struct parameters *parametersFP, uint64_t *HitInfo_g, uint4 *blast_numUngappedExtensions, unsigned char *sequence, struct PSSMatrixFP *PSSMatrixFP, struct sequenceDataFP *sequenceDataFP, struct ungappedExtension *ungappedExtension_extensionsp, uint4 *blast_numTriggerExtensions, int *numOneHitsD, int *numExtD, int *binOffset_g) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int tt = threadIdx.x; int laneId = tt & 31; // int warpId = 
tid >> 5; struct ungappedExtension *ungappedExtension_additional = ungappedExtension_extensionsp + parametersFP->ungappedExtAdditionalStartLoc; unsigned char *ungappedExtension_subjectEndReachedFP = 0; __shared__ int2 scoreMatrix_s[1024]; extern __shared__ unsigned char querySequence_s[]; unsigned encoding_numCodes = parametersFP->encoding_numCodes; for (unsigned int ii = tt; ii < encoding_numCodes * encoding_numCodes; ii += blockDim.x) { scoreMatrix_s[ii] = scoreMatrixC[ii]; } for (unsigned int ii = tt; ii < PSSMatrixFP->length + 2; ii += blockDim.x) { querySequence_s[ii] = querySequenceC[ii]; } __syncthreads(); // uint4 numOfTriggerExtensions_s = 0; __shared__ uint4 numOfTriggerExtensions_s[BLOCK_SIZE]; uint4 *numOfTriggerExtensions_w = numOfTriggerExtensions_s + (tt >> 5 << 5); // uint4 *numOfTriggerExtensions_w = numOfTriggerExtensions_s; numOfTriggerExtensions_w[laneId] = 0; // numOfTriggerExtensions_w[tt] = 0; uint4 blast_numUngappedExtensions_s = 0; // ungappedExtension_extensionsp->start.subjectOffset = 0; struct ungappedExtension *ungappedExtension_w = ungappedExtension_extensionsp + (tid >> 5 << 5) * parametersFP->ungappedExtensionsPerThread; // struct ungappedExtension *ungappedExtension_w = // ungappedExtension_extensionsp + // blockIdx.x * BLOCK_SIZE * parametersFP->ungappedExtensionsPerThread; unsigned int num_bins = BLOCK_SIZE * NUM_BLOCK * BIN_X; for (unsigned int bb = tid; bb < num_bins; bb += gridDim.x * blockDim.x) // unsigned int b_start = warpId << BIN_POWER; // unsigned int b_end = (warpId + 1) << BIN_POWER; // for(unsigned int bb = b_start + laneId; bb < b_end; bb += 32) { uint64_t *HitInfo_t = HitInfo_g + binOffset_g[bb] - numOneHitsD[bb]; unsigned int numHits_t = numExtD[bb]; uint64_t prev_ext = 0; for (unsigned int w_id = 0; w_id < numHits_t; w_id += 2) { uint64_t prev = HitInfo_t[w_id] > prev_ext ? 
HitInfo_t[w_id] : prev_ext; uint64_t curr = HitInfo_t[w_id + 1]; if (prev < curr) { blast_numUngappedExtensions_s++; uint4 sequenceCount = (uint4)(curr >> 32); int2 diagonal = (int2)((curr >> 16) & 0xffff) - 0x3fff; uint2 subjectOffset = (uint2)(curr & 0xffff); uint2 queryOffset = subjectOffset - diagonal; unsigned char *subject = sequence + sequenceDataFP[sequenceCount].offset; unsigned char *address = subject + subjectOffset; unsigned char *lastHit_addr = subject + (uint2)(prev & 0xffff); // int bin_id = sequenceCount & 127; int bin_id = sequenceCount & 31; struct ungappedExtension *ungappedExtension_current = ungappedExtension_w + bin_id * parametersFP->ungappedExtensionsPerThread; // If only one hit triggered this extension ungappedExtension_twoHitExtendD_sm( sequence, queryOffset, address, lastHit_addr, subject, &ungappedExtension_subjectEndReachedFP, parametersFP->encoding_numCodes, parametersFP->statistics_ungappedNominalDropoff, parametersFP->blast_ungappedNominalTrigger, parametersFP->ungappedExtensionsPerThread, ungappedExtension_current, ungappedExtension_additional, &(numOfTriggerExtensions_w[bin_id]), sequenceCount, scoreMatrix_s, querySequence_s); prev_ext = (curr & 0xffffffffffff0000) + (ungappedExtension_subjectEndReachedFP - subject); } } } blast_numTriggerExtensions[tid] = numOfTriggerExtensions_w[laneId]; blast_numUngappedExtensions[tid] += blast_numUngappedExtensions_s; } __device__ struct coordinate ungappedExtension_findProteinSeed( struct ungappedExtension *ungappedExtension, struct PSSMatrixFP PSSMatrixFP, unsigned char *subject, unsigned char encoding_numCodes) { int2 *queryWindowStart, *queryWindowEnd; unsigned char *subjectWindowStart, *subjectWindowEnd; int2 *bestQueryPosition; unsigned char *bestSubjectPosition; int4 bestSegmentScore; int4 nominalScore, count; struct coordinate seed; if (ungappedExtension->end.queryOffset - ungappedExtension->start.queryOffset < 11) { // The seed point is the middle of the extension seed.queryOffset = (ungappedExtension->end.queryOffset + ungappedExtension->start.queryOffset) / 2; seed.subjectOffset = (ungappedExtension->end.subjectOffset + ungappedExtension->start.subjectOffset) / 2; } else { // Else find the highest scoring length-11 segment of the ungapped extension queryWindowStart = queryWindowEnd = PSSMatrixFP.matrix + ungappedExtension->start.queryOffset * encoding_numCodes; subjectWindowStart = subjectWindowEnd = subject + ungappedExtension->start.subjectOffset; // Find initial score for first 11 positions nominalScore = 0; count = 0; while (count < 11) { nominalScore += queryWindowEnd[*subjectWindowEnd]; queryWindowEnd += encoding_numCodes; subjectWindowEnd++; count++; } queryWindowEnd -= encoding_numCodes; subjectWindowEnd--; // By default first-11 positions gives best position and score bestQueryPosition = queryWindowStart; bestSubjectPosition = subjectWindowStart; bestSegmentScore = nominalScore; // Now slide the window across and record the better scores/positions while (queryWindowEnd < PSSMatrixFP.matrix + ungappedExtension->end.queryOffset * encoding_numCodes) { // Advance window end, add new position value queryWindowEnd += encoding_numCodes; subjectWindowEnd++; nominalScore += queryWindowEnd[*subjectWindowEnd]; // Remove position that we will leave behind nominalScore -= queryWindowStart[*subjectWindowStart]; queryWindowStart += encoding_numCodes; subjectWindowStart++; // Check if best window position yet if (nominalScore > bestSegmentScore) { bestSegmentScore = nominalScore; bestQueryPosition = 
queryWindowStart; bestSubjectPosition = subjectWindowStart; } } // Middle of the best window is the seed position seed.queryOffset = (bestQueryPosition - PSSMatrixFP.matrix) / encoding_numCodes + 5; seed.subjectOffset = bestSubjectPosition + 5 - subject; } return seed; }
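
// Illustrative sketch only (not part of the original source): the two-hit
// pipeline above packs each detected hit into a single uint64_t as
//   bits 63..32  subject sequence index within the current batch,
//   bits 31..16  diagonal (subjectOffset - queryOffset) biased by 0x3fff,
//   bits 15..0   subject offset of the hit,
// and later unpacks it in ungappedExtension_twoHitExtendG_bin_sorted_sm_s.
// The helper names below are hypothetical and assume both 16-bit fields fit
// in their ranges; they merely restate that encoding for readability.
__device__ __forceinline__ uint64_t packHitSketch(uint32_t sequenceIndex,
                                                  int32_t diagonal,
                                                  uint32_t subjectOffset) {
  return ((uint64_t)sequenceIndex << 32) |
         (((uint64_t)(diagonal + 0x3fff) & 0xffff) << 16) |
         ((uint64_t)subjectOffset & 0xffff);
}

__device__ __forceinline__ void unpackHitSketch(uint64_t hit,
                                                uint32_t *sequenceIndex,
                                                int32_t *diagonal,
                                                uint32_t *subjectOffset) {
  *sequenceIndex = (uint32_t)(hit >> 32);
  *diagonal = (int32_t)((hit >> 16) & 0xffff) - 0x3fff;
  *subjectOffset = (uint32_t)(hit & 0xffff);
}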
b3811085bc118fbbf26f92900f97907304980691.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/phi/backends/gpu/gpu_device_function.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace operators { namespace { inline int DivUp(int x, int y) { return (x + y - 1) / y; } // Forward prop (shared memory version, for small future_context) template <typename T> __global__ void RowConvForwardSharedMemory(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (d < input_dim) ? sw[w * blx + thx] * in[(start + k + w) * input_dim + d] : static_cast<T>(0); } if (d < input_dim) { out[(start + k) * input_dim + d] = sum; } } } } // Forward prop (naive version) template <typename T> __global__ void RowConvForward(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (wt[w * input_dim + d] * in[(start + k + w) * input_dim + d]); } out[(start + k) * input_dim + d] = sum; } } } // Compute input gradient (shared memory version, for small future_context) template <typename T> __global__ void RowConvGradInputSharedMemory(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? 
wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); int current_timesteps = 0; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (d < input_dim) ? (sw[w * blx + thx] * dout[(k + start - w) * input_dim + d]) : static_cast<T>(0); } if (d < input_dim) { din[(k + start) * input_dim + d] = sum; } } } } // Compute input gradient (Naive version) template <typename T> __global__ void RowConvGradInput(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; int current_timesteps = 0; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (wt[w * input_dim + d] * dout[(k + start - w) * input_dim + d]); } din[(k + start) * input_dim + d] = sum; } } } // Compute W gradient (small future_context version) template <typename T> __global__ void RowConvGradFilterImproved(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; int xdim_sh_in = block_y; int xdim_sh_dout = block_y; int ydim_sh_in = block_x; int ydim_sh_dout = block_x + future_context - 1; int ydim_sh_dfilter = block_y; T *sh_in = mem; T *sh_dout = &mem[xdim_sh_in * ydim_sh_in]; T *sh_dfilter = &mem[xdim_sh_in * ydim_sh_in + xdim_sh_dout * ydim_sh_dout]; if (thy < future_context) { sh_dfilter[thy * ydim_sh_dfilter + thx] = static_cast<T>(0); } __syncthreads(); // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * ydim_sh_in + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : T(0); sh_dout[thx * ydim_sh_dout + thy + future_context - 1] = (d < input_dim && pos < end) ? dout[pos * input_dim + d] : T(0); __syncthreads(); if (thy < future_context - 1) { int pos_offset = pos - future_context + 1; sh_dout[thx * ydim_sh_dout + thy] = (d < input_dim && pos_offset >= start) ? dout[pos_offset * input_dim + d] : T(0); } __syncthreads(); for (int w = 0; w < future_context; w++) { T val = sh_in[thy * ydim_sh_in + thx] * sh_dout[thy * ydim_sh_dout + thx + future_context - 1 - w]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. 
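          // Each step adds the partial product held by the lane `offset`
          // positions higher in the warp; after offsets 16, 8, 4, 2, 1,
          // lane 0 (thx == 0) holds the warp-wide sum, which is then folded
          // into the shared dfilter accumulator below.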
val += phi::backends::gpu::CudaShuffleDownSync(mask, val, offset); } __syncthreads(); if (thx == 0) { sh_dfilter[w * ydim_sh_dfilter + thy] += val; } __syncthreads(); } } } for (int w = thy; (w < future_context) && (d < input_dim); w += bly) { dfilter[w * input_dim + d] += sh_dfilter[w * ydim_sh_dfilter + thx]; } } // Compute weight(filter) gradient template <typename T> __global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; T *sh_in = mem; T *sh_dout = &mem[block_x * block_y]; // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * block_y + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : 0.0; __syncthreads(); for (int w = 0; w < future_context; w++) { sh_dout[thx * block_y + thy] = (d < input_dim && (k - w) >= 0 && (k - w) < current_timesteps) ? dout[(pos - w) * input_dim + d] : 0.0; __syncthreads(); T val = sh_in[thy * block_y + thx] * sh_dout[thy * block_y + thx]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. val += phi::backends::gpu::CudaShuffleDownSync(mask, val, offset); } __syncthreads(); if (thx == 0 && (gx + thy) < input_dim) { dfilter[w * input_dim + gx + thy] += val; } } } } } } // namespace template <typename T, typename DeviceContext> class RowConvKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<phi::DenseTensor>("X"); auto *Filter = context.Input<phi::DenseTensor>("Filter"); auto *Out = context.Output<phi::DenseTensor>("Out"); const T *in = X->data<T>(); const T *weight = Filter->data<T>(); T *out = Out->mutable_data<T>(context.GetPlace()); bool is_tensor = X->lod().empty(); int batch_size = 0; if (is_tensor) { batch_size = X->dims()[0]; } else { batch_size = X->lod()[0].size() - 1; } int input_dim = 0; phi::Vector<size_t> batch_indices(batch_size + 1); int timesteps = X->dims()[1]; if (is_tensor) { for (int i = 0; i < batch_size + 1; i++) { batch_indices[i] = i * timesteps; } input_dim = X->dims()[2]; } else { batch_indices = X->lod()[0]; input_dim = X->dims()[1]; } int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; phi::MixVector<size_t> mix_vector(&batch_indices); size_t *idx = mix_vector.CUDAMutableData(context.GetPlace()); auto stream = context.cuda_device_context().stream(); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); hipLaunchKernelGGL(( RowConvForwardSharedMemory<T>) , dim3(grid_dim), dim3(block_dim), mem_per_block, stream, in, weight, num_sequence, input_dim, future_context, idx, out); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); hipLaunchKernelGGL(( RowConvForward<T>), dim3(grid_dim), dim3(block_dim), 0, stream, in, weight, 
num_sequence, input_dim, future_context, idx, out); } mix_vector.CopyToCPU(); } }; template <typename T, typename DeviceContext> class RowConvGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<phi::DenseTensor>("X"); auto *Filter = context.Input<phi::DenseTensor>("Filter"); auto *dOut = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); const T *in = X->data<T>(); const T *weights = Filter->data<T>(); const T *dout = dOut->data<T>(); phi::DenseTensor *dX = context.Output<phi::DenseTensor>(framework::GradVarName("X")); phi::DenseTensor *dFilter = context.Output<phi::DenseTensor>(framework::GradVarName("Filter")); int batch_size = 0; bool is_tensor = X->lod().empty(); if (is_tensor) { batch_size = X->dims()[0]; } else { batch_size = X->lod()[0].size() - 1; } int input_dim = 0; phi::Vector<size_t> batch_indices(batch_size + 1); int timesteps = X->dims()[1]; if (is_tensor) { for (int i = 0; i < batch_size + 1; i++) { batch_indices[i] = i * timesteps; } input_dim = X->dims()[2]; } else { batch_indices = X->lod()[0]; input_dim = X->dims()[1]; } // int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; phi::MixVector<size_t> mixv_batch_indices(&batch_indices); size_t *idx = mixv_batch_indices.CUDAMutableData(context.GetPlace()); auto &device_ctx = context.cuda_device_context(); phi::funcs::SetConstant<phi::GPUContext, T> zero; if (dFilter) { T *dfilter = dFilter->mutable_data<T>(context.GetPlace()); zero(device_ctx, dFilter, static_cast<T>(0.0)); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_y * block_x + block_y * (block_x + future_context - 1) + future_context * block_y) * sizeof(T); hipLaunchKernelGGL(( RowConvGradFilterImproved<T>) , dim3(grid_dim), dim3(block_dim), mem_per_block, device_ctx.stream(), in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_x * block_y * 2) * sizeof(T); // For 2 arrays of size 32x32 hipLaunchKernelGGL(( RowConvGradFilter<T>) , dim3(grid_dim), dim3(block_dim), mem_per_block, device_ctx.stream(), in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } } if (dX) { T *din = dX->mutable_data<T>(context.GetPlace()); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); hipLaunchKernelGGL(( RowConvGradInputSharedMemory<T>) , dim3(grid_dim), dim3(block_dim), mem_per_block, device_ctx.stream(), dout, weights, num_sequence, input_dim, future_context, idx, din); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); hipLaunchKernelGGL(( RowConvGradInput<T>), dim3(grid_dim), dim3(block_dim), 0, device_ctx.stream(), dout, weights, num_sequence, input_dim, future_context, idx, din); } } mixv_batch_indices.CopyToCPU(); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL( row_conv, GPU, ALL_LAYOUT, ops::RowConvKernel, float) {} PD_REGISTER_STRUCT_KERNEL( row_conv_grad, GPU, ALL_LAYOUT, 
ops::RowConvGradKernel, float) {}
b3811085bc118fbbf26f92900f97907304980691.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/phi/backends/gpu/gpu_device_function.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace operators { namespace { inline int DivUp(int x, int y) { return (x + y - 1) / y; } // Forward prop (shared memory version, for small future_context) template <typename T> __global__ void RowConvForwardSharedMemory(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (d < input_dim) ? sw[w * blx + thx] * in[(start + k + w) * input_dim + d] : static_cast<T>(0); } if (d < input_dim) { out[(start + k) * input_dim + d] = sum; } } } } // Forward prop (naive version) template <typename T> __global__ void RowConvForward(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (wt[w * input_dim + d] * in[(start + k + w) * input_dim + d]); } out[(start + k) * input_dim + d] = sum; } } } // Compute input gradient (shared memory version, for small future_context) template <typename T> __global__ void RowConvGradInputSharedMemory(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? 
wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); int current_timesteps = 0; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (d < input_dim) ? (sw[w * blx + thx] * dout[(k + start - w) * input_dim + d]) : static_cast<T>(0); } if (d < input_dim) { din[(k + start) * input_dim + d] = sum; } } } } // Compute input gradient (Naive version) template <typename T> __global__ void RowConvGradInput(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; int current_timesteps = 0; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (wt[w * input_dim + d] * dout[(k + start - w) * input_dim + d]); } din[(k + start) * input_dim + d] = sum; } } } // Compute W gradient (small future_context version) template <typename T> __global__ void RowConvGradFilterImproved(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; int xdim_sh_in = block_y; int xdim_sh_dout = block_y; int ydim_sh_in = block_x; int ydim_sh_dout = block_x + future_context - 1; int ydim_sh_dfilter = block_y; T *sh_in = mem; T *sh_dout = &mem[xdim_sh_in * ydim_sh_in]; T *sh_dfilter = &mem[xdim_sh_in * ydim_sh_in + xdim_sh_dout * ydim_sh_dout]; if (thy < future_context) { sh_dfilter[thy * ydim_sh_dfilter + thx] = static_cast<T>(0); } __syncthreads(); // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * ydim_sh_in + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : T(0); sh_dout[thx * ydim_sh_dout + thy + future_context - 1] = (d < input_dim && pos < end) ? dout[pos * input_dim + d] : T(0); __syncthreads(); if (thy < future_context - 1) { int pos_offset = pos - future_context + 1; sh_dout[thx * ydim_sh_dout + thy] = (d < input_dim && pos_offset >= start) ? dout[pos_offset * input_dim + d] : T(0); } __syncthreads(); for (int w = 0; w < future_context; w++) { T val = sh_in[thy * ydim_sh_in + thx] * sh_dout[thy * ydim_sh_dout + thx + future_context - 1 - w]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. 
val += phi::backends::gpu::CudaShuffleDownSync(mask, val, offset); } __syncthreads(); if (thx == 0) { sh_dfilter[w * ydim_sh_dfilter + thy] += val; } __syncthreads(); } } } for (int w = thy; (w < future_context) && (d < input_dim); w += bly) { dfilter[w * input_dim + d] += sh_dfilter[w * ydim_sh_dfilter + thx]; } } // Compute weight(filter) gradient template <typename T> __global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; T *sh_in = mem; T *sh_dout = &mem[block_x * block_y]; // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * block_y + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : 0.0; __syncthreads(); for (int w = 0; w < future_context; w++) { sh_dout[thx * block_y + thy] = (d < input_dim && (k - w) >= 0 && (k - w) < current_timesteps) ? dout[(pos - w) * input_dim + d] : 0.0; __syncthreads(); T val = sh_in[thy * block_y + thx] * sh_dout[thy * block_y + thx]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. val += phi::backends::gpu::CudaShuffleDownSync(mask, val, offset); } __syncthreads(); if (thx == 0 && (gx + thy) < input_dim) { dfilter[w * input_dim + gx + thy] += val; } } } } } } // namespace template <typename T, typename DeviceContext> class RowConvKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<phi::DenseTensor>("X"); auto *Filter = context.Input<phi::DenseTensor>("Filter"); auto *Out = context.Output<phi::DenseTensor>("Out"); const T *in = X->data<T>(); const T *weight = Filter->data<T>(); T *out = Out->mutable_data<T>(context.GetPlace()); bool is_tensor = X->lod().empty(); int batch_size = 0; if (is_tensor) { batch_size = X->dims()[0]; } else { batch_size = X->lod()[0].size() - 1; } int input_dim = 0; phi::Vector<size_t> batch_indices(batch_size + 1); int timesteps = X->dims()[1]; if (is_tensor) { for (int i = 0; i < batch_size + 1; i++) { batch_indices[i] = i * timesteps; } input_dim = X->dims()[2]; } else { batch_indices = X->lod()[0]; input_dim = X->dims()[1]; } int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; phi::MixVector<size_t> mix_vector(&batch_indices); size_t *idx = mix_vector.CUDAMutableData(context.GetPlace()); auto stream = context.cuda_device_context().stream(); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); RowConvForwardSharedMemory<T> <<<grid_dim, block_dim, mem_per_block, stream>>>( in, weight, num_sequence, input_dim, future_context, idx, out); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); RowConvForward<T><<<grid_dim, block_dim, 0, stream>>>( in, weight, num_sequence, input_dim, future_context, idx, out); } 
mix_vector.CopyToCPU(); } }; template <typename T, typename DeviceContext> class RowConvGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<phi::DenseTensor>("X"); auto *Filter = context.Input<phi::DenseTensor>("Filter"); auto *dOut = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); const T *in = X->data<T>(); const T *weights = Filter->data<T>(); const T *dout = dOut->data<T>(); phi::DenseTensor *dX = context.Output<phi::DenseTensor>(framework::GradVarName("X")); phi::DenseTensor *dFilter = context.Output<phi::DenseTensor>(framework::GradVarName("Filter")); int batch_size = 0; bool is_tensor = X->lod().empty(); if (is_tensor) { batch_size = X->dims()[0]; } else { batch_size = X->lod()[0].size() - 1; } int input_dim = 0; phi::Vector<size_t> batch_indices(batch_size + 1); int timesteps = X->dims()[1]; if (is_tensor) { for (int i = 0; i < batch_size + 1; i++) { batch_indices[i] = i * timesteps; } input_dim = X->dims()[2]; } else { batch_indices = X->lod()[0]; input_dim = X->dims()[1]; } // int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; phi::MixVector<size_t> mixv_batch_indices(&batch_indices); size_t *idx = mixv_batch_indices.CUDAMutableData(context.GetPlace()); auto &device_ctx = context.cuda_device_context(); phi::funcs::SetConstant<phi::GPUContext, T> zero; if (dFilter) { T *dfilter = dFilter->mutable_data<T>(context.GetPlace()); zero(device_ctx, dFilter, static_cast<T>(0.0)); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_y * block_x + block_y * (block_x + future_context - 1) + future_context * block_y) * sizeof(T); RowConvGradFilterImproved<T> <<<grid_dim, block_dim, mem_per_block, device_ctx.stream()>>>( in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_x * block_y * 2) * sizeof(T); // For 2 arrays of size 32x32 RowConvGradFilter<T> <<<grid_dim, block_dim, mem_per_block, device_ctx.stream()>>>( in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } } if (dX) { T *din = dX->mutable_data<T>(context.GetPlace()); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); RowConvGradInputSharedMemory<T> <<<grid_dim, block_dim, mem_per_block, device_ctx.stream()>>>( dout, weights, num_sequence, input_dim, future_context, idx, din); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); RowConvGradInput<T><<<grid_dim, block_dim, 0, device_ctx.stream()>>>( dout, weights, num_sequence, input_dim, future_context, idx, din); } } mixv_batch_indices.CopyToCPU(); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL( row_conv, GPU, ALL_LAYOUT, ops::RowConvKernel, float) {} PD_REGISTER_STRUCT_KERNEL( row_conv_grad, GPU, ALL_LAYOUT, ops::RowConvGradKernel, float) {}
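A note on the launch-syntax difference visible in the pair above: apart from the hipify banner, the added hip_runtime.h include, and hip-prefixed runtime types and calls where they appear, the .hip and .cu versions of this row_conv file match, and the conversion's main rewrite is turning each CUDA triple-chevron kernel launch into hipLaunchKernelGGL with the grid, block, shared-memory-bytes, and stream arguments in the same order ahead of the kernel arguments. A minimal side-by-side sketch, taken from the forward-path launch in both files:

// CUDA form (from the .cu file):
//   RowConvForward<T><<<grid_dim, block_dim, 0, stream>>>(
//       in, weight, num_sequence, input_dim, future_context, idx, out);
// HIP form emitted by hipify (from the .hip file):
//   hipLaunchKernelGGL(( RowConvForward<T>), dim3(grid_dim), dim3(block_dim), 0, stream,
//       in, weight, num_sequence, input_dim, future_context, idx, out);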
b5cbe64b4c41ad236fb591ab2b61813e9d4926c5.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "potForce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *PairWise = NULL;
            hipMalloc(&PairWise, XSIZE*YSIZE);
            int N = XSIZE*YSIZE;
            float *PotOut = NULL;
            hipMalloc(&PotOut, XSIZE*YSIZE);
            float *ForceOut = NULL;
            hipMalloc(&ForceOut, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( potForce), dim3(gridBlock),dim3(threadBlock), 0, 0, PairWise,N,PotOut,ForceOut);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( potForce), dim3(gridBlock),dim3(threadBlock), 0, 0, PairWise,N,PotOut,ForceOut);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( potForce), dim3(gridBlock),dim3(threadBlock), 0, 0, PairWise,N,PotOut,ForceOut);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
b5cbe64b4c41ad236fb591ab2b61813e9d4926c5.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "potForce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *PairWise = NULL;
            cudaMalloc(&PairWise, XSIZE*YSIZE);
            int N = XSIZE*YSIZE;
            float *PotOut = NULL;
            cudaMalloc(&PotOut, XSIZE*YSIZE);
            float *ForceOut = NULL;
            cudaMalloc(&ForceOut, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            potForce<<<gridBlock,threadBlock>>>(PairWise,N,PotOut,ForceOut);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                potForce<<<gridBlock,threadBlock>>>(PairWise,N,PotOut,ForceOut);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                potForce<<<gridBlock,threadBlock>>>(PairWise,N,PotOut,ForceOut);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
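The harness above launches the kernel once, synchronizes, performs 10 warm-up launches, and then wraps 1000 launches in steady_clock timestamps. Kernel launches are asynchronous, so without a device synchronization (or an event wait) after the timed loop the measured interval largely reflects launch/enqueue overhead rather than kernel execution time. A minimal sketch of an event-based variant for the CUDA version, assuming the intent is to time completed work (this is not taken from the file above, and the event variable names are hypothetical):

// Sketch: time the 1000 launches including kernel completion, using CUDA events.
cudaEvent_t start_ev, stop_ev;                      // hypothetical names, not in the original harness
cudaEventCreate(&start_ev);
cudaEventCreate(&stop_ev);
cudaEventRecord(start_ev);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    potForce<<<gridBlock,threadBlock>>>(PairWise,N,PotOut,ForceOut);
}
cudaEventRecord(stop_ev);
cudaEventSynchronize(stop_ev);                      // block until the last launch has finished
float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, start_ev, stop_ev);
cudaEventDestroy(start_ev);
cudaEventDestroy(stop_ev);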
d1f2a7d1854f68f354d997cf038698f196c6b9da.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_coords_3D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *coords = NULL;
            hipMalloc(&coords, XSIZE*YSIZE);
            size_t z = 1;
            size_t y = 1;
            size_t x = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( set_coords_3D), dim3(gridBlock),dim3(threadBlock), 0, 0, coords,z,y,x);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( set_coords_3D), dim3(gridBlock),dim3(threadBlock), 0, 0, coords,z,y,x);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( set_coords_3D), dim3(gridBlock),dim3(threadBlock), 0, 0, coords,z,y,x);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
d1f2a7d1854f68f354d997cf038698f196c6b9da.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_coords_3D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *coords = NULL;
            cudaMalloc(&coords, XSIZE*YSIZE);
            size_t z = 1;
            size_t y = 1;
            size_t x = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            set_coords_3D<<<gridBlock,threadBlock>>>(coords,z,y,x);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                set_coords_3D<<<gridBlock,threadBlock>>>(coords,z,y,x);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                set_coords_3D<<<gridBlock,threadBlock>>>(coords,z,y,x);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
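Both benchmark harnesses in this pair size the grid by incrementing iXSIZE and iYSIZE until they are divisible by the block dimensions and then dividing. For positive sizes this is the usual ceiling division, so the two while loops could be written as a one-liner such as the following sketch (same resulting grid):

// Sketch: ceiling-division grid sizing equivalent to the rounding loops above.
dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);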
539ee60923f16e38d213f8137ad36e544a36bd8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_calculus.h" #include "calculus.h" #include "math.h" #include <stdio.h> #include <stdlib.h> #include <omp.h> #define BLOCKDIM 32 #define MAXARRAYSIZE 130000000 // supoe GPU RAM < 1GB hipError_t checkCuda(hipError_t result) { if (result != hipSuccess) fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); return result; } // realiza a reducao do vetor g_idata e devolve o resultado em g_odata __global__ void reduction(double *g_idata, long long n, double *g_odata) { extern __shared__ double sdata[]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem unsigned int s = blockDim.x / 2; while (s > 0) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); s /= 2; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } } // recebe um vetor x e calcula f(x) inplace __global__ void calculate_fx(double *x, long long n, long long k, long long M) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) x[i] = (sin((2 * M + 1) * M_PI * x[i]) * cos(2 * M_PI * k * x[i])) / sin(M_PI * x[i]); } // recebe um vetor f(x) e calcula f(x)^2 inplace __global__ void calculate_fx_2(double *fx, long long n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) fx[i] = fx[i] * fx[i]; } // cria um vetor de tamanho n com x aleatrio entre (0, 0.5] static double *create_random_x(long long n) { double *x = create_empty_array(n); for (int i = 0; i < n; i++) x[i] = get_random_x(); return x; } // recebe N, k, M e calcula <f> e <f^2> void gpu_get_f(long long N, long long k, long long M, double *f, double *f2) { *f = *f2 = 0; for (long long task = N; task > 0; task -= MAXARRAYSIZE) { long long n = (task < MAXARRAYSIZE) ? 
task : MAXARRAYSIZE; long long grid_dim = (n + BLOCKDIM-1) / BLOCKDIM; double *x_h = create_random_x(n); double *result_h = (double *) malloc(grid_dim * sizeof(double)); double *x_d, *result_d; checkCuda( hipMalloc((void **) &x_d, n * sizeof(double)) ); checkCuda( hipMalloc((void **) &result_d, grid_dim * sizeof(double)) ); // calcula f(x) no device checkCuda( hipMemcpy( x_d, x_h, n * sizeof(double), hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( calculate_fx), dim3(grid_dim), dim3(BLOCKDIM), 0, 0, x_d, n, k, M); // reduz f(x) no device e termina no host hipLaunchKernelGGL(( reduction) , dim3(grid_dim), dim3(BLOCKDIM), BLOCKDIM, 0, x_d, n, result_d); checkCuda( hipMemcpy( result_h, result_d, grid_dim * sizeof(double), hipMemcpyDeviceToHost) );; for (int i = 0; i < grid_dim; i++) *f += result_h[i]; // calcula f(x)^2 no device hipLaunchKernelGGL(( calculate_fx_2), dim3(grid_dim), dim3(BLOCKDIM), 0, 0, x_d, n); // reduz f(x)^2 no device e termina no host hipLaunchKernelGGL(( reduction) , dim3(grid_dim), dim3(BLOCKDIM), BLOCKDIM, 0, x_d, n, result_d); checkCuda( hipMemcpy( result_h, result_d, grid_dim * sizeof(double), hipMemcpyDeviceToHost) ); for (int i = 0; i < grid_dim; i++) *f2 += result_h[i]; // limpeza checkCuda( hipFree(x_d) ); checkCuda( hipFree(result_d) ); free(x_h); free(result_h); } *f /= N; // encontra <f> *f2 /= N; // encontra <f^2> } // recebe N, k, M, calcula os dois resultados da integral de Monte Carlo e devolve o tempo de execucao double gpu_monte_carlo(long long N, long long k, long long M, double *result_sum, double *result_sub) { double f, f2, start, finish; start = omp_get_wtime(); gpu_get_f(N, k, M, &f, &f2); *result_sum = monte_carlo_sum(f, f2, N); *result_sub = monte_carlo_sub(f, f2, N); finish = omp_get_wtime(); return finish - start; }
539ee60923f16e38d213f8137ad36e544a36bd8d.cu
#include "gpu_calculus.h" #include "calculus.h" #include "math.h" #include <stdio.h> #include <stdlib.h> #include <omp.h> #define BLOCKDIM 32 #define MAXARRAYSIZE 130000000 // supoe GPU RAM < 1GB cudaError_t checkCuda(cudaError_t result) { if (result != cudaSuccess) fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); return result; } // realiza a reducao do vetor g_idata e devolve o resultado em g_odata __global__ void reduction(double *g_idata, long long n, double *g_odata) { extern __shared__ double sdata[]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem unsigned int s = blockDim.x / 2; while (s > 0) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); s /= 2; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } } // recebe um vetor x e calcula f(x) inplace __global__ void calculate_fx(double *x, long long n, long long k, long long M) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) x[i] = (sin((2 * M + 1) * M_PI * x[i]) * cos(2 * M_PI * k * x[i])) / sin(M_PI * x[i]); } // recebe um vetor f(x) e calcula f(x)^2 inplace __global__ void calculate_fx_2(double *fx, long long n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) fx[i] = fx[i] * fx[i]; } // cria um vetor de tamanho n com x aleatório entre (0, 0.5] static double *create_random_x(long long n) { double *x = create_empty_array(n); for (int i = 0; i < n; i++) x[i] = get_random_x(); return x; } // recebe N, k, M e calcula <f> e <f^2> void gpu_get_f(long long N, long long k, long long M, double *f, double *f2) { *f = *f2 = 0; for (long long task = N; task > 0; task -= MAXARRAYSIZE) { long long n = (task < MAXARRAYSIZE) ? 
task : MAXARRAYSIZE; long long grid_dim = (n + BLOCKDIM-1) / BLOCKDIM; double *x_h = create_random_x(n); double *result_h = (double *) malloc(grid_dim * sizeof(double)); double *x_d, *result_d; checkCuda( cudaMalloc((void **) &x_d, n * sizeof(double)) ); checkCuda( cudaMalloc((void **) &result_d, grid_dim * sizeof(double)) ); // calcula f(x) no device checkCuda( cudaMemcpy( x_d, x_h, n * sizeof(double), cudaMemcpyHostToDevice) ); calculate_fx<<<grid_dim, BLOCKDIM>>>(x_d, n, k, M); // reduz f(x) no device e termina no host reduction <<<grid_dim, BLOCKDIM, BLOCKDIM>>>(x_d, n, result_d); checkCuda( cudaMemcpy( result_h, result_d, grid_dim * sizeof(double), cudaMemcpyDeviceToHost) );; for (int i = 0; i < grid_dim; i++) *f += result_h[i]; // calcula f(x)^2 no device calculate_fx_2<<<grid_dim, BLOCKDIM>>>(x_d, n); // reduz f(x)^2 no device e termina no host reduction <<<grid_dim, BLOCKDIM, BLOCKDIM>>>(x_d, n, result_d); checkCuda( cudaMemcpy( result_h, result_d, grid_dim * sizeof(double), cudaMemcpyDeviceToHost) ); for (int i = 0; i < grid_dim; i++) *f2 += result_h[i]; // limpeza checkCuda( cudaFree(x_d) ); checkCuda( cudaFree(result_d) ); free(x_h); free(result_h); } *f /= N; // encontra <f> *f2 /= N; // encontra <f^2> } // recebe N, k, M, calcula os dois resultados da integral de Monte Carlo e devolve o tempo de execucao double gpu_monte_carlo(long long N, long long k, long long M, double *result_sum, double *result_sub) { double f, f2, start, finish; start = omp_get_wtime(); gpu_get_f(N, k, M, &f, &f2); *result_sum = monte_carlo_sum(f, f2, N); *result_sub = monte_carlo_sub(f, f2, N); finish = omp_get_wtime(); return finish - start; }
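One detail worth flagging in both versions of this Monte Carlo file: the reduction kernel is launched as reduction<<<grid_dim, BLOCKDIM, BLOCKDIM>>>(...), so the third launch parameter passes only BLOCKDIM (32) bytes of dynamic shared memory, while the kernel's extern __shared__ double sdata[] uses up to BLOCKDIM doubles (BLOCKDIM * sizeof(double) bytes). In addition, threads with i >= n skip the __syncthreads() barrier entirely, and the tail block's unused sdata slots are never initialized before being read. A hedged sketch of a corrected launch and kernel, assuming BLOCKDIM stays a power of two as it is here (reduction_padded is a hypothetical name, not in the file):

// Corrected launch: dynamic shared-memory size given in bytes.
reduction<<<grid_dim, BLOCKDIM, BLOCKDIM * sizeof(double)>>>(x_d, n, result_d);

// Sketch of a reduction kernel that pads the tail block and keeps the barrier uniform.
__global__ void reduction_padded(double *g_idata, long long n, double *g_odata) {
    extern __shared__ double sdata[];
    unsigned int tid = threadIdx.x;
    long long i = (long long)blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? g_idata[i] : 0.0;    // out-of-range threads contribute 0
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();                        // reached by every thread in the block
    }
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}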
c1c37613b28ec2d30b72dc73c7fd074e1ba837c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <ks_force_quda.h> #include <index_helper.cuh> namespace quda { using namespace gauge; template<typename Oprod, typename Gauge, typename Mom> struct KSForceArg { int threads; int X[4]; // grid dimensions #ifndef BUILD_TIFR_INTERFACE #ifdef MULTI_GPU int border[4]; #endif #endif Oprod oprod; Gauge gauge; Mom mom; KSForceArg(Oprod& oprod, Gauge &gauge, Mom& mom, int dim[4]) : oprod(oprod), gauge(gauge), mom(mom){ threads = 1; for(int dir=0; dir<4; ++dir) threads *= dim[dir]; for(int dir=0; dir<4; ++dir) X[dir] = dim[dir]; #ifndef BUILD_TIFR_INTERFACE #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir) border[dir] = 2; #endif #endif } }; template<typename Float, typename Oprod, typename Gauge, typename Mom> __host__ __device__ void completeKSForceCore(KSForceArg<Oprod,Gauge,Mom>& arg, int idx){ int parity = 0; if(idx >= arg.threads/2){ parity = 1; idx -= arg.threads/2; } int X[4]; for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir]; int x[4]; getCoords(x, idx, X, parity); #ifndef BUILD_TIFR_INTERFACE #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir){ x[dir] += arg.border[dir]; X[dir] += 2*arg.border[dir]; } #endif #endif Matrix<complex<Float>,3> O; Matrix<complex<Float>,3> G; Matrix<complex<Float>,3> M; int dx[4] = {0,0,0,0}; for(int dir=0; dir<4; ++dir){ arg.gauge.load((Float*)(G.data), linkIndexShift(x,dx,X), dir, parity); arg.oprod.load((Float*)(O.data), linkIndexShift(x,dx,X), dir, parity); if(parity==0){ M = G*O; }else{ M = -G*O; } Float sub = getTrace(M).y/(static_cast<Float>(3)); Float temp[10]; temp[0] = (M.data[1].x - M.data[3].x)*0.5; temp[1] = (M.data[1].y + M.data[3].y)*0.5; temp[2] = (M.data[2].x - M.data[6].x)*0.5; temp[3] = (M.data[2].y + M.data[6].y)*0.5; temp[4] = (M.data[5].x - M.data[7].x)*0.5; temp[5] = (M.data[5].y + M.data[7].y)*0.5; temp[6] = (M.data[0].y-sub); temp[7] = (M.data[4].y-sub); temp[8] = (M.data[8].y-sub); temp[9] = 0.0; arg.mom.save(temp, idx, dir, parity); } } template<typename Float, typename Oprod, typename Gauge, typename Mom> __global__ void completeKSForceKernel(KSForceArg<Oprod,Gauge,Mom> arg) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx >= arg.threads) return; completeKSForceCore<Float,Oprod,Gauge,Mom>(arg,idx); } template<typename Float, typename Oprod, typename Gauge, typename Mom> void completeKSForceCPU(KSForceArg<Oprod,Gauge,Mom>& arg) { for(int idx=0; idx<arg.threads; idx++){ completeKSForceCore<Float,Oprod,Gauge,Mom>(arg,idx); } } template<typename Float, typename Oprod, typename Gauge, typename Mom> class KSForceComplete : Tunable { KSForceArg<Oprod, Gauge, Mom> arg; const GaugeField &meta; const QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune the shared memory. bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: KSForceComplete(KSForceArg<Oprod,Gauge,Mom> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("prec=%lu,stride=%d",sizeof(Float),arg.mom.stride); } virtual ~KSForceComplete() {} void apply(const hipStream_t &stream) { if(location == QUDA_CUDA_FIELD_LOCATION){ // Fix this dim3 blockDim(128, 1, 1); dim3 gridDim((arg.threads + blockDim.x - 1) / blockDim.x, 1, 1); hipLaunchKernelGGL(( completeKSForceKernel<Float>), dim3(gridDim),dim3(blockDim), 0, 0, arg); }else{ completeKSForceCPU<Float>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 792*arg.X[0]*arg.X[1]*arg.X[2]*arg.X[3]; } long long bytes() const { return 0; } // Fix this }; template<typename Float, typename Oprod, typename Gauge, typename Mom> void completeKSForce(Oprod oprod, Gauge gauge, Mom mom, int dim[4], const GaugeField &meta, QudaFieldLocation location, long long *flops) { KSForceArg<Oprod,Gauge,Mom> arg(oprod, gauge, mom, dim); KSForceComplete<Float,Oprod,Gauge,Mom> completeForce(arg,meta,location); completeForce.apply(0); if(flops) *flops = completeForce.flops(); hipDeviceSynchronize(); } template<typename Float> void completeKSForce(GaugeField& mom, const GaugeField& oprod, const GaugeField& gauge, QudaFieldLocation location, long long *flops) { if(location != QUDA_CUDA_FIELD_LOCATION){ errorQuda("Only QUDA_CUDA_FIELD_LOCATION currently supported"); }else{ if((oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) || (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO) || (mom.Reconstruct() != QUDA_RECONSTRUCT_10)){ errorQuda("Reconstruct type not supported"); }else{ completeKSForce<Float>(FloatNOrder<Float, 18, 2, 18>(oprod), FloatNOrder<Float, 18, 2, 18>(gauge), FloatNOrder<Float, 10, 2, 10>(mom), const_cast<int*>(mom.X()), gauge, location, flops); } } return; } void completeKSForce(GaugeField &mom, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location, long long *flops) { if(mom.Precision() == QUDA_HALF_PRECISION){ errorQuda("Half precision not supported"); } if(mom.Precision() == QUDA_SINGLE_PRECISION){ completeKSForce<float>(mom, oprod, gauge, location, flops); }else if(mom.Precision() == QUDA_DOUBLE_PRECISION){ completeKSForce<double>(mom, oprod, gauge, location, flops); }else{ errorQuda("Precision %d not supported", mom.Precision()); } return; } template<typename Result, typename Oprod, typename Gauge> struct KSLongLinkArg { int threads; int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif double coeff; Result res; Oprod oprod; Gauge gauge; KSLongLinkArg(Result& res, Oprod& oprod, Gauge &gauge, int dim[4]) : coeff(1.0), res(res), oprod(oprod), gauge(gauge){ threads = 1; #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir) threads *= (dim[dir]-2); for(int dir=0; dir<4; ++dir) X[dir] = dim[dir]-2; for(int dir=0; dir<4; ++dir) border[dir] = 2; #else for(int dir=0; dir<4; ++dir) threads *= dim[dir]; for(int dir=0; dir<4; ++dir) X[dir] = dim[dir]; #endif } }; template<typename Float, typename Result, typename Oprod, typename Gauge> __host__ __device__ void computeKSLongLinkForceCore(KSLongLinkArg<Result,Oprod,Gauge>& arg, int idx){ /* int parity = 0; if(idx >= arg.threads/2){ parity = 1; idx -= arg.threads/2; } int X[4]; for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir]; int x[4]; getCoords(x, idx, X, parity); #ifndef BUILD_TIFR_INTERFACE #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir){ x[dir] += 
arg.border[dir]; X[dir] += 2*arg.border[dir]; } #endif #endif typedef complex<Float> Cmplx; Matrix<Cmplx,3> O; Matrix<Cmplx,3> G; Matrix<Cmplx,3> M; int dx[4] = {0,0,0,0}; for(int dir=0; dir<4; ++dir){ arg.gauge.load((Float*)(G.data), linkIndexShift(x,dx,X), dir, parity); arg.oprod.load((Float*)(O.data), linkIndexShift(x,dx,X), dir, parity); if(parity==0){ M = G*O; }else{ M = -G*O; } Float sub = getTrace(M).y/(static_cast<Float>(3)); Float temp[10]; temp[0] = (M.data[1].x - M.data[3].x)*0.5; temp[1] = (M.data[1].y + M.data[3].y)*0.5; temp[2] = (M.data[2].x - M.data[6].x)*0.5; temp[3] = (M.data[2].y + M.data[6].y)*0.5; temp[4] = (M.data[5].x - M.data[7].x)*0.5; temp[5] = (M.data[5].y + M.data[7].y)*0.5; temp[6] = (M.data[0].y-sub); temp[7] = (M.data[4].y-sub); temp[8] = (M.data[8].y-sub); temp[9] = 0.0; arg.mom.save(temp, idx, dir, parity); } */ } template<typename Float, typename Result, typename Oprod, typename Gauge> __global__ void computeKSLongLinkForceKernel(KSLongLinkArg<Result,Oprod,Gauge> arg) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx >= arg.threads) return; computeKSLongLinkForceCore<Float,Result,Oprod,Gauge>(arg,idx); } template<typename Float, typename Result, typename Oprod, typename Gauge> void computeKSLongLinkForceCPU(KSLongLinkArg<Result,Oprod,Gauge>& arg) { for(int idx=0; idx<arg.threads; idx++){ computeKSLongLinkForceCore<Float,Result,Oprod,Gauge>(arg,idx); } } // should be tunable template<typename Float, typename Result, typename Oprod, typename Gauge> class KSLongLinkForce : Tunable { KSLongLinkArg<Result,Oprod,Gauge> arg; const GaugeField &meta; const QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune the shared memory. bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: KSLongLinkForce(KSLongLinkArg<Result,Oprod,Gauge> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("prec=%lu,stride=%d",sizeof(Float),arg.res.stride); } virtual ~KSLongLinkForce() {} void apply(const hipStream_t &stream) { if(location == QUDA_CUDA_FIELD_LOCATION){ // Fix this dim3 blockDim(128, 1, 1); dim3 gridDim((arg.threads + blockDim.x - 1) / blockDim.x, 1, 1); hipLaunchKernelGGL(( computeKSLongLinkForceKernel<Float>), dim3(gridDim),dim3(blockDim), 0, 0, arg); }else{ computeKSLongLinkForceCPU<Float>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } // Fix this long long bytes() const { return 0; } // Fix this }; template<typename Float, typename Result, typename Oprod, typename Gauge> void computeKSLongLinkForce(Result res, Oprod oprod, Gauge gauge, int dim[4], const GaugeField &meta, QudaFieldLocation location) { KSLongLinkArg<Result,Oprod,Gauge> arg(res, oprod, gauge, dim); KSLongLinkForce<Float,Result,Oprod,Gauge> computeLongLink(arg,meta,location); computeLongLink.apply(0); hipDeviceSynchronize(); } template<typename Float> void computeKSLongLinkForce(GaugeField& result, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location) { if(location != QUDA_CUDA_FIELD_LOCATION){ errorQuda("Only QUDA_CUDA_FIELD_LOCATION currently supported"); }else{ if((oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) || (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO) || (result.Reconstruct() != QUDA_RECONSTRUCT_10)){ errorQuda("Reconstruct type not supported"); }else{ computeKSLongLinkForce<Float>(FloatNOrder<Float, 18, 2, 18>(result), FloatNOrder<Float, 18, 2, 18>(oprod), FloatNOrder<Float, 18, 2, 18>(gauge), const_cast<int*>(result.X()), gauge, location); } } return; } void computeKSLongLinkForce(GaugeField &result, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location) { if(result.Precision() == QUDA_HALF_PRECISION){ errorQuda("Half precision not supported"); } if(result.Precision() == QUDA_SINGLE_PRECISION){ computeKSLongLinkForce<float>(result, oprod, gauge, location); }else if(result.Precision() == QUDA_DOUBLE_PRECISION){ computeKSLongLinkForce<double>(result, oprod, gauge, location); } errorQuda("Precision %d not supported", result.Precision()); return; } } // namespace quda
c1c37613b28ec2d30b72dc73c7fd074e1ba837c3.cu
#include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <ks_force_quda.h> #include <index_helper.cuh> namespace quda { using namespace gauge; template<typename Oprod, typename Gauge, typename Mom> struct KSForceArg { int threads; int X[4]; // grid dimensions #ifndef BUILD_TIFR_INTERFACE #ifdef MULTI_GPU int border[4]; #endif #endif Oprod oprod; Gauge gauge; Mom mom; KSForceArg(Oprod& oprod, Gauge &gauge, Mom& mom, int dim[4]) : oprod(oprod), gauge(gauge), mom(mom){ threads = 1; for(int dir=0; dir<4; ++dir) threads *= dim[dir]; for(int dir=0; dir<4; ++dir) X[dir] = dim[dir]; #ifndef BUILD_TIFR_INTERFACE #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir) border[dir] = 2; #endif #endif } }; template<typename Float, typename Oprod, typename Gauge, typename Mom> __host__ __device__ void completeKSForceCore(KSForceArg<Oprod,Gauge,Mom>& arg, int idx){ int parity = 0; if(idx >= arg.threads/2){ parity = 1; idx -= arg.threads/2; } int X[4]; for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir]; int x[4]; getCoords(x, idx, X, parity); #ifndef BUILD_TIFR_INTERFACE #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir){ x[dir] += arg.border[dir]; X[dir] += 2*arg.border[dir]; } #endif #endif Matrix<complex<Float>,3> O; Matrix<complex<Float>,3> G; Matrix<complex<Float>,3> M; int dx[4] = {0,0,0,0}; for(int dir=0; dir<4; ++dir){ arg.gauge.load((Float*)(G.data), linkIndexShift(x,dx,X), dir, parity); arg.oprod.load((Float*)(O.data), linkIndexShift(x,dx,X), dir, parity); if(parity==0){ M = G*O; }else{ M = -G*O; } Float sub = getTrace(M).y/(static_cast<Float>(3)); Float temp[10]; temp[0] = (M.data[1].x - M.data[3].x)*0.5; temp[1] = (M.data[1].y + M.data[3].y)*0.5; temp[2] = (M.data[2].x - M.data[6].x)*0.5; temp[3] = (M.data[2].y + M.data[6].y)*0.5; temp[4] = (M.data[5].x - M.data[7].x)*0.5; temp[5] = (M.data[5].y + M.data[7].y)*0.5; temp[6] = (M.data[0].y-sub); temp[7] = (M.data[4].y-sub); temp[8] = (M.data[8].y-sub); temp[9] = 0.0; arg.mom.save(temp, idx, dir, parity); } } template<typename Float, typename Oprod, typename Gauge, typename Mom> __global__ void completeKSForceKernel(KSForceArg<Oprod,Gauge,Mom> arg) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx >= arg.threads) return; completeKSForceCore<Float,Oprod,Gauge,Mom>(arg,idx); } template<typename Float, typename Oprod, typename Gauge, typename Mom> void completeKSForceCPU(KSForceArg<Oprod,Gauge,Mom>& arg) { for(int idx=0; idx<arg.threads; idx++){ completeKSForceCore<Float,Oprod,Gauge,Mom>(arg,idx); } } template<typename Float, typename Oprod, typename Gauge, typename Mom> class KSForceComplete : Tunable { KSForceArg<Oprod, Gauge, Mom> arg; const GaugeField &meta; const QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune the shared memory. bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: KSForceComplete(KSForceArg<Oprod,Gauge,Mom> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("prec=%lu,stride=%d",sizeof(Float),arg.mom.stride); } virtual ~KSForceComplete() {} void apply(const cudaStream_t &stream) { if(location == QUDA_CUDA_FIELD_LOCATION){ // Fix this dim3 blockDim(128, 1, 1); dim3 gridDim((arg.threads + blockDim.x - 1) / blockDim.x, 1, 1); completeKSForceKernel<Float><<<gridDim,blockDim>>>(arg); }else{ completeKSForceCPU<Float>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 792*arg.X[0]*arg.X[1]*arg.X[2]*arg.X[3]; } long long bytes() const { return 0; } // Fix this }; template<typename Float, typename Oprod, typename Gauge, typename Mom> void completeKSForce(Oprod oprod, Gauge gauge, Mom mom, int dim[4], const GaugeField &meta, QudaFieldLocation location, long long *flops) { KSForceArg<Oprod,Gauge,Mom> arg(oprod, gauge, mom, dim); KSForceComplete<Float,Oprod,Gauge,Mom> completeForce(arg,meta,location); completeForce.apply(0); if(flops) *flops = completeForce.flops(); cudaDeviceSynchronize(); } template<typename Float> void completeKSForce(GaugeField& mom, const GaugeField& oprod, const GaugeField& gauge, QudaFieldLocation location, long long *flops) { if(location != QUDA_CUDA_FIELD_LOCATION){ errorQuda("Only QUDA_CUDA_FIELD_LOCATION currently supported"); }else{ if((oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) || (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO) || (mom.Reconstruct() != QUDA_RECONSTRUCT_10)){ errorQuda("Reconstruct type not supported"); }else{ completeKSForce<Float>(FloatNOrder<Float, 18, 2, 18>(oprod), FloatNOrder<Float, 18, 2, 18>(gauge), FloatNOrder<Float, 10, 2, 10>(mom), const_cast<int*>(mom.X()), gauge, location, flops); } } return; } void completeKSForce(GaugeField &mom, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location, long long *flops) { if(mom.Precision() == QUDA_HALF_PRECISION){ errorQuda("Half precision not supported"); } if(mom.Precision() == QUDA_SINGLE_PRECISION){ completeKSForce<float>(mom, oprod, gauge, location, flops); }else if(mom.Precision() == QUDA_DOUBLE_PRECISION){ completeKSForce<double>(mom, oprod, gauge, location, flops); }else{ errorQuda("Precision %d not supported", mom.Precision()); } return; } template<typename Result, typename Oprod, typename Gauge> struct KSLongLinkArg { int threads; int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif double coeff; Result res; Oprod oprod; Gauge gauge; KSLongLinkArg(Result& res, Oprod& oprod, Gauge &gauge, int dim[4]) : coeff(1.0), res(res), oprod(oprod), gauge(gauge){ threads = 1; #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir) threads *= (dim[dir]-2); for(int dir=0; dir<4; ++dir) X[dir] = dim[dir]-2; for(int dir=0; dir<4; ++dir) border[dir] = 2; #else for(int dir=0; dir<4; ++dir) threads *= dim[dir]; for(int dir=0; dir<4; ++dir) X[dir] = dim[dir]; #endif } }; template<typename Float, typename Result, typename Oprod, typename Gauge> __host__ __device__ void computeKSLongLinkForceCore(KSLongLinkArg<Result,Oprod,Gauge>& arg, int idx){ /* int parity = 0; if(idx >= arg.threads/2){ parity = 1; idx -= arg.threads/2; } int X[4]; for(int dir=0; dir<4; ++dir) X[dir] = arg.X[dir]; int x[4]; getCoords(x, idx, X, parity); #ifndef BUILD_TIFR_INTERFACE #ifdef MULTI_GPU for(int dir=0; dir<4; ++dir){ x[dir] += arg.border[dir]; X[dir] += 
2*arg.border[dir]; } #endif #endif typedef complex<Float> Cmplx; Matrix<Cmplx,3> O; Matrix<Cmplx,3> G; Matrix<Cmplx,3> M; int dx[4] = {0,0,0,0}; for(int dir=0; dir<4; ++dir){ arg.gauge.load((Float*)(G.data), linkIndexShift(x,dx,X), dir, parity); arg.oprod.load((Float*)(O.data), linkIndexShift(x,dx,X), dir, parity); if(parity==0){ M = G*O; }else{ M = -G*O; } Float sub = getTrace(M).y/(static_cast<Float>(3)); Float temp[10]; temp[0] = (M.data[1].x - M.data[3].x)*0.5; temp[1] = (M.data[1].y + M.data[3].y)*0.5; temp[2] = (M.data[2].x - M.data[6].x)*0.5; temp[3] = (M.data[2].y + M.data[6].y)*0.5; temp[4] = (M.data[5].x - M.data[7].x)*0.5; temp[5] = (M.data[5].y + M.data[7].y)*0.5; temp[6] = (M.data[0].y-sub); temp[7] = (M.data[4].y-sub); temp[8] = (M.data[8].y-sub); temp[9] = 0.0; arg.mom.save(temp, idx, dir, parity); } */ } template<typename Float, typename Result, typename Oprod, typename Gauge> __global__ void computeKSLongLinkForceKernel(KSLongLinkArg<Result,Oprod,Gauge> arg) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(idx >= arg.threads) return; computeKSLongLinkForceCore<Float,Result,Oprod,Gauge>(arg,idx); } template<typename Float, typename Result, typename Oprod, typename Gauge> void computeKSLongLinkForceCPU(KSLongLinkArg<Result,Oprod,Gauge>& arg) { for(int idx=0; idx<arg.threads; idx++){ computeKSLongLinkForceCore<Float,Result,Oprod,Gauge>(arg,idx); } } // should be tunable template<typename Float, typename Result, typename Oprod, typename Gauge> class KSLongLinkForce : Tunable { KSLongLinkArg<Result,Oprod,Gauge> arg; const GaugeField &meta; const QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune the shared memory. bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: KSLongLinkForce(KSLongLinkArg<Result,Oprod,Gauge> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("prec=%lu,stride=%d",sizeof(Float),arg.res.stride); } virtual ~KSLongLinkForce() {} void apply(const cudaStream_t &stream) { if(location == QUDA_CUDA_FIELD_LOCATION){ // Fix this dim3 blockDim(128, 1, 1); dim3 gridDim((arg.threads + blockDim.x - 1) / blockDim.x, 1, 1); computeKSLongLinkForceKernel<Float><<<gridDim,blockDim>>>(arg); }else{ computeKSLongLinkForceCPU<Float>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } // Fix this long long bytes() const { return 0; } // Fix this }; template<typename Float, typename Result, typename Oprod, typename Gauge> void computeKSLongLinkForce(Result res, Oprod oprod, Gauge gauge, int dim[4], const GaugeField &meta, QudaFieldLocation location) { KSLongLinkArg<Result,Oprod,Gauge> arg(res, oprod, gauge, dim); KSLongLinkForce<Float,Result,Oprod,Gauge> computeLongLink(arg,meta,location); computeLongLink.apply(0); cudaDeviceSynchronize(); } template<typename Float> void computeKSLongLinkForce(GaugeField& result, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location) { if(location != QUDA_CUDA_FIELD_LOCATION){ errorQuda("Only QUDA_CUDA_FIELD_LOCATION currently supported"); }else{ if((oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) || (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO) || (result.Reconstruct() != QUDA_RECONSTRUCT_10)){ errorQuda("Reconstruct type not supported"); }else{ computeKSLongLinkForce<Float>(FloatNOrder<Float, 18, 2, 18>(result), FloatNOrder<Float, 18, 2, 18>(oprod), FloatNOrder<Float, 18, 2, 18>(gauge), const_cast<int*>(result.X()), gauge, location); } } return; } void computeKSLongLinkForce(GaugeField &result, const GaugeField &oprod, const GaugeField &gauge, QudaFieldLocation location) { if(result.Precision() == QUDA_HALF_PRECISION){ errorQuda("Half precision not supported"); } if(result.Precision() == QUDA_SINGLE_PRECISION){ computeKSLongLinkForce<float>(result, oprod, gauge, location); }else if(result.Precision() == QUDA_DOUBLE_PRECISION){ computeKSLongLinkForce<double>(result, oprod, gauge, location); } errorQuda("Precision %d not supported", result.Precision()); return; } } // namespace quda
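A small inconsistency visible in both versions above: in the host-side dispatch at the end of computeKSLongLinkForce, the final errorQuda("Precision %d not supported", ...) is not placed in an else branch, so it executes even after a successful single- or double-precision call, whereas the earlier completeKSForce wrapper in the same file uses the guarded form. A sketch of the dispatch following that existing pattern:

// Sketch: precision dispatch with the error call guarded, as in completeKSForce above.
if (result.Precision() == QUDA_SINGLE_PRECISION) {
    computeKSLongLinkForce<float>(result, oprod, gauge, location);
} else if (result.Precision() == QUDA_DOUBLE_PRECISION) {
    computeKSLongLinkForce<double>(result, oprod, gauge, location);
} else {
    errorQuda("Precision %d not supported", result.Precision());
}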
f5c0b880d43a37e3acdc203c886cf2c01f883cc9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlaqps2_gpu.cu normal z -> s, Tue Sep 2 12:38:15 2014 */ #include "common_magma.h" #include "commonblas_s.h" #define PRECISION_s // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /* --------------------------------------------------------------------------- */ /** Purpose ------- SLAQPS computes a step of QR factorization with column pivoting of a real M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 @param[in] offset INTEGER The number of rows of A that have been factorized in previous steps. @param[in] NB INTEGER The number of columns to factorize. @param[out] kb INTEGER The number of columns actually factorized. @param[in,out] A REAL array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[in,out] jpvt INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. @param[out] tau REAL array, dimension (KB) The scalar factors of the elementary reflectors. @param[in,out] VN1 REAL array, dimension (N) The vector with the partial column norms. @param[in,out] VN2 REAL array, dimension (N) The vector with the exact column norms. @param[in,out] AUXV REAL array, dimension (NB) Auxiliar vector. @param[in,out] F REAL array, dimension (LDF,NB) Matrix F' = L*Y'*A. @param[in] ldf INTEGER The leading dimension of the array F. LDF >= max(1,N). 
@ingroup magma_sgeqp3_aux ********************************************************************/ extern "C" magma_int_t magma_slaqps2_gpu(magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, float *A, magma_int_t lda, magma_int_t *jpvt, float *tau, float *vn1, float *vn2, float *auxv, float *F, magma_int_t ldf) { #define A(i, j) (A + (i) + (j)*(lda )) #define F(i, j) (F + (i) + (j)*(ldf )) float c_zero = MAGMA_S_MAKE( 0.,0.); float c_one = MAGMA_S_MAKE( 1.,0.); float c_neg_one = MAGMA_S_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; magma_int_t k, rk; float tauk; magma_int_t pvt, itemp; float tol3z; float *dAkk = auxv; auxv+=nb; float lsticc, *lsticcs; magma_smalloc( &lsticcs, 1+256*(n+255)/256 ); tol3z = magma_ssqrt( lapackf77_slamch("Epsilon")); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ pvt = k - 1 + magma_isamax( n-k, &vn1[k], ione ); if (pvt != k) { magmablas_sswap( k+1, F(pvt,0), ldf, F(k,0), ldf); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; #if (defined(PRECISION_d) || defined(PRECISION_z)) //magma_dswap( 1, &vn1[pvt], 1, &vn1[k], 1 ); //magma_dswap( 1, &vn2[pvt], 1, &vn2[k], 1 ); magma_dswap( 2, &vn1[pvt], n+offset, &vn1[k], n+offset); #else //magma_sswap( 1, &vn1[pvt], 1, &vn1[k], 1 ); //magma_sswap( 1, &vn2[pvt], 1, &vn2[k], 1 ); magma_sswap(2, &vn1[pvt], n+offset, &vn1[k], n+offset); #endif magmablas_sswap( m, A(0,pvt), ione, A(0, k), ione ); } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. Optimization: multiply with beta=0; wait for vector and subtract */ if (k > 0) { magmablas_sgemv_conjv( m-rk, k, c_neg_one, A(rk, 0), lda, F(k, 0), ldf, c_one, A(rk, k), ione ); } /* Generate elementary reflector H(k). */ magma_slarfg_gpu(m-rk, A(rk, k), A(rk + 1, k), &tau[k], &vn1[k], &dAkk[k]); magma_ssetvector( 1, &c_one, 1, A(rk, k), 1 ); /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1 || k > 0 ) magma_sgetvector( 1, &tau[k], 1, &tauk, 1 ); if (k < n-1) { magma_sgemv( MagmaConjTrans, m-rk, n-k-1, tauk, A( rk, k+1 ), lda, A( rk, k ), 1, c_zero, F( k+1, k ), 1 ); } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K) := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K) so, F is (updated A)*V */ if (k > 0) { /*z__1 = MAGMA_S_NEGATE( tauk ); magma_sgemv( MagmaConjTrans, m-rk, k, z__1, A(rk, 0), lda, A(rk, k), ione, c_zero, auxv, ione );*/ hipLaunchKernelGGL(( magma_sgemv_kernel3), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m-rk, A(rk, 0), lda, A(rk, k), auxv, tau+k); /* I think we only need stricly lower-triangular part */ magma_sgemv( MagmaNoTrans, n-k-1, k, c_one, F(k+1,0), ldf, auxv, ione, c_one, F(k+1,k), ione ); } /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; /* left-looking update of rows, * * since F=A'v with original A, so no right-looking */ magma_sgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2, c_neg_one, A(rk, 0 ), lda, F(k+1,0 ), ldf, c_one, A(rk, k+1), lda ); } /* Update partial column norms. 
*/ if (rk < min(m, n+offset)-1){ magmablas_snrm2_row_check_adjust(n-k-1, tol3z, &vn1[k+1], &vn2[k+1], A(rk,k+1), lda, lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #else magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #endif } //*A(rk, k) = Akk; //magma_ssetvector( 1, &Akk, 1, A(rk, k), 1 ); //magmablas_slacpy(MagmaUpperLower, 1, 1, dAkk, 1, A(rk, k), 1); ++k; } // restore the diagonals magma_scopymatrix( 1, k, dAkk, 1, A(offset, 0), lda+1 ); // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)) { i__1 = m - rk - 1; i__2 = n - *kb; magma_sgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb, c_neg_one, A(rk+1, 0 ), lda, F(*kb, 0 ), ldf, c_one, A(rk+1, *kb), lda ); } /* Recomputation of difficult columns. */ if( lsticc > 0 ) { // printf( " -- recompute dnorms --\n" ); magmablas_snrm2_check(m-rk-1, n-*kb, A(rk+1,*kb), lda, &vn1[*kb], lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_scopymatrix( n-*kb, 1, &vn1[*kb], n, &vn2[*kb], n); #else magma_scopymatrix( n-*kb, 1, &vn1[*kb], n, &vn2[*kb], n); #endif } magma_free(lsticcs); return MAGMA_SUCCESS; } /* magma_slaqps */
f5c0b880d43a37e3acdc203c886cf2c01f883cc9.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlaqps2_gpu.cu normal z -> s, Tue Sep 2 12:38:15 2014 */ #include "common_magma.h" #include "commonblas_s.h" #define PRECISION_s // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /* --------------------------------------------------------------------------- */ /** Purpose ------- SLAQPS computes a step of QR factorization with column pivoting of a real M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 @param[in] offset INTEGER The number of rows of A that have been factorized in previous steps. @param[in] NB INTEGER The number of columns to factorize. @param[out] kb INTEGER The number of columns actually factorized. @param[in,out] A REAL array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[in,out] jpvt INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. @param[out] tau REAL array, dimension (KB) The scalar factors of the elementary reflectors. @param[in,out] VN1 REAL array, dimension (N) The vector with the partial column norms. @param[in,out] VN2 REAL array, dimension (N) The vector with the exact column norms. @param[in,out] AUXV REAL array, dimension (NB) Auxiliar vector. @param[in,out] F REAL array, dimension (LDF,NB) Matrix F' = L*Y'*A. @param[in] ldf INTEGER The leading dimension of the array F. LDF >= max(1,N). 
@ingroup magma_sgeqp3_aux ********************************************************************/ extern "C" magma_int_t magma_slaqps2_gpu(magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, float *A, magma_int_t lda, magma_int_t *jpvt, float *tau, float *vn1, float *vn2, float *auxv, float *F, magma_int_t ldf) { #define A(i, j) (A + (i) + (j)*(lda )) #define F(i, j) (F + (i) + (j)*(ldf )) float c_zero = MAGMA_S_MAKE( 0.,0.); float c_one = MAGMA_S_MAKE( 1.,0.); float c_neg_one = MAGMA_S_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; magma_int_t k, rk; float tauk; magma_int_t pvt, itemp; float tol3z; float *dAkk = auxv; auxv+=nb; float lsticc, *lsticcs; magma_smalloc( &lsticcs, 1+256*(n+255)/256 ); tol3z = magma_ssqrt( lapackf77_slamch("Epsilon")); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ pvt = k - 1 + magma_isamax( n-k, &vn1[k], ione ); if (pvt != k) { magmablas_sswap( k+1, F(pvt,0), ldf, F(k,0), ldf); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; #if (defined(PRECISION_d) || defined(PRECISION_z)) //magma_dswap( 1, &vn1[pvt], 1, &vn1[k], 1 ); //magma_dswap( 1, &vn2[pvt], 1, &vn2[k], 1 ); magma_dswap( 2, &vn1[pvt], n+offset, &vn1[k], n+offset); #else //magma_sswap( 1, &vn1[pvt], 1, &vn1[k], 1 ); //magma_sswap( 1, &vn2[pvt], 1, &vn2[k], 1 ); magma_sswap(2, &vn1[pvt], n+offset, &vn1[k], n+offset); #endif magmablas_sswap( m, A(0,pvt), ione, A(0, k), ione ); } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. Optimization: multiply with beta=0; wait for vector and subtract */ if (k > 0) { magmablas_sgemv_conjv( m-rk, k, c_neg_one, A(rk, 0), lda, F(k, 0), ldf, c_one, A(rk, k), ione ); } /* Generate elementary reflector H(k). */ magma_slarfg_gpu(m-rk, A(rk, k), A(rk + 1, k), &tau[k], &vn1[k], &dAkk[k]); magma_ssetvector( 1, &c_one, 1, A(rk, k), 1 ); /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1 || k > 0 ) magma_sgetvector( 1, &tau[k], 1, &tauk, 1 ); if (k < n-1) { magma_sgemv( MagmaConjTrans, m-rk, n-k-1, tauk, A( rk, k+1 ), lda, A( rk, k ), 1, c_zero, F( k+1, k ), 1 ); } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K) := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K) so, F is (updated A)*V */ if (k > 0) { /*z__1 = MAGMA_S_NEGATE( tauk ); magma_sgemv( MagmaConjTrans, m-rk, k, z__1, A(rk, 0), lda, A(rk, k), ione, c_zero, auxv, ione );*/ magma_sgemv_kernel3<<< k, BLOCK_SIZE, 0, magma_stream >>>(m-rk, A(rk, 0), lda, A(rk, k), auxv, tau+k); /* I think we only need stricly lower-triangular part */ magma_sgemv( MagmaNoTrans, n-k-1, k, c_one, F(k+1,0), ldf, auxv, ione, c_one, F(k+1,k), ione ); } /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; /* left-looking update of rows, * * since F=A'v with original A, so no right-looking */ magma_sgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2, c_neg_one, A(rk, 0 ), lda, F(k+1,0 ), ldf, c_one, A(rk, k+1), lda ); } /* Update partial column norms. 
*/ if (rk < min(m, n+offset)-1){ magmablas_snrm2_row_check_adjust(n-k-1, tol3z, &vn1[k+1], &vn2[k+1], A(rk,k+1), lda, lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #else magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #endif } //*A(rk, k) = Akk; //magma_ssetvector( 1, &Akk, 1, A(rk, k), 1 ); //magmablas_slacpy(MagmaUpperLower, 1, 1, dAkk, 1, A(rk, k), 1); ++k; } // restore the diagonals magma_scopymatrix( 1, k, dAkk, 1, A(offset, 0), lda+1 ); // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)) { i__1 = m - rk - 1; i__2 = n - *kb; magma_sgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb, c_neg_one, A(rk+1, 0 ), lda, F(*kb, 0 ), ldf, c_one, A(rk+1, *kb), lda ); } /* Recomputation of difficult columns. */ if( lsticc > 0 ) { // printf( " -- recompute dnorms --\n" ); magmablas_snrm2_check(m-rk-1, n-*kb, A(rk+1,*kb), lda, &vn1[*kb], lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_scopymatrix( n-*kb, 1, &vn1[*kb], n, &vn2[*kb], n); #else magma_scopymatrix( n-*kb, 1, &vn1[*kb], n, &vn2[*kb], n); #endif } magma_free(lsticcs); return MAGMA_SUCCESS; } /* magma_slaqps */
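/* Editor's note (added sketch, not MAGMA code): the "Update partial column norms" step above
   relies on the classical LAPACK xLAQPS downdating rule, which magmablas_snrm2_row_check_adjust
   is assumed to implement on the GPU. A simplified CPU version for pivot row rk (column-major A
   with leading dimension lda) looks roughly like this; the lsticc bookkeeping of the real
   routine is reduced to a single flag here. */
#include <cmath>
void downdate_column_norms(int n, int k, int rk, float tol3z,
                           const float *A, int lda, float *vn1, float *vn2, int *lsticc)
{
    for (int j = k + 1; j < n; ++j) {
        if (vn1[j] == 0.0f) continue;
        float temp = std::fabs(A[rk + j * lda]) / vn1[j];
        temp = std::fmax(0.0f, (1.0f + temp) * (1.0f - temp));
        float temp2 = temp * (vn1[j] / vn2[j]) * (vn1[j] / vn2[j]);
        if (temp2 <= tol3z)
            *lsticc = j;                 // norm has drifted too far: recompute this column later
        else
            vn1[j] *= std::sqrt(temp);   // safe to downdate the running norm
    }
}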
07c38c59cdf1e497b01a1f8c830f464a3e3286b6.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////// include & Definitions /////////////////////////////////////////////////////// #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define SIZE 1024 #define ALL_OK printf("\nProgram is working normally.\n"); //////////////////////////////// PARALLEL CODE /////////////////////////////////////////////////////// __global__ void trial(long int* prime, long int* number, long int* length, int* check) { long int i = blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; // long int i = threadIdx.x + blockIdx.x * blockDim.x; if ((i > 1) && (i <= *length)) { if (prime[i]) { if (i * i == *number) { printf(" %d raised to square --------- proof: in fact %d * %d = %d, that is equal to value entered (%d)\n", i, i, i, (i * i), *number); *check = 1; } if (*number % i == 0 && i * i != *number) { printf(" %d and %d --------- proof: in fact %d * %d = %d, that is equal to value entered (%d)\n", i, (*number / i), i, (*number / i), (i * (*number / i)), *number); *check = 1; } } } void __syncthreads(); } /////////////////////////////////// END OF PARALLEL CODE ////////////////////////////////////////////////// /////////////////////////////////// SEQUENTIAL CODE ////////////////////////////////////////////////// void main() { int nDevices, * checkPrimeHost, * checkPrimeDeviceFinal; long int n = 2, threadsPerBlock, threadsDim, gridSize; static long int* d_length, * d_prime, * d_number; long int elim, number, * prime; bool exit_status = false; //Check GPU specs and if it is capable of run program. /*Guardare sempre I limiti massimi dellhw (warp size, SM, max blocks, max threads per block) 2. I thread allinterno del blocco dovrebbero essere multipli dello WARP size 3. Partire da numeri grandi di threads e blocks, calcolare lo speed up e provare a diminuire i parametri cercando la soluzione ideale */ hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf(" GPU SPECS\n"); printf("==========================================================================================\n"); printf("Device Number: %d\n", i); printf("Device Name: %s\n", prop.name); printf("Memory Warp Size: %d\n", prop.warpSize); printf("Memory Shared Memory Per Block: %d\n", (int)prop.sharedMemPerBlock); printf("Max Threads Per Block: %d\n", prop.maxThreadsPerBlock); printf("Max Threads Dimension: %d\n", prop.maxThreadsDim[3]); printf("Max Grid Size: %d\n", prop.maxGridSize[3]); printf("Multi Processor Count: %d\n", prop.multiProcessorCount); printf("Memory Pitch by memory copy: %d\n", (int)prop.memPitch); printf("Least device CC: %d\n", prop.major); printf("==========================================================================================\n\n"); threadsPerBlock = prop.maxThreadsPerBlock; threadsDim = prop.maxThreadsDim[3]; gridSize = prop.maxGridSize[3]; } while (exit_status == false) { printf(" BEGIN\n\nPlease, enter number to factorize. To exit insert 0 and then press enter.\n" "Note: do not insert values major than 2147483647 because of long int size.\n\n " " Number: "); scanf("%d", &number); if (number == 0) { exit_status = true; printf("\nExit status activated. 
Aborting process...\n\n\n"); break; } clock_t start, end; double tempo; start = clock(); long int length = floor(sqrt(number)); prime = (long int*)malloc(number * sizeof(long int)); hipMalloc((void**)&d_prime, SIZE * sizeof(long int)); hipMalloc((void**)&d_number, sizeof(long int)); hipMalloc((void**)&d_length, sizeof(long int)); hipMalloc((void**)&checkPrimeDeviceFinal, (sizeof(int))); checkPrimeHost = 0; //Eratosthenes Sieve for (long int i = 0; i < length; i++) { prime[i] = 1; } while (n <= length) { if (prime[n] == 1) { elim = n + n; while (elim <= length) { prime[elim] = 0; elim += n; } } n++; } for (int i = 2; i < length; i++) printf("%d: %d - ", i, prime[i]); printf("\nLa lunghezza e' di %d\n", length); hipMemcpy(d_prime, prime, SIZE * sizeof(long int), hipMemcpyHostToDevice); hipMemcpy(d_number, &number, sizeof(long int), hipMemcpyHostToDevice); hipMemcpy(d_length, &length, sizeof(long int), hipMemcpyHostToDevice); hipMemcpy(checkPrimeDeviceFinal, checkPrimeHost, sizeof(int), hipMemcpyHostToDevice); long int requiredBlocks = (length / SIZE) + 1; dim3 grid(requiredBlocks, 1, 1); dim3 block(SIZE, 1, 1); printf("\n\nNumber %d can be factored in following manners:\n\n", number); trial << < grid, block >> > (d_prime, d_number, d_length, checkPrimeDeviceFinal); int* check2; check2 = (int*)malloc(sizeof(int)); hipMemcpy(check2, checkPrimeDeviceFinal, sizeof(int), hipMemcpyDeviceToHost); if (*check2 == 0) { printf("Number %d is a prime.\n", number); } free(prime); hipFree(d_prime); hipFree(d_number); hipFree(d_length); hipFree(checkPrimeDeviceFinal); end = clock(); tempo = ((double)(end - start)) / CLOCKS_PER_SEC; printf("\n\nElapsed time: %f\n", tempo); printf("==========================================================================================\n\n"); } } ////////////////////////////////////// END OF SEQUENTIAL CODE //////////////////////////////////////////////
07c38c59cdf1e497b01a1f8c830f464a3e3286b6.cu
//////////////////////////////// include & Definitions /////////////////////////////////////////////////////// #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define SIZE 1024 #define ALL_OK printf("\nProgram is working normally.\n"); //////////////////////////////// PARALLEL CODE /////////////////////////////////////////////////////// __global__ void trial(long int* prime, long int* number, long int* length, int* check) { long int i = blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; // long int i = threadIdx.x + blockIdx.x * blockDim.x; if ((i > 1) && (i <= *length)) { if (prime[i]) { if (i * i == *number) { printf(" %d raised to square --------- proof: in fact %d * %d = %d, that is equal to value entered (%d)\n", i, i, i, (i * i), *number); *check = 1; } if (*number % i == 0 && i * i != *number) { printf(" %d and %d --------- proof: in fact %d * %d = %d, that is equal to value entered (%d)\n", i, (*number / i), i, (*number / i), (i * (*number / i)), *number); *check = 1; } } } void __syncthreads(); } /////////////////////////////////// END OF PARALLEL CODE ////////////////////////////////////////////////// /////////////////////////////////// SEQUENTIAL CODE ////////////////////////////////////////////////// void main() { int nDevices, * checkPrimeHost, * checkPrimeDeviceFinal; long int n = 2, threadsPerBlock, threadsDim, gridSize; static long int* d_length, * d_prime, * d_number; long int elim, number, * prime; bool exit_status = false; //Check GPU specs and if it is capable of run program. /*Guardare sempre I limiti massimi dell’hw (warp size, SM, max blocks, max threads per block) 2. I thread all’interno del blocco dovrebbero essere multipli dello WARP size 3. Partire da numeri grandi di threads e blocks, calcolare lo speed up e provare a diminuire i parametri cercando la soluzione ideale */ cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf(" GPU SPECS\n"); printf("==========================================================================================\n"); printf("Device Number: %d\n", i); printf("Device Name: %s\n", prop.name); printf("Memory Warp Size: %d\n", prop.warpSize); printf("Memory Shared Memory Per Block: %d\n", (int)prop.sharedMemPerBlock); printf("Max Threads Per Block: %d\n", prop.maxThreadsPerBlock); printf("Max Threads Dimension: %d\n", prop.maxThreadsDim[3]); printf("Max Grid Size: %d\n", prop.maxGridSize[3]); printf("Multi Processor Count: %d\n", prop.multiProcessorCount); printf("Memory Pitch by memory copy: %d\n", (int)prop.memPitch); printf("Least device CC: %d\n", prop.major); printf("==========================================================================================\n\n"); threadsPerBlock = prop.maxThreadsPerBlock; threadsDim = prop.maxThreadsDim[3]; gridSize = prop.maxGridSize[3]; } while (exit_status == false) { printf(" BEGIN\n\nPlease, enter number to factorize. To exit insert 0 and then press enter.\n" "Note: do not insert values major than 2147483647 because of long int size.\n\n " " Number: "); scanf("%d", &number); if (number == 0) { exit_status = true; printf("\nExit status activated. 
Aborting process...\n\n\n"); break; } clock_t start, end; double tempo; start = clock(); long int length = floor(sqrt(number)); prime = (long int*)malloc(number * sizeof(long int)); cudaMalloc((void**)&d_prime, SIZE * sizeof(long int)); cudaMalloc((void**)&d_number, sizeof(long int)); cudaMalloc((void**)&d_length, sizeof(long int)); cudaMalloc((void**)&checkPrimeDeviceFinal, (sizeof(int))); checkPrimeHost = 0; //Eratosthenes Sieve for (long int i = 0; i < length; i++) { prime[i] = 1; } while (n <= length) { if (prime[n] == 1) { elim = n + n; while (elim <= length) { prime[elim] = 0; elim += n; } } n++; } for (int i = 2; i < length; i++) printf("%d: %d - ", i, prime[i]); printf("\nLa lunghezza e' di %d\n", length); cudaMemcpy(d_prime, prime, SIZE * sizeof(long int), cudaMemcpyHostToDevice); cudaMemcpy(d_number, &number, sizeof(long int), cudaMemcpyHostToDevice); cudaMemcpy(d_length, &length, sizeof(long int), cudaMemcpyHostToDevice); cudaMemcpy(checkPrimeDeviceFinal, checkPrimeHost, sizeof(int), cudaMemcpyHostToDevice); long int requiredBlocks = (length / SIZE) + 1; dim3 grid(requiredBlocks, 1, 1); dim3 block(SIZE, 1, 1); printf("\n\nNumber %d can be factored in following manners:\n\n", number); trial << < grid, block >> > (d_prime, d_number, d_length, checkPrimeDeviceFinal); int* check2; check2 = (int*)malloc(sizeof(int)); cudaMemcpy(check2, checkPrimeDeviceFinal, sizeof(int), cudaMemcpyDeviceToHost); if (*check2 == 0) { printf("Number %d is a prime.\n", number); } free(prime); cudaFree(d_prime); cudaFree(d_number); cudaFree(d_length); cudaFree(checkPrimeDeviceFinal); end = clock(); tempo = ((double)(end - start)) / CLOCKS_PER_SEC; printf("\n\nElapsed time: %f\n", tempo); printf("==========================================================================================\n\n"); } } ////////////////////////////////////// END OF SEQUENTIAL CODE //////////////////////////////////////////////
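/* Editor's note (added sketch, not part of the original program): a host-side reference that can
   be used to sanity-check the GPU trial-division kernel. Unlike the kernel, which only reports
   prime divisors found via the sieve, this version simply prints every divisor pair with the
   smaller factor <= sqrt(number). The function name cpuTrial is illustrative. */
int cpuTrial(long int number)
{
    int found = 0;
    for (long int i = 2; i * i <= number; ++i) {
        if (number % i == 0) {
            printf(" %ld and %ld --------- since %ld * %ld = %ld\n",
                   i, number / i, i, number / i, number);
            found = 1;
        }
    }
    return found;   // 0 means no divisor was found, i.e. number (> 1) is prime
}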
8e5bf2a5e38526ed9716e93345caa21f8e2458ef.hip
// !!! This is a file automatically generated by hipify!!!
/*************************************************************************
	> File Name: 04blockpalladd.cu
	> Author: dong xu
	> Mail: gwmxyd@163.com
	> Created Time: 2016-03-30 (Wednesday) 13:25:57
 ************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"

hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size);

/**
 * addKernel<<<blocksize,threadsize>>>():
 * launches blocksize blocks, each containing threadsize threads
 * a block is the coarse-grained unit of execution, a thread the fine-grained one
 * if threads in different blocks share the same resource, the thread blocks end up executing serially
 */
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}

int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    hipError_t cudaStatus;
    int num = 0;
    hipDeviceProp_t prop;
    cudaStatus = hipGetDeviceCount(&num);
    for(int i = 0;i<num;i++)
    {
        hipGetDeviceProperties(&prop,i);
    }
    cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",c[0],c[1],c[2],c[3],c[4]);

    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    hipError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    hipLaunchKernelGGL(( addKernel), dim3(size),dim3(1) , 0, 0, dev_c, dev_a, dev_b);

    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    hipLaunchKernelGGL(( addKernel), dim3(size),dim3(1) , 0, 0, dev_c, dev_a, dev_c);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
8e5bf2a5e38526ed9716e93345caa21f8e2458ef.cu
/*************************************************************************
	> File Name: 04blockpalladd.cu
	> Author: dong xu
	> Mail: gwmxyd@163.com
	> Created Time: 2016-03-30 (Wednesday) 13:25:57
 ************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"

cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);

/**
 * addKernel<<<blocksize,threadsize>>>():
 * launches blocksize blocks, each containing threadsize threads
 * a block is the coarse-grained unit of execution, a thread the fine-grained one
 * if threads in different blocks share the same resource, the thread blocks end up executing serially
 */
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}

int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    cudaError_t cudaStatus;
    int num = 0;
    cudaDeviceProp prop;
    cudaStatus = cudaGetDeviceCount(&num);
    for(int i = 0;i<num;i++)
    {
        cudaGetDeviceProperties(&prop,i);
    }
    cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",c[0],c[1],c[2],c[3],c[4]);

    // cudaThreadExit must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaThreadExit();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaThreadExit failed!");
        return 1;
    }

    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<size,1 >>>(dev_c, dev_a, dev_b);

    // cudaThreadSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaThreadSynchronize();
    addKernel<<<size,1 >>>(dev_c, dev_a, dev_c);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
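/* Editor's note (added sketch, not part of the original file): the repeated
   "if (cudaStatus != cudaSuccess) { fprintf(...); goto Error; }" blocks above are commonly
   folded into a small checking macro. The macro name CUDA_CHECK is illustrative. */
#include <stdio.h>
#include <stdlib.h>
#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                  \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

/* Example use:
   CUDA_CHECK(cudaMalloc((void**)&dev_a, size * sizeof(int)));
   CUDA_CHECK(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice));
*/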
b43f7597bfed4bac1a81af97b3b14cfd585ee9cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "matrix_array.h" #include "cuda_common.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <random> #include <iostream> #include <iomanip> using namespace mtk; const int BLOCKS = 1 << 7; __global__ void deviceSetConstant(float *device_ptr,float f,int w,int max_t){ int tid = (threadIdx.x + blockIdx.x * blockDim.x)*w; for(int i = 0;i < w;i++){ if(max_t <= tid) return; device_ptr[tid] = f; tid++; } } __global__ void deviceSetRandom(float *device_ptr,float min,float max,int seed,int w,int max_t){ int tid = (threadIdx.x + blockIdx.x * blockDim.x)*w; hiprandState_t s; hiprand_init(seed,tid,0,&s); for(int i = 0;i < w;i++){ if(max_t <= tid) return; device_ptr[tid] = hiprand_uniform(&s) * (max - min) + min; tid++; } } __global__ void deviceCopy(float* device_ptr_dst,float* device_ptr_src,int max_t){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(max_t <= tid) return; device_ptr_dst[tid] = device_ptr_src[tid]; } MatrixXf::MatrixXf(int rows,int cols): rows(rows),cols(cols),device_ptr(nullptr),host_ptr(nullptr),depth(0) {} MatrixXf::MatrixXf():MatrixXf(0,0) {} MatrixXf::~MatrixXf(){ this->releaseDevice(); this->releaseHost(); } MatrixXf* MatrixXf::setSize(int rows,int cols){ this->rows = rows; this->cols = cols; return this; } MatrixXf* MatrixXf::allocateDevice(){ CUDA_HANDLE_ERROR( hipMalloc( (void**)&device_ptr, sizeof(float) * rows * cols )); return this; } MatrixXf* MatrixXf::allocateHost(){ CUDA_HANDLE_ERROR( hipHostMalloc( (void**)&host_ptr, sizeof(float) * rows * cols )); return this; } MatrixXf* MatrixXf::copyToDevice(){ CUDA_HANDLE_ERROR( hipMemcpy( device_ptr, host_ptr, sizeof(float) * rows * cols, hipMemcpyHostToDevice ) ); return this; } MatrixXf* MatrixXf::copyToHost(){ CUDA_HANDLE_ERROR( hipMemcpy( host_ptr, device_ptr, sizeof(float) * rows * cols, hipMemcpyDeviceToHost ) ); return this; } MatrixXf* MatrixXf::copyTo(float* dst_ptr){ hipLaunchKernelGGL(( deviceCopy), dim3(BLOCKS),dim3((rows*cols+BLOCKS-1)/BLOCKS), 0, 0, dst_ptr,device_ptr,rows*cols); return this; } MatrixXf* MatrixXf::copyTo(mtk::MatrixXf& matrix){ copyTo(matrix.getDevicePointer()); return this; } int MatrixXf::getCols()const{return cols;} int MatrixXf::getRows()const{return rows;} int MatrixXf::getSize()const{return rows * cols;} float* MatrixXf::getDevicePointer()const{return device_ptr;} float* MatrixXf::getHostPointer()const{return host_ptr;} void MatrixXf::setDevicePointer(float* dp){device_ptr = dp;} void MatrixXf::setHostPointer(float* hp){host_ptr = hp;} void MatrixXf::operator=(MatrixXf m){ this->cols = m.getCols(); this->rows = m.getRows(); } MatrixXf* MatrixXf::initDeviceConstant(float f){ hipLaunchKernelGGL(( deviceSetConstant), dim3(BLOCKS),dim3(::min(512,threads_ceildiv(rows*cols,BLOCKS))), 0, 0, device_ptr,f,(threads_ceildiv(rows*cols,BLOCKS)+511)/512,rows*cols); CUDA_HANDLE_ERROR(hipDeviceSynchronize()); return this; } MatrixXf* MatrixXf::initDeviceRandom(float min,float max){ std::random_device random; hipLaunchKernelGGL(( deviceSetRandom), dim3(BLOCKS),dim3(::min(512,threads_ceildiv(rows*cols,BLOCKS))), 0, 0, device_ptr,min,max,random(),(threads_ceildiv(rows*cols,BLOCKS)+511)/512,rows*cols); CUDA_HANDLE_ERROR(hipDeviceSynchronize()); return this; } MatrixXf* MatrixXf::print(std::string label){ if(label.compare("") != 0) std::cout<<label<<" = "<<std::endl; for(int i = 0;i < rows;i++){ for(int j = 0;j < cols;j++){ if(host_ptr[j * rows + i] >= 0.0f) printf(" %.3f ",host_ptr[j * rows 
+ i]); else printf("%.3f ",host_ptr[j * rows + i]); //std::cout<<std::setw(5)<<host_ptr[j * rows + i]<<" "; } std::cout<<std::endl; } return this; } MatrixXf* MatrixXf::releaseDevice(){ if(depth==0){ CUDA_HANDLE_ERROR( hipFree( device_ptr ) ); device_ptr = nullptr; } return this; } MatrixXf* MatrixXf::releaseHost(){ if(depth==0){ CUDA_HANDLE_ERROR( hipHostFree( host_ptr ) ); host_ptr = nullptr; } return this; } MatrixXf* MatrixXf::splitDevice(mtk::MatrixXf& s0_mat,mtk::MatrixXf& s1_mat){ if(s0_mat.getSize()+s1_mat.getSize() < this->getSize()){ return this; } if(device_ptr == nullptr){ return this; } //s0_mat.device_ptr = device_ptr; s0_mat.depth = depth + 1; s0_mat.setDevicePointer(device_ptr); //s1_mat.device_ptr = device_ptr + s0_mat.getSize(); s1_mat.setDevicePointer(device_ptr + s0_mat.getSize()); s1_mat.depth = depth + 1; return this; } MatrixXf* MatrixXf::splitHost(mtk::MatrixXf& s0_mat,mtk::MatrixXf& s1_mat){ if(s0_mat.getSize()+s1_mat.getSize() < this->getSize()){ return this; } if(host_ptr == nullptr){ return this; } //s0_mat.host_ptr = host_ptr; s0_mat.setHostPointer(host_ptr); s0_mat.depth = depth + 1; s1_mat.setHostPointer(host_ptr + s0_mat.getSize()); //s1_mat.host_ptr = host_ptr + s0_mat.getSize(); s1_mat.depth = depth + 1; return this; }
b43f7597bfed4bac1a81af97b3b14cfd585ee9cc.cu
#include "matrix_array.h" #include "cuda_common.h" #include <curand.h> #include <curand_kernel.h> #include <random> #include <iostream> #include <iomanip> using namespace mtk; const int BLOCKS = 1 << 7; __global__ void deviceSetConstant(float *device_ptr,float f,int w,int max_t){ int tid = (threadIdx.x + blockIdx.x * blockDim.x)*w; for(int i = 0;i < w;i++){ if(max_t <= tid) return; device_ptr[tid] = f; tid++; } } __global__ void deviceSetRandom(float *device_ptr,float min,float max,int seed,int w,int max_t){ int tid = (threadIdx.x + blockIdx.x * blockDim.x)*w; curandState s; curand_init(seed,tid,0,&s); for(int i = 0;i < w;i++){ if(max_t <= tid) return; device_ptr[tid] = curand_uniform(&s) * (max - min) + min; tid++; } } __global__ void deviceCopy(float* device_ptr_dst,float* device_ptr_src,int max_t){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(max_t <= tid) return; device_ptr_dst[tid] = device_ptr_src[tid]; } MatrixXf::MatrixXf(int rows,int cols): rows(rows),cols(cols),device_ptr(nullptr),host_ptr(nullptr),depth(0) {} MatrixXf::MatrixXf():MatrixXf(0,0) {} MatrixXf::~MatrixXf(){ this->releaseDevice(); this->releaseHost(); } MatrixXf* MatrixXf::setSize(int rows,int cols){ this->rows = rows; this->cols = cols; return this; } MatrixXf* MatrixXf::allocateDevice(){ CUDA_HANDLE_ERROR( cudaMalloc( (void**)&device_ptr, sizeof(float) * rows * cols )); return this; } MatrixXf* MatrixXf::allocateHost(){ CUDA_HANDLE_ERROR( cudaMallocHost( (void**)&host_ptr, sizeof(float) * rows * cols )); return this; } MatrixXf* MatrixXf::copyToDevice(){ CUDA_HANDLE_ERROR( cudaMemcpy( device_ptr, host_ptr, sizeof(float) * rows * cols, cudaMemcpyHostToDevice ) ); return this; } MatrixXf* MatrixXf::copyToHost(){ CUDA_HANDLE_ERROR( cudaMemcpy( host_ptr, device_ptr, sizeof(float) * rows * cols, cudaMemcpyDeviceToHost ) ); return this; } MatrixXf* MatrixXf::copyTo(float* dst_ptr){ deviceCopy<<<BLOCKS,(rows*cols+BLOCKS-1)/BLOCKS>>>(dst_ptr,device_ptr,rows*cols); return this; } MatrixXf* MatrixXf::copyTo(mtk::MatrixXf& matrix){ copyTo(matrix.getDevicePointer()); return this; } int MatrixXf::getCols()const{return cols;} int MatrixXf::getRows()const{return rows;} int MatrixXf::getSize()const{return rows * cols;} float* MatrixXf::getDevicePointer()const{return device_ptr;} float* MatrixXf::getHostPointer()const{return host_ptr;} void MatrixXf::setDevicePointer(float* dp){device_ptr = dp;} void MatrixXf::setHostPointer(float* hp){host_ptr = hp;} void MatrixXf::operator=(MatrixXf m){ this->cols = m.getCols(); this->rows = m.getRows(); } MatrixXf* MatrixXf::initDeviceConstant(float f){ deviceSetConstant<<<BLOCKS,std::min(512,threads_ceildiv(rows*cols,BLOCKS))>>>(device_ptr,f,(threads_ceildiv(rows*cols,BLOCKS)+511)/512,rows*cols); CUDA_HANDLE_ERROR(cudaDeviceSynchronize()); return this; } MatrixXf* MatrixXf::initDeviceRandom(float min,float max){ std::random_device random; deviceSetRandom<<<BLOCKS,std::min(512,threads_ceildiv(rows*cols,BLOCKS))>>>(device_ptr,min,max,random(),(threads_ceildiv(rows*cols,BLOCKS)+511)/512,rows*cols); CUDA_HANDLE_ERROR(cudaDeviceSynchronize()); return this; } MatrixXf* MatrixXf::print(std::string label){ if(label.compare("") != 0) std::cout<<label<<" = "<<std::endl; for(int i = 0;i < rows;i++){ for(int j = 0;j < cols;j++){ if(host_ptr[j * rows + i] >= 0.0f) printf(" %.3f ",host_ptr[j * rows + i]); else printf("%.3f ",host_ptr[j * rows + i]); //std::cout<<std::setw(5)<<host_ptr[j * rows + i]<<" "; } std::cout<<std::endl; } return this; } MatrixXf* MatrixXf::releaseDevice(){ if(depth==0){ 
CUDA_HANDLE_ERROR( cudaFree( device_ptr ) ); device_ptr = nullptr; } return this; } MatrixXf* MatrixXf::releaseHost(){ if(depth==0){ CUDA_HANDLE_ERROR( cudaFreeHost( host_ptr ) ); host_ptr = nullptr; } return this; } MatrixXf* MatrixXf::splitDevice(mtk::MatrixXf& s0_mat,mtk::MatrixXf& s1_mat){ if(s0_mat.getSize()+s1_mat.getSize() < this->getSize()){ return this; } if(device_ptr == nullptr){ return this; } //s0_mat.device_ptr = device_ptr; s0_mat.depth = depth + 1; s0_mat.setDevicePointer(device_ptr); //s1_mat.device_ptr = device_ptr + s0_mat.getSize(); s1_mat.setDevicePointer(device_ptr + s0_mat.getSize()); s1_mat.depth = depth + 1; return this; } MatrixXf* MatrixXf::splitHost(mtk::MatrixXf& s0_mat,mtk::MatrixXf& s1_mat){ if(s0_mat.getSize()+s1_mat.getSize() < this->getSize()){ return this; } if(host_ptr == nullptr){ return this; } //s0_mat.host_ptr = host_ptr; s0_mat.setHostPointer(host_ptr); s0_mat.depth = depth + 1; s1_mat.setHostPointer(host_ptr + s0_mat.getSize()); //s1_mat.host_ptr = host_ptr + s0_mat.getSize(); s1_mat.depth = depth + 1; return this; }
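/* Editor's note (added sketch, not part of the original file): a minimal host-side usage of
   mtk::MatrixXf, chaining the fluent setters implemented above. It assumes matrix_array.h
   declares the class in namespace mtk exactly as defined here. */
#include "matrix_array.h"
int main()
{
    mtk::MatrixXf m(4, 4);
    m.allocateHost()
     ->allocateDevice()
     ->initDeviceRandom(-1.0f, 1.0f)   // fill on the device with uniform values between -1 and 1
     ->copyToHost()
     ->print("m");                     // print() indexes host memory column-major (j * rows + i)
    // device and host buffers are released by the destructor (depth == 0)
    return 0;
}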
1147c99354c7d490c8cda601bc7809acfd9d5f32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cu_deform.h" __host__ void host_deform(float *d_img1, float *d_img, int nx, int ny, int nz, float volume, float flow, float *alpha_x, float *alpha_y, float *alpha_z, float *beta_x, float *beta_y, float *beta_z) { const dim3 gridSize((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z); const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z); float *mx, *my, *mz; hipMalloc((void**)&mx, nx * ny * nz * sizeof(float)); hipMalloc((void**)&my, nx * ny * nz * sizeof(float)); hipMalloc((void**)&mz, nx * ny * nz * sizeof(float)); hipLaunchKernelGGL(( kernel_forwardDVF), dim3(gridSize), dim3(blockSize), 0, 0, mx, my, mz, alpha_x, alpha_y, alpha_z, beta_x, beta_y, beta_z, volume, flow, nx, ny, nz); hipDeviceSynchronize(); hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipPitchedPtr dp_img = make_hipPitchedPtr((void*) d_img, nx * sizeof(float), nx, ny); hipMemcpy3DParms copyParams = {0}; struct hipExtent extent_img = make_hipExtent(nx, ny, nz); copyParams.extent = extent_img; copyParams.kind = hipMemcpyDeviceToDevice; copyParams.srcPtr = dp_img; hipArray *array_img; hipMalloc3DArray(&array_img, &channelDesc, extent_img); copyParams.dstArray = array_img; hipMemcpy3D(&copyParams); hipResourceDesc resDesc; hipTextureDesc texDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.addressMode[1] = hipAddressModeClamp; texDesc.addressMode[2] = hipAddressModeClamp; texDesc.filterMode = hipFilterModeLinear; texDesc.readMode = hipReadModeElementType; texDesc.normalizedCoords = 0; resDesc.res.array.array = array_img; hipTextureObject_t tex_img = 0; hipCreateTextureObject(&tex_img, &resDesc, &texDesc, NULL); hipLaunchKernelGGL(( kernel_deformation), dim3(gridSize), dim3(blockSize), 0, 0, d_img1, tex_img, mx, my, mz, nx, ny, nz); hipDeviceSynchronize(); hipFree(mx); hipFree(my); hipFree(mz); hipDestroyTextureObject(tex_img); hipFreeArray(array_img); } __host__ void host_deform2(float *d_img1, float *d_img, int nx, int ny, int nz, float volume, float flow, float *alpha_x, float *alpha_y, float *alpha_z, float *beta_x, float *beta_y, float *beta_z) { const dim3 gridSize((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z); const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z); float *mx, *my, *mz; hipMalloc((void**)&mx, nx * ny * nz * sizeof(float)); hipMalloc((void**)&my, nx * ny * nz * sizeof(float)); hipMalloc((void**)&mz, nx * ny * nz * sizeof(float)); hipLaunchKernelGGL(( kernel_forwardDVF), dim3(gridSize), dim3(blockSize), 0, 0, mx, my, mz, alpha_x, alpha_y, alpha_z, beta_x, beta_y, beta_z, volume, flow, nx, ny, nz); hipDeviceSynchronize(); hipLaunchKernelGGL(( kernel_deformation2), dim3(gridSize), dim3(blockSize), 0, 0, d_img1, d_img, mx, my, mz, nx, ny, nz); hipDeviceSynchronize(); hipFree(mx); hipFree(my); hipFree(mz); } __global__ void kernel_forwardDVF(float *mx, float *my, float *mz, float *alpha_x, float *alpha_y, float *alpha_z, float *beta_x, float *beta_y, float *beta_z, float volume, float flow, int nx, int ny, int nz) { int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x; int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y; int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z; if (ix >= nx || iy >= ny || iz >= nz) 
return; int id = ix + iy * nx + iz * nx * ny; mx[id] = alpha_x[id] * volume + beta_x[id] * flow; my[id] = alpha_y[id] * volume + beta_y[id] * flow; mz[id] = alpha_z[id] * volume + beta_z[id] * flow; } __global__ void kernel_deformation(float *img1, hipTextureObject_t tex_img, float *mx, float *my, float *mz, int nx, int ny, int nz){ int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x; int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y; int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z; if (ix >= nx || iy >= ny || iz >= nz) return; int id = iy + ix * ny + iz * nx * ny; float xi = iy + 1.0f + my[id]; float yi = ix + 1.0f + mx[id]; float zi = iz + 1.0f + mz[id]; img1[id] = tex3D<float>(tex_img, xi - 0.5f, yi - 0.5f, zi - 0.5f); } __global__ void kernel_deformation2(float *img1, float *img, float *mx, float *my, float *mz, int nx, int ny, int nz){ int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x; int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y; int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z; if (ix >= nx || iy >= ny || iz >= nz) return; int id = iy + ix * ny + iz * nx * ny; float dx, dy, dz; if (ix == nx - 1) dx = 0; else dx = img[id + 1] - img[id]; if (iy == ny - 1) dy = 0; else dy = img[id + nx] - img[id]; if (iz == nz - 1) dz = 0; else dz = img[id + nx * ny] - img[id]; img1[id] = img[id] + dy * mx[id] + dx * my[id] + dz * mz[id]; }
1147c99354c7d490c8cda601bc7809acfd9d5f32.cu
#include "cu_deform.h" __host__ void host_deform(float *d_img1, float *d_img, int nx, int ny, int nz, float volume, float flow, float *alpha_x, float *alpha_y, float *alpha_z, float *beta_x, float *beta_y, float *beta_z) { const dim3 gridSize((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z); const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z); float *mx, *my, *mz; cudaMalloc((void**)&mx, nx * ny * nz * sizeof(float)); cudaMalloc((void**)&my, nx * ny * nz * sizeof(float)); cudaMalloc((void**)&mz, nx * ny * nz * sizeof(float)); kernel_forwardDVF<<<gridSize, blockSize>>>(mx, my, mz, alpha_x, alpha_y, alpha_z, beta_x, beta_y, beta_z, volume, flow, nx, ny, nz); cudaDeviceSynchronize(); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaPitchedPtr dp_img = make_cudaPitchedPtr((void*) d_img, nx * sizeof(float), nx, ny); cudaMemcpy3DParms copyParams = {0}; struct cudaExtent extent_img = make_cudaExtent(nx, ny, nz); copyParams.extent = extent_img; copyParams.kind = cudaMemcpyDeviceToDevice; copyParams.srcPtr = dp_img; cudaArray *array_img; cudaMalloc3DArray(&array_img, &channelDesc, extent_img); copyParams.dstArray = array_img; cudaMemcpy3D(&copyParams); cudaResourceDesc resDesc; cudaTextureDesc texDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.addressMode[2] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModeLinear; texDesc.readMode = cudaReadModeElementType; texDesc.normalizedCoords = 0; resDesc.res.array.array = array_img; cudaTextureObject_t tex_img = 0; cudaCreateTextureObject(&tex_img, &resDesc, &texDesc, NULL); kernel_deformation<<<gridSize, blockSize>>>(d_img1, tex_img, mx, my, mz, nx, ny, nz); cudaDeviceSynchronize(); cudaFree(mx); cudaFree(my); cudaFree(mz); cudaDestroyTextureObject(tex_img); cudaFreeArray(array_img); } __host__ void host_deform2(float *d_img1, float *d_img, int nx, int ny, int nz, float volume, float flow, float *alpha_x, float *alpha_y, float *alpha_z, float *beta_x, float *beta_y, float *beta_z) { const dim3 gridSize((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z); const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z); float *mx, *my, *mz; cudaMalloc((void**)&mx, nx * ny * nz * sizeof(float)); cudaMalloc((void**)&my, nx * ny * nz * sizeof(float)); cudaMalloc((void**)&mz, nx * ny * nz * sizeof(float)); kernel_forwardDVF<<<gridSize, blockSize>>>(mx, my, mz, alpha_x, alpha_y, alpha_z, beta_x, beta_y, beta_z, volume, flow, nx, ny, nz); cudaDeviceSynchronize(); kernel_deformation2<<<gridSize, blockSize>>>(d_img1, d_img, mx, my, mz, nx, ny, nz); cudaDeviceSynchronize(); cudaFree(mx); cudaFree(my); cudaFree(mz); } __global__ void kernel_forwardDVF(float *mx, float *my, float *mz, float *alpha_x, float *alpha_y, float *alpha_z, float *beta_x, float *beta_y, float *beta_z, float volume, float flow, int nx, int ny, int nz) { int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x; int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y; int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z; if (ix >= nx || iy >= ny || iz >= nz) return; int id = ix + iy * nx + iz * nx * ny; mx[id] = alpha_x[id] * volume + beta_x[id] * flow; my[id] = alpha_y[id] * volume + beta_y[id] * flow; mz[id] = alpha_z[id] * volume + beta_z[id] * flow; 
} __global__ void kernel_deformation(float *img1, cudaTextureObject_t tex_img, float *mx, float *my, float *mz, int nx, int ny, int nz){ int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x; int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y; int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z; if (ix >= nx || iy >= ny || iz >= nz) return; int id = iy + ix * ny + iz * nx * ny; float xi = iy + 1.0f + my[id]; float yi = ix + 1.0f + mx[id]; float zi = iz + 1.0f + mz[id]; img1[id] = tex3D<float>(tex_img, xi - 0.5f, yi - 0.5f, zi - 0.5f); } __global__ void kernel_deformation2(float *img1, float *img, float *mx, float *my, float *mz, int nx, int ny, int nz){ int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x; int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y; int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z; if (ix >= nx || iy >= ny || iz >= nz) return; int id = iy + ix * ny + iz * nx * ny; float dx, dy, dz; if (ix == nx - 1) dx = 0; else dx = img[id + 1] - img[id]; if (iy == ny - 1) dy = 0; else dy = img[id + nx] - img[id]; if (iz == nz - 1) dz = 0; else dz = img[id + nx * ny] - img[id]; img1[id] = img[id] + dy * mx[id] + dx * my[id] + dz * mz[id]; }
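/* Editor's note (added sketch, not part of the original file): a rough host-side driver for
   host_deform() above. Buffer names and fill values are illustrative; every array must hold
   nx*ny*nz floats, matching the kernels' indexing. */
void example_deform(int nx, int ny, int nz, float volume, float flow)
{
    size_t bytes = (size_t)nx * ny * nz * sizeof(float);
    float *d_img, *d_img1, *ax, *ay, *az, *bx, *by, *bz;
    cudaMalloc((void**)&d_img, bytes);  cudaMalloc((void**)&d_img1, bytes);
    cudaMalloc((void**)&ax, bytes); cudaMalloc((void**)&ay, bytes); cudaMalloc((void**)&az, bytes);
    cudaMalloc((void**)&bx, bytes); cudaMalloc((void**)&by, bytes); cudaMalloc((void**)&bz, bytes);
    // ... copy the input image and the DVF coefficient volumes into the buffers here ...
    host_deform(d_img1, d_img, nx, ny, nz, volume, flow, ax, ay, az, bx, by, bz);
    // d_img1 now holds d_img warped by the motion field mx/my/mz built from (volume, flow)
    cudaFree(d_img); cudaFree(d_img1);
    cudaFree(ax); cudaFree(ay); cudaFree(az);
    cudaFree(bx); cudaFree(by); cudaFree(bz);
}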
4ea119cda3851f31f267b785497aa4f711bc1eb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _SCAN_NAIVE_KERNEL_H_ #define _SCAN_NAIVE_KERNEL_H_ #define NUM_ELEMENTS 512 // **===----------------- MP3 - Modify this function ---------------------===** //! @param g_idata input data in global memory // result is expected in index 0 of g_idata //! @param n input number of elements to scan from input data // **===------------------------------------------------------------------===** __global__ void reduction(float *g_data, int n) { // Performs reduction addition in log2(num_elements/2)+1 syncs int id = threadIdx.x; // Load array into local shared memory, each thread responsible for two // values it computes first, perform first level of reduction while we're // here __shared__ float s_data[NUM_ELEMENTS]; s_data[id*2] = g_data[id*2] + g_data[id*2+1]; for(int k = 2; k <= n; k = k*2) { __syncthreads(); if(id < n/(k*2)) s_data[k*id*2] = s_data[k*id*2] + s_data[k*(id*2+1)]; } if(id == 0) g_data[0] = s_data[0]; } #endif // #ifndef _SCAN_NAIVE_KERNEL_H_
4ea119cda3851f31f267b785497aa4f711bc1eb9.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _SCAN_NAIVE_KERNEL_H_ #define _SCAN_NAIVE_KERNEL_H_ #define NUM_ELEMENTS 512 // **===----------------- MP3 - Modify this function ---------------------===** //! @param g_idata input data in global memory // result is expected in index 0 of g_idata //! @param n input number of elements to scan from input data // **===------------------------------------------------------------------===** __global__ void reduction(float *g_data, int n) { // Performs reduction addition in log2(num_elements/2)+1 syncs int id = threadIdx.x; // Load array into local shared memory, each thread responsible for two // values it computes first, perform first level of reduction while we're // here __shared__ float s_data[NUM_ELEMENTS]; s_data[id*2] = g_data[id*2] + g_data[id*2+1]; for(int k = 2; k <= n; k = k*2) { __syncthreads(); if(id < n/(k*2)) s_data[k*id*2] = s_data[k*id*2] + s_data[k*(id*2+1)]; } if(id == 0) g_data[0] = s_data[0]; } #endif // #ifndef _SCAN_NAIVE_KERNEL_H_
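/* Editor's note (added sketch, not part of the original header): a minimal host driver for the
   reduction kernel above. Each thread first sums one pair of inputs, so NUM_ELEMENTS inputs need
   a single block of NUM_ELEMENTS/2 threads; the total comes back in element 0 of the array. */
#include <stdio.h>
int main()
{
    float h_data[NUM_ELEMENTS];
    float expected = 0.0f;
    for (int i = 0; i < NUM_ELEMENTS; ++i) { h_data[i] = 1.0f; expected += h_data[i]; }

    float *d_data;
    cudaMalloc((void**)&d_data, NUM_ELEMENTS * sizeof(float));
    cudaMemcpy(d_data, h_data, NUM_ELEMENTS * sizeof(float), cudaMemcpyHostToDevice);

    reduction<<<1, NUM_ELEMENTS / 2>>>(d_data, NUM_ELEMENTS);

    float result = 0.0f;
    cudaMemcpy(&result, d_data, sizeof(float), cudaMemcpyDeviceToHost);   // index 0 holds the sum
    printf("GPU sum = %f, CPU sum = %f\n", result, expected);
    cudaFree(d_data);
    return 0;
}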
97a6b080f4b4a86b2efa36f005ce7e4709ba5e2f.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_device #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; using Eigen::RowMajor; // Context for evaluation on cpu struct CPUContext { CPUContext(const Eigen::Tensor<float, 3>& in1, Eigen::Tensor<float, 3>& in2, Eigen::Tensor<float, 3>& out) : in1_(in1), in2_(in2), out_(out), kernel_1d_(2), kernel_2d_(2,2), kernel_3d_(2,2,2) { kernel_1d_(0) = 3.14f; kernel_1d_(1) = 2.7f; kernel_2d_(0,0) = 3.14f; kernel_2d_(1,0) = 2.7f; kernel_2d_(0,1) = 0.2f; kernel_2d_(1,1) = 7.0f; kernel_3d_(0,0,0) = 3.14f; kernel_3d_(0,1,0) = 2.7f; kernel_3d_(0,0,1) = 0.2f; kernel_3d_(0,1,1) = 7.0f; kernel_3d_(1,0,0) = -1.0f; kernel_3d_(1,1,0) = -0.3f; kernel_3d_(1,0,1) = -0.7f; kernel_3d_(1,1,1) = -0.5f; } const Eigen::DefaultDevice& device() const { return cpu_device_; } const Eigen::Tensor<float, 3>& in1() const { return in1_; } const Eigen::Tensor<float, 3>& in2() const { return in2_; } Eigen::Tensor<float, 3>& out() { return out_; } const Eigen::Tensor<float, 1>& kernel1d() const { return kernel_1d_; } const Eigen::Tensor<float, 2>& kernel2d() const { return kernel_2d_; } const Eigen::Tensor<float, 3>& kernel3d() const { return kernel_3d_; } private: const Eigen::Tensor<float, 3>& in1_; const Eigen::Tensor<float, 3>& in2_; Eigen::Tensor<float, 3>& out_; Eigen::Tensor<float, 1> kernel_1d_; Eigen::Tensor<float, 2> kernel_2d_; Eigen::Tensor<float, 3> kernel_3d_; Eigen::DefaultDevice cpu_device_; }; // Context for evaluation on GPU struct GPUContext { GPUContext(const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1, Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2, Eigen::TensorMap<Eigen::Tensor<float, 3> >& out) : in1_(in1), in2_(in2), out_(out), gpu_device_(&stream_) { assert(hipMalloc((void**)(&kernel_1d_), 2*sizeof(float)) == hipSuccess); float kernel_1d_val[] = {3.14f, 2.7f}; assert(hipMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), hipMemcpyHostToDevice) == hipSuccess); assert(hipMalloc((void**)(&kernel_2d_), 4*sizeof(float)) == hipSuccess); float kernel_2d_val[] = {3.14f, 2.7f, 0.2f, 7.0f}; assert(hipMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), hipMemcpyHostToDevice) == hipSuccess); assert(hipMalloc((void**)(&kernel_3d_), 8*sizeof(float)) == hipSuccess); float kernel_3d_val[] = {3.14f, -1.0f, 2.7f, -0.3f, 0.2f, -0.7f, 7.0f, -0.5f}; assert(hipMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), hipMemcpyHostToDevice) == hipSuccess); } ~GPUContext() { assert(hipFree(kernel_1d_) == hipSuccess); assert(hipFree(kernel_2d_) == hipSuccess); assert(hipFree(kernel_3d_) == hipSuccess); } const Eigen::GpuDevice& device() const { return gpu_device_; } const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1() const { return in1_; } const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2() const { return in2_; } Eigen::TensorMap<Eigen::Tensor<float, 3> >& out() { return out_; } Eigen::TensorMap<Eigen::Tensor<float, 1> > kernel1d() const { return Eigen::TensorMap<Eigen::Tensor<float, 1> >(kernel_1d_, 2); } 
Eigen::TensorMap<Eigen::Tensor<float, 2> > kernel2d() const { return Eigen::TensorMap<Eigen::Tensor<float, 2> >(kernel_2d_, 2, 2); } Eigen::TensorMap<Eigen::Tensor<float, 3> > kernel3d() const { return Eigen::TensorMap<Eigen::Tensor<float, 3> >(kernel_3d_, 2, 2, 2); } private: const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1_; const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2_; Eigen::TensorMap<Eigen::Tensor<float, 3> >& out_; float* kernel_1d_; float* kernel_2d_; float* kernel_3d_; Eigen::CudaStreamDevice stream_; Eigen::GpuDevice gpu_device_; }; // The actual expression to evaluate template <typename Context> void test_contextual_eval(Context* context) { context->out().device(context->device()) = context->in1() + context->in2() * 3.14f + context->in1().constant(2.718f); } template <typename Context> void test_forced_contextual_eval(Context* context) { context->out().device(context->device()) = (context->in1() + context->in2()).eval() * 3.14f + context->in1().constant(2.718f); } template <typename Context> void test_compound_assignment(Context* context) { context->out().device(context->device()) = context->in1().constant(2.718f); context->out().device(context->device()) += context->in1() + context->in2() * 3.14f; } template <typename Context> void test_contraction(Context* context) { Eigen::array<std::pair<int, int>, 2> dims; dims[0] = std::make_pair(1, 1); dims[1] = std::make_pair(2, 2); Eigen::array<int, 2> shape(40, 50*70); Eigen::DSizes<int, 2> indices(0,0); Eigen::DSizes<int, 2> sizes(40,40); context->out().reshape(shape).slice(indices, sizes).device(context->device()) = context->in1().contract(context->in2(), dims); } template <typename Context> void test_1d_convolution(Context* context) { Eigen::DSizes<int, 3> indices(0,0,0); Eigen::DSizes<int, 3> sizes(40,49,70); Eigen::array<int, 1> dims(1); context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel1d(), dims); } template <typename Context> void test_2d_convolution(Context* context) { Eigen::DSizes<int, 3> indices(0,0,0); Eigen::DSizes<int, 3> sizes(40,49,69); Eigen::array<int, 2> dims(1,2); context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel2d(), dims); } template <typename Context> void test_3d_convolution(Context* context) { Eigen::DSizes<int, 3> indices(0,0,0); Eigen::DSizes<int, 3> sizes(39,49,69); Eigen::array<int, 3> dims(0,1,2); context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel3d(), dims); } void test_cpu() { Eigen::Tensor<float, 3> in1(40,50,70); Eigen::Tensor<float, 3> in2(40,50,70); Eigen::Tensor<float, 3> out(40,50,70); in1 = in1.random() + in1.constant(10.0f); in2 = in2.random() + in2.constant(10.0f); CPUContext context(in1, in2, out); test_contextual_eval(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); } } } test_forced_contextual_eval(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f); } } } test_compound_assignment(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); } } } test_contraction(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 40; ++j) { const float result = 
out(i,j,0); float expected = 0; for (int k = 0; k < 50; ++k) { for (int l = 0; l < 70; ++l) { expected += in1(i, k, l) * in2(j, k, l); } } VERIFY_IS_APPROX(expected, result); } } test_1d_convolution(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f)); } } } test_2d_convolution(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 69; ++k) { const float result = out(i,j,k); const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f) + (in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f); if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) { continue; } VERIFY_IS_APPROX(expected, result); } } } test_3d_convolution(&context); for (int i = 0; i < 39; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 69; ++k) { const float result = out(i,j,k); const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f + in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f) + (in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f + in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f); if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) { continue; } VERIFY_IS_APPROX(expected, result); } } } } void test_gpu() { Eigen::Tensor<float, 3> in1(40,50,70); Eigen::Tensor<float, 3> in2(40,50,70); Eigen::Tensor<float, 3> out(40,50,70); in1 = in1.random() + in1.constant(10.0f); in2 = in2.random() + in2.constant(10.0f); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_out; hipMalloc((void**)(&d_in1), in1_bytes); hipMalloc((void**)(&d_in2), in2_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2.data(), in2_bytes, hipMemcpyHostToDevice); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, 40,50,70); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, 40,50,70); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, 40,50,70); GPUContext context(gpu_in1, gpu_in2, gpu_out); test_contextual_eval(&context); assert(hipMemcpy(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost) == hipSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); } } } test_forced_contextual_eval(&context); assert(hipMemcpy(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost) == hipSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f); } } } test_compound_assignment(&context); assert(hipMemcpy(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost) == hipSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); } } } test_contraction(&context); assert(hipMemcpy(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost) == hipSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 40; ++j) { const float result = out(i,j,0); float expected = 0; for (int k = 0; k < 50; ++k) { for (int l = 0; l < 70; ++l) { expected += in1(i, k, l) * in2(j, k, l); } } VERIFY_IS_APPROX(expected, result); } } test_1d_convolution(&context); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, context.device().stream()) 
== hipSuccess); assert(hipStreamSynchronize(context.device().stream()) == hipSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f)); } } } test_2d_convolution(&context); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, context.device().stream()) == hipSuccess); assert(hipStreamSynchronize(context.device().stream()) == hipSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 69; ++k) { const float result = out(i,j,k); const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f + in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f); VERIFY_IS_APPROX(expected, result); } } } test_3d_convolution(&context); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, context.device().stream()) == hipSuccess); assert(hipStreamSynchronize(context.device().stream()) == hipSuccess); for (int i = 0; i < 39; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 69; ++k) { const float result = out(i,j,k); const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f + in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f + in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f + in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f); VERIFY_IS_APPROX(expected, result); } } } } void test_cxx11_tensor_device() { CALL_SUBTEST_1(test_cpu()); CALL_SUBTEST_2(test_gpu()); }
97a6b080f4b4a86b2efa36f005ce7e4709ba5e2f.cu
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_device #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; using Eigen::RowMajor; // Context for evaluation on cpu struct CPUContext { CPUContext(const Eigen::Tensor<float, 3>& in1, Eigen::Tensor<float, 3>& in2, Eigen::Tensor<float, 3>& out) : in1_(in1), in2_(in2), out_(out), kernel_1d_(2), kernel_2d_(2,2), kernel_3d_(2,2,2) { kernel_1d_(0) = 3.14f; kernel_1d_(1) = 2.7f; kernel_2d_(0,0) = 3.14f; kernel_2d_(1,0) = 2.7f; kernel_2d_(0,1) = 0.2f; kernel_2d_(1,1) = 7.0f; kernel_3d_(0,0,0) = 3.14f; kernel_3d_(0,1,0) = 2.7f; kernel_3d_(0,0,1) = 0.2f; kernel_3d_(0,1,1) = 7.0f; kernel_3d_(1,0,0) = -1.0f; kernel_3d_(1,1,0) = -0.3f; kernel_3d_(1,0,1) = -0.7f; kernel_3d_(1,1,1) = -0.5f; } const Eigen::DefaultDevice& device() const { return cpu_device_; } const Eigen::Tensor<float, 3>& in1() const { return in1_; } const Eigen::Tensor<float, 3>& in2() const { return in2_; } Eigen::Tensor<float, 3>& out() { return out_; } const Eigen::Tensor<float, 1>& kernel1d() const { return kernel_1d_; } const Eigen::Tensor<float, 2>& kernel2d() const { return kernel_2d_; } const Eigen::Tensor<float, 3>& kernel3d() const { return kernel_3d_; } private: const Eigen::Tensor<float, 3>& in1_; const Eigen::Tensor<float, 3>& in2_; Eigen::Tensor<float, 3>& out_; Eigen::Tensor<float, 1> kernel_1d_; Eigen::Tensor<float, 2> kernel_2d_; Eigen::Tensor<float, 3> kernel_3d_; Eigen::DefaultDevice cpu_device_; }; // Context for evaluation on GPU struct GPUContext { GPUContext(const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1, Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2, Eigen::TensorMap<Eigen::Tensor<float, 3> >& out) : in1_(in1), in2_(in2), out_(out), gpu_device_(&stream_) { assert(cudaMalloc((void**)(&kernel_1d_), 2*sizeof(float)) == cudaSuccess); float kernel_1d_val[] = {3.14f, 2.7f}; assert(cudaMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess); assert(cudaMalloc((void**)(&kernel_2d_), 4*sizeof(float)) == cudaSuccess); float kernel_2d_val[] = {3.14f, 2.7f, 0.2f, 7.0f}; assert(cudaMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess); assert(cudaMalloc((void**)(&kernel_3d_), 8*sizeof(float)) == cudaSuccess); float kernel_3d_val[] = {3.14f, -1.0f, 2.7f, -0.3f, 0.2f, -0.7f, 7.0f, -0.5f}; assert(cudaMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess); } ~GPUContext() { assert(cudaFree(kernel_1d_) == cudaSuccess); assert(cudaFree(kernel_2d_) == cudaSuccess); assert(cudaFree(kernel_3d_) == cudaSuccess); } const Eigen::GpuDevice& device() const { return gpu_device_; } const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1() const { return in1_; } const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2() const { return in2_; } Eigen::TensorMap<Eigen::Tensor<float, 3> >& out() { return out_; } Eigen::TensorMap<Eigen::Tensor<float, 1> > kernel1d() const { return Eigen::TensorMap<Eigen::Tensor<float, 1> >(kernel_1d_, 2); } Eigen::TensorMap<Eigen::Tensor<float, 2> > 
kernel2d() const { return Eigen::TensorMap<Eigen::Tensor<float, 2> >(kernel_2d_, 2, 2); } Eigen::TensorMap<Eigen::Tensor<float, 3> > kernel3d() const { return Eigen::TensorMap<Eigen::Tensor<float, 3> >(kernel_3d_, 2, 2, 2); } private: const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1_; const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2_; Eigen::TensorMap<Eigen::Tensor<float, 3> >& out_; float* kernel_1d_; float* kernel_2d_; float* kernel_3d_; Eigen::CudaStreamDevice stream_; Eigen::GpuDevice gpu_device_; }; // The actual expression to evaluate template <typename Context> void test_contextual_eval(Context* context) { context->out().device(context->device()) = context->in1() + context->in2() * 3.14f + context->in1().constant(2.718f); } template <typename Context> void test_forced_contextual_eval(Context* context) { context->out().device(context->device()) = (context->in1() + context->in2()).eval() * 3.14f + context->in1().constant(2.718f); } template <typename Context> void test_compound_assignment(Context* context) { context->out().device(context->device()) = context->in1().constant(2.718f); context->out().device(context->device()) += context->in1() + context->in2() * 3.14f; } template <typename Context> void test_contraction(Context* context) { Eigen::array<std::pair<int, int>, 2> dims; dims[0] = std::make_pair(1, 1); dims[1] = std::make_pair(2, 2); Eigen::array<int, 2> shape(40, 50*70); Eigen::DSizes<int, 2> indices(0,0); Eigen::DSizes<int, 2> sizes(40,40); context->out().reshape(shape).slice(indices, sizes).device(context->device()) = context->in1().contract(context->in2(), dims); } template <typename Context> void test_1d_convolution(Context* context) { Eigen::DSizes<int, 3> indices(0,0,0); Eigen::DSizes<int, 3> sizes(40,49,70); Eigen::array<int, 1> dims(1); context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel1d(), dims); } template <typename Context> void test_2d_convolution(Context* context) { Eigen::DSizes<int, 3> indices(0,0,0); Eigen::DSizes<int, 3> sizes(40,49,69); Eigen::array<int, 2> dims(1,2); context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel2d(), dims); } template <typename Context> void test_3d_convolution(Context* context) { Eigen::DSizes<int, 3> indices(0,0,0); Eigen::DSizes<int, 3> sizes(39,49,69); Eigen::array<int, 3> dims(0,1,2); context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel3d(), dims); } void test_cpu() { Eigen::Tensor<float, 3> in1(40,50,70); Eigen::Tensor<float, 3> in2(40,50,70); Eigen::Tensor<float, 3> out(40,50,70); in1 = in1.random() + in1.constant(10.0f); in2 = in2.random() + in2.constant(10.0f); CPUContext context(in1, in2, out); test_contextual_eval(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); } } } test_forced_contextual_eval(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f); } } } test_compound_assignment(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); } } } test_contraction(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 40; ++j) { const float result = out(i,j,0); float expected = 0; for (int k = 
0; k < 50; ++k) { for (int l = 0; l < 70; ++l) { expected += in1(i, k, l) * in2(j, k, l); } } VERIFY_IS_APPROX(expected, result); } } test_1d_convolution(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f)); } } } test_2d_convolution(&context); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 69; ++k) { const float result = out(i,j,k); const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f) + (in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f); if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) { continue; } VERIFY_IS_APPROX(expected, result); } } } test_3d_convolution(&context); for (int i = 0; i < 39; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 69; ++k) { const float result = out(i,j,k); const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f + in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f) + (in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f + in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f); if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) { continue; } VERIFY_IS_APPROX(expected, result); } } } } void test_gpu() { Eigen::Tensor<float, 3> in1(40,50,70); Eigen::Tensor<float, 3> in2(40,50,70); Eigen::Tensor<float, 3> out(40,50,70); in1 = in1.random() + in1.constant(10.0f); in2 = in2.random() + in2.constant(10.0f); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_out; cudaMalloc((void**)(&d_in1), in1_bytes); cudaMalloc((void**)(&d_in2), in2_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, 40,50,70); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, 40,50,70); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, 40,50,70); GPUContext context(gpu_in1, gpu_in2, gpu_out); test_contextual_eval(&context); assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); } } } test_forced_contextual_eval(&context); assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f); } } } test_compound_assignment(&context); assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 50; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); } } } test_contraction(&context); assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 40; ++j) { const float result = out(i,j,0); float expected = 0; for (int k = 0; k < 50; ++k) { for (int l = 0; l < 70; ++l) { expected += in1(i, k, l) * in2(j, k, l); } } VERIFY_IS_APPROX(expected, result); } } test_1d_convolution(&context); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess); 
assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 70; ++k) { VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f)); } } } test_2d_convolution(&context); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess); assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess); for (int i = 0; i < 40; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 69; ++k) { const float result = out(i,j,k); const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f + in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f); VERIFY_IS_APPROX(expected, result); } } } test_3d_convolution(&context); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess); assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess); for (int i = 0; i < 39; ++i) { for (int j = 0; j < 49; ++j) { for (int k = 0; k < 69; ++k) { const float result = out(i,j,k); const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f + in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f + in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f + in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f); VERIFY_IS_APPROX(expected, result); } } } } void test_cxx11_tensor_device() { CALL_SUBTEST_1(test_cpu()); CALL_SUBTEST_2(test_gpu()); }
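Stripped of the test harness, the GPU path in the file above reduces to one pattern: wrap raw device pointers in TensorMap, build a GpuDevice over a CUDA stream, and assign the expression through .device(). The sketch below is an editorial distillation using the same Eigen APIs the test relies on (CudaStreamDevice, GpuDevice, TensorMap); the function name and the 1-D shape are invented for illustration.

#define EIGEN_USE_GPU
#include <cuda_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>

// Computes c = a + b * 3.14f for n-element buffers that already live in device memory.
void scaled_sum_on_gpu(float* d_a, float* d_b, float* d_c, int n)
{
    Eigen::CudaStreamDevice stream;       // wraps the default CUDA stream
    Eigen::GpuDevice gpu_device(&stream);

    Eigen::TensorMap<Eigen::Tensor<float, 1> > a(d_a, n);
    Eigen::TensorMap<Eigen::Tensor<float, 1> > b(d_b, n);
    Eigen::TensorMap<Eigen::Tensor<float, 1> > c(d_c, n);

    // Assigning through .device(gpu_device) compiles the expression into a
    // kernel and launches it on the device's stream.
    c.device(gpu_device) = a + b * 3.14f;
    cudaStreamSynchronize(gpu_device.stream());
}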
b27c382e4b69198f3eddf68c31b44077e28a6046.hip
// !!! This is a file automatically generated by hipify!!!
//Matrix Multiplication in CUDA
#include <stdio.h>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <hip/hip_runtime.h>

// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward

#define WIDTH 32
#define BLOCKSIZE 8

extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);

// FILL HERE: define constant variable

// MatrixMul kernel
/**
 * CUDA Kernel Device code
 *
 * Computes the matrix multiplication of A and B into C. The 3 matrices have the same
 * number of elements WIDTH*WIDTH.
 */
// FILL HERE: translate C-version matrixMul to CUDA-version kernel code
__global__ void MatrixMulCUDA(float* A, float* B, float* C, unsigned long long *runtime)
{
    unsigned long long start_time = clock64();

    int blockRow = blockIdx.x;
    int blockCol = blockIdx.y;
    int row = threadIdx.x;
    int col = threadIdx.y;

    float cVal = 0.0;

    for(int nIter=0; nIter<WIDTH/BLOCKSIZE; nIter++)
    {
        __shared__ float shared_A[BLOCKSIZE][BLOCKSIZE];
        __shared__ float shared_B[BLOCKSIZE][BLOCKSIZE];

        shared_A[row][col] = A[(blockRow*BLOCKSIZE+row) * WIDTH + (blockCol*BLOCKSIZE+col)];
        shared_B[row][col] = B[(blockRow*BLOCKSIZE+row) * WIDTH + (blockCol*BLOCKSIZE+col)];
        __syncthreads();

        for(int k = 0; k < BLOCKSIZE; k++)
        {
            cVal += shared_A[row][k] * shared_B[k][col];
        }
        __syncthreads();
    }

    C[(blockRow*BLOCKSIZE+row) * WIDTH + (blockCol*BLOCKSIZE+col)] = cVal;

    unsigned long long stop_time = clock64();
    runtime[row] = (unsigned long long)(stop_time - start_time);
}

/**
 * Host main routine
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    hipError_t err = hipSuccess;

    // Print the matrix size to be used, and compute its size
    int size = WIDTH*WIDTH*sizeof(float);
    printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH);

    // Allocate the host input matrix h_A
    float *h_A = (float *)malloc(size);

    // Allocate the host input matrix h_B
    float *h_B = (float *)malloc(size);

    // Allocate the host input matrix h_C
    float *h_C = (float *)malloc(size);

    // Allocate the host matrix for compute check
    float *reference = (float *)malloc(size);

    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrices!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input matrices
    for (int i = 0; i < WIDTH; ++i)
    {
        for (int j = 0; j < WIDTH; ++j)
        {
            h_A[i*WIDTH + j] = 0.01f;
            h_B[i*WIDTH + j] = 1.0f;
        }
    }
    memset(h_C, 0, size);
    memset(reference, 0, size);

    // compute the matrix multiplication on the CPU for comparison
    computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH);

    // Allocate device input matrices
    // TODO : Leave/Remove the given hipMalloc code properly
    // -->
    float* d_A = NULL;
    err = hipMalloc((void**)&d_A, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    float* d_B = NULL;
    err = hipMalloc((void**)&d_B, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // <--

    // Allocate the device output matrix
    float* d_C = NULL;
    err = hipMalloc((void**)&d_C, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input matrix A and B in host memory to the device input matrices in
    // device memory
    // TODO : Add proper mem copy APIs according to the memory that matrix A and B will be stored
    // -->
    printf("Copy input data from the host memory to the CUDA device\n");
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // <--

    // TODO : Clock Measurements
    // Add code to return clock cycles from kernel
    // -->
#ifdef TM
    unsigned long long* d_runtime;
    int r_size = WIDTH*WIDTH*sizeof(unsigned long long);
    unsigned long long* runtime = (unsigned long long*)malloc(r_size);
    memset(runtime, 0, r_size);
    hipMalloc((void**)&d_runtime, r_size);
#endif
    // <--

    // TODO : Kernel Invocation
    // Assign as many threads as the size of matrix in a thread block and
    // invoke the kernel function.
    // -->
    dim3 blocksPerGrid (WIDTH/BLOCKSIZE, WIDTH/BLOCKSIZE);// FILL HERE
    dim3 threadsPerBlock (BLOCKSIZE,BLOCKSIZE);// FILL HERE
    printf("CUDA kernel launch with %d blocks of %d threads\n", (WIDTH/BLOCKSIZE)*(WIDTH/BLOCKSIZE), BLOCKSIZE*BLOCKSIZE);
    hipLaunchKernelGGL(( MatrixMulCUDA), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, d_runtime);
    // <--

    err = hipGetLastError();
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    hipDeviceSynchronize();

    // Copy the device result matrix in device memory to the host result matrix
    // in host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    hipDeviceSynchronize();

    // Verify that the result matrix is correct
    bool res = 1;
    for (int i = 0; i < WIDTH*WIDTH; i++)
    {
        float diff = fabs(reference[i] - h_C[i]);
        if(diff > 0.001f)
        {
            res = 0;
            break;
        }
    }
    printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED");

    // TODO : Get elapsed clock cycles from device to host
    // Take the longest time as kernel execution time
    // -->
#ifdef TM
    hipMemcpy(runtime, d_runtime, r_size, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    unsigned long long elapsed_time = 0;
    for(int i = 0; i < WIDTH*WIDTH; i++)
        if(elapsed_time < runtime[i])
            elapsed_time = runtime[i];
    printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
#endif
    // <--

    // TODO : Free device global memory
    // Leave/Remove the given hipFree statement according to your data allocation
    // -->
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
#ifdef TM
    hipFree(d_runtime);
#endif
    // <--

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
#ifdef TM
    free(runtime);
#endif

    return 0;
}

void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int i = 0; i < hA; ++i)
        for (unsigned int j = 0; j < wB; ++j)
        {
            double sum = 0;
            for (unsigned int k = 0; k < wA; ++k)
            {
                double a = A[i * wA + k];
                double b = B[k * wB + j];
                sum += a * b;
            }
            C[i * wB + j] = (float)sum;
        }
}
b27c382e4b69198f3eddf68c31b44077e28a6046.cu
//Matrix Multiplication in CUDA
#include <stdio.h>
//#include <string.h>
//#include <assert.h>
//#include <stdlib.h>
#include <cuda_runtime.h>

// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward

#define WIDTH 32
#define BLOCKSIZE 8

extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);

// FILL HERE: define constant variable

// MatrixMul kernel
/**
 * CUDA Kernel Device code
 *
 * Computes the matrix multiplication of A and B into C. The 3 matrices have the same
 * number of elements WIDTH*WIDTH.
 */
// FILL HERE: translate C-version matrixMul to CUDA-version kernel code
__global__ void MatrixMulCUDA(float* A, float* B, float* C, unsigned long long *runtime)
{
    unsigned long long start_time = clock64();

    int blockRow = blockIdx.x;
    int blockCol = blockIdx.y;
    int row = threadIdx.x;
    int col = threadIdx.y;

    float cVal = 0.0;

    for(int nIter=0; nIter<WIDTH/BLOCKSIZE; nIter++)
    {
        __shared__ float shared_A[BLOCKSIZE][BLOCKSIZE];
        __shared__ float shared_B[BLOCKSIZE][BLOCKSIZE];

        shared_A[row][col] = A[(blockRow*BLOCKSIZE+row) * WIDTH + (blockCol*BLOCKSIZE+col)];
        shared_B[row][col] = B[(blockRow*BLOCKSIZE+row) * WIDTH + (blockCol*BLOCKSIZE+col)];
        __syncthreads();

        for(int k = 0; k < BLOCKSIZE; k++)
        {
            cVal += shared_A[row][k] * shared_B[k][col];
        }
        __syncthreads();
    }

    C[(blockRow*BLOCKSIZE+row) * WIDTH + (blockCol*BLOCKSIZE+col)] = cVal;

    unsigned long long stop_time = clock64();
    runtime[row] = (unsigned long long)(stop_time - start_time);
}

/**
 * Host main routine
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Print the matrix size to be used, and compute its size
    int size = WIDTH*WIDTH*sizeof(float);
    printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH);

    // Allocate the host input matrix h_A
    float *h_A = (float *)malloc(size);

    // Allocate the host input matrix h_B
    float *h_B = (float *)malloc(size);

    // Allocate the host input matrix h_C
    float *h_C = (float *)malloc(size);

    // Allocate the host matrix for compute check
    float *reference = (float *)malloc(size);

    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrices!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input matrices
    for (int i = 0; i < WIDTH; ++i)
    {
        for (int j = 0; j < WIDTH; ++j)
        {
            h_A[i*WIDTH + j] = 0.01f;
            h_B[i*WIDTH + j] = 1.0f;
        }
    }
    memset(h_C, 0, size);
    memset(reference, 0, size);

    // compute the matrix multiplication on the CPU for comparison
    computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH);

    // Allocate device input matrices
    // TODO : Leave/Remove the given cudaMalloc code properly
    // -->
    float* d_A = NULL;
    err = cudaMalloc((void**)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    float* d_B = NULL;
    err = cudaMalloc((void**)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // <--

    // Allocate the device output matrix
    float* d_C = NULL;
    err = cudaMalloc((void**)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input matrix A and B in host memory to the device input matrices in
    // device memory
    // TODO : Add proper mem copy APIs according to the memory that matrix A and B will be stored
    // -->
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // <--

    // TODO : Clock Measurements
    // Add code to return clock cycles from kernel
    // -->
#ifdef TM
    unsigned long long* d_runtime;
    int r_size = WIDTH*WIDTH*sizeof(unsigned long long);
    unsigned long long* runtime = (unsigned long long*)malloc(r_size);
    memset(runtime, 0, r_size);
    cudaMalloc((void**)&d_runtime, r_size);
#endif
    // <--

    // TODO : Kernel Invocation
    // Assign as many threads as the size of matrix in a thread block and
    // invoke the kernel function.
    // -->
    dim3 blocksPerGrid (WIDTH/BLOCKSIZE, WIDTH/BLOCKSIZE);// FILL HERE
    dim3 threadsPerBlock (BLOCKSIZE,BLOCKSIZE);// FILL HERE
    printf("CUDA kernel launch with %d blocks of %d threads\n", (WIDTH/BLOCKSIZE)*(WIDTH/BLOCKSIZE), BLOCKSIZE*BLOCKSIZE);
    MatrixMulCUDA<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_runtime);
    // <--

    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaThreadSynchronize();

    // Copy the device result matrix in device memory to the host result matrix
    // in host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaThreadSynchronize();

    // Verify that the result matrix is correct
    bool res = 1;
    for (int i = 0; i < WIDTH*WIDTH; i++)
    {
        float diff = fabs(reference[i] - h_C[i]);
        if(diff > 0.001f)
        {
            res = 0;
            break;
        }
    }
    printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED");

    // TODO : Get elapsed clock cycles from device to host
    // Take the longest time as kernel execution time
    // -->
#ifdef TM
    cudaMemcpy(runtime, d_runtime, r_size, cudaMemcpyDeviceToHost);
    cudaThreadSynchronize();
    unsigned long long elapsed_time = 0;
    for(int i = 0; i < WIDTH*WIDTH; i++)
        if(elapsed_time < runtime[i])
            elapsed_time = runtime[i];
    printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
#endif
    // <--

    // TODO : Free device global memory
    // Leave/Remove the given cudaFree statement according to your data allocation
    // -->
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
#ifdef TM
    cudaFree(d_runtime);
#endif
    // <--

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
#ifdef TM
    free(runtime);
#endif

    return 0;
}

void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int i = 0; i < hA; ++i)
        for (unsigned int j = 0; j < wB; ++j)
        {
            double sum = 0;
            for (unsigned int k = 0; k < wA; ++k)
            {
                double a = A[i * wA + k];
                double b = B[k * wB + j];
                sum += a * b;
            }
            C[i * wB + j] = (float)sum;
        }
}
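The assignment above times the kernel with per-thread clock64() samples copied back through d_runtime. Purely as a point of comparison (not part of the assignment), host-side timing is more commonly done with CUDA events; the fragment below assumes the variables declared in main() above and would wrap the existing launch.

// Hypothetical alternative timing, reusing blocksPerGrid, threadsPerBlock,
// d_A, d_B, d_C and d_runtime exactly as declared in main() above.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
MatrixMulCUDA<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_runtime);
cudaEventRecord(stop);
cudaEventSynchronize(stop);

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);  // wall-clock kernel time in milliseconds
printf("Kernel Execution Time: %.3f ms\n", ms);

cudaEventDestroy(start);
cudaEventDestroy(stop);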
dict_enc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/sort.h> namespace cudf { namespace io { namespace orc { namespace gpu { constexpr uint32_t max_dict_entries = default_row_index_stride; constexpr int init_hash_bits = 12; struct dictinit_state_s { uint32_t nnz; uint32_t total_dupes; DictionaryChunk chunk; volatile uint32_t scratch_red[32]; uint32_t dict[max_dict_entries]; union { uint16_t u16[1 << (init_hash_bits)]; uint32_t u32[1 << (init_hash_bits - 1)]; } map; }; /** * @brief Return a 12-bit hash from a byte sequence */ static inline __device__ uint32_t nvstr_init_hash(char const *ptr, uint32_t len) { if (len != 0) { return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1); } else { return 0; } } /** * @brief Fill dictionary with the indices of non-null rows * * @param[in,out] s dictionary builder state * @param[in] t thread id * @param[in] temp_storage shared memory storage to scan non-null positions */ template <int block_size, typename Storage> static __device__ void LoadNonNullIndices(volatile dictinit_state_s *s, int t, Storage &temp_storage) { if (t == 0) { s->nnz = 0; } for (uint32_t i = 0; i < s->chunk.num_rows; i += 512) { const uint32_t *valid_map = s->chunk.valid_map_base; uint32_t is_valid, nz_pos; if (t < 16) { if (!valid_map) { s->scratch_red[t] = 0xffffffffu; } else { uint32_t row = s->chunk.start_row + i + t * 32; uint32_t v = (row < s->chunk.start_row + s->chunk.num_rows) ? valid_map[(row + s->chunk.column_offset) / 32] : 0; if (row & 0x1f) { uint32_t v1 = (row + 32 < s->chunk.start_row + s->chunk.num_rows) ? valid_map[((row + s->chunk.column_offset) / 32) + 1] : 0; v = __funnelshift_r(v, v1, row + s->chunk.column_offset); } s->scratch_red[t] = v; } } __syncthreads(); is_valid = (i + t < s->chunk.num_rows) ? 
(s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0; uint32_t tmp_nnz; hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>(temp_storage) .ExclusiveSum(is_valid, nz_pos, tmp_nnz); nz_pos += s->nnz; __syncthreads(); if (!t) { s->nnz += tmp_nnz; } if (is_valid) { s->dict[nz_pos] = i + t; } __syncthreads(); } } /** * @brief Gather all non-NULL string rows and compute total character data size * * @param[in] chunks DictionaryChunk device array [rowgroup][column] * @param[in] num_columns Number of columns */ // blockDim {512,1,1} template <int block_size> __global__ void __launch_bounds__(block_size, 2) gpuInitDictionaryIndices(DictionaryChunk *chunks, uint32_t num_columns) { __shared__ __align__(16) dictinit_state_s state_g; using block_reduce = hipcub::BlockReduce<uint32_t, block_size>; using block_scan = hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>; __shared__ union { typename block_reduce::TempStorage reduce_storage; typename block_scan::TempStorage scan_storage; } temp_storage; dictinit_state_s *const s = &state_g; uint32_t col_id = blockIdx.x; uint32_t group_id = blockIdx.y; const nvstrdesc_s *ck_data; uint32_t *dict_data; uint32_t nnz, start_row, dict_char_count; int t = threadIdx.x; if (t == 0) s->chunk = chunks[group_id * num_columns + col_id]; for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) { if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0; } __syncthreads(); // First, take care of NULLs, and count how many strings we have (TODO: bypass this step when // there are no nulls) LoadNonNullIndices<block_size>(s, t, temp_storage.scan_storage); // Sum the lengths of all the strings if (t == 0) { s->chunk.string_char_count = 0; s->total_dupes = 0; } nnz = s->nnz; dict_data = s->chunk.dict_data; start_row = s->chunk.start_row; ck_data = static_cast<const nvstrdesc_s *>(s->chunk.column_data_base) + start_row; for (uint32_t i = 0; i < nnz; i += block_size) { uint32_t ck_row = 0; uint32_t hash = 0; uint32_t len = 0; if (i + t < nnz) { ck_row = s->dict[i + t]; len = static_cast<uint32_t>(ck_data[ck_row].count); hash = nvstr_init_hash(ck_data[ck_row].ptr, len); } len = block_reduce(temp_storage.reduce_storage).Sum(len); if (t == 0) s->chunk.string_char_count += len; if (i + t < nnz) { atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 
16 : 0)); dict_data[i + t] = start_row + ck_row; } __syncthreads(); } // Reorder the 16-bit local indices according to the hash value of the strings static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12"); { // Cumulative sum of hash map counts uint32_t count01 = s->map.u32[t * 4 + 0]; uint32_t count23 = s->map.u32[t * 4 + 1]; uint32_t count45 = s->map.u32[t * 4 + 2]; uint32_t count67 = s->map.u32[t * 4 + 3]; uint32_t sum01 = count01 + (count01 << 16); uint32_t sum23 = count23 + (count23 << 16); uint32_t sum45 = count45 + (count45 << 16); uint32_t sum67 = count67 + (count67 << 16); sum23 += (sum01 >> 16) * 0x10001; sum45 += (sum23 >> 16) * 0x10001; sum67 += (sum45 >> 16) * 0x10001; uint32_t sum_w = sum67 >> 16; block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w); __syncthreads(); sum_w = (sum_w - (sum67 >> 16)) * 0x10001; s->map.u32[t * 4 + 0] = sum_w + sum01 - count01; s->map.u32[t * 4 + 1] = sum_w + sum23 - count23; s->map.u32[t * 4 + 2] = sum_w + sum45 - count45; s->map.u32[t * 4 + 3] = sum_w + sum67 - count67; __syncthreads(); } // Put the indices back in hash order for (uint32_t i = 0; i < nnz; i += block_size) { uint32_t ck_row = 0, pos = 0, hash = 0, pos_old, pos_new, sh, colliding_row; bool collision; if (i + t < nnz) { ck_row = dict_data[i + t] - start_row; hash = nvstr_init_hash(ck_data[ck_row].ptr, static_cast<uint32_t>(ck_data[ck_row].count)); sh = (hash & 1) ? 16 : 0; pos_old = s->map.u16[hash]; } // The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic // behavior for the first row in the hash map that will be used for early duplicate detection __syncthreads(); if (i + t < nnz) { pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff; s->dict[pos] = ck_row; } __syncthreads(); collision = false; if (i + t < nnz) { pos_new = s->map.u16[hash]; collision = (pos != pos_old && pos_new > pos_old + 1); if (collision) { colliding_row = s->dict[pos_old]; } } __syncthreads(); if (collision) { atomicMin(s->dict + pos_old, ck_row); } __syncthreads(); // Resolve collision if (collision && ck_row == s->dict[pos_old]) { s->dict[pos] = colliding_row; } } __syncthreads(); // Now that the strings are ordered by hash, compare every string with the first entry in the hash // map, the position of the first string can be inferred from the hash map counts dict_char_count = 0; for (uint32_t i = 0; i < nnz; i += block_size) { uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0; if (i + t < nnz) { const char *str1, *str2; uint32_t len1, len2, hash; ck_row = s->dict[i + t]; str1 = ck_data[ck_row].ptr; len1 = static_cast<uint32_t>(ck_data[ck_row].count); hash = nvstr_init_hash(str1, len1); ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0]; if (ck_row_ref != ck_row) { str2 = ck_data[ck_row_ref].ptr; len2 = static_cast<uint32_t>(ck_data[ck_row_ref].count); is_dupe = nvstr_is_equal(str1, len1, str2, len2); dict_char_count += (is_dupe) ? 0 : len1; } } uint32_t dupes_in_block; uint32_t dupes_before; block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block); dupes_before += s->total_dupes; __syncthreads(); if (!t) { s->total_dupes += dupes_in_block; } if (i + t < nnz) { if (!is_dupe) { dict_data[i + t - dupes_before] = ck_row + start_row; } else { s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31); } } } // temp_storage is being used twice, so make sure there is `__syncthreads()` between them // while making any future changes. 
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count); if (!t) { chunks[group_id * num_columns + col_id].num_strings = nnz; chunks[group_id * num_columns + col_id].string_char_count = s->chunk.string_char_count; chunks[group_id * num_columns + col_id].num_dict_strings = nnz - s->total_dupes; chunks[group_id * num_columns + col_id].dict_char_count = dict_char_count; } } /** * @brief In-place concatenate dictionary data for all chunks in each stripe * * @param[in] stripes StripeDictionary device array [stripe][column] * @param[in] chunks DictionaryChunk device array [rowgroup][column] * @param[in] num_columns Number of columns */ // blockDim {1024,1,1} extern "C" __global__ void __launch_bounds__(1024) gpuCompactChunkDictionaries(StripeDictionary *stripes, DictionaryChunk const *chunks, uint32_t num_columns) { __shared__ __align__(16) StripeDictionary stripe_g; __shared__ __align__(16) DictionaryChunk chunk_g; __shared__ const uint32_t *volatile ck_curptr_g; __shared__ uint32_t volatile ck_curlen_g; uint32_t col_id = blockIdx.x; uint32_t stripe_id = blockIdx.y; uint32_t chunk_len; int t = threadIdx.x; const uint32_t *src; uint32_t *dst; if (t == 0) stripe_g = stripes[stripe_id * num_columns + col_id]; __syncthreads(); if (!stripe_g.dict_data) { return; } if (t == 0) chunk_g = chunks[stripe_g.start_chunk * num_columns + col_id]; __syncthreads(); dst = stripe_g.dict_data + chunk_g.num_dict_strings; for (uint32_t g = 1; g < stripe_g.num_chunks; g++) { if (!t) { src = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].dict_data; chunk_len = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].num_dict_strings; ck_curptr_g = src; ck_curlen_g = chunk_len; } __syncthreads(); src = ck_curptr_g; chunk_len = ck_curlen_g; if (src != dst) { for (uint32_t i = 0; i < chunk_len; i += 1024) { uint32_t idx = (i + t < chunk_len) ? 
src[i + t] : 0; __syncthreads(); if (i + t < chunk_len) dst[i + t] = idx; } } dst += chunk_len; __syncthreads(); } } struct build_state_s { uint32_t total_dupes; StripeDictionary stripe; volatile uint32_t scratch_red[32]; }; /** * @brief Eliminate duplicates in-place and generate column dictionary index * * @param[in] stripes StripeDictionary device array [stripe][column] * @param[in] num_columns Number of string columns */ // NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary // blockDim {1024,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuBuildStripeDictionaries(StripeDictionary *stripes, uint32_t num_columns) { __shared__ __align__(16) build_state_s state_g; using block_reduce = hipcub::BlockReduce<uint32_t, block_size>; using block_scan = hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>; __shared__ union { typename block_reduce::TempStorage reduce_storage; typename block_scan::TempStorage scan_storage; } temp_storage; build_state_s *const s = &state_g; uint32_t col_id = blockIdx.x; uint32_t stripe_id = blockIdx.y; uint32_t num_strings; uint32_t *dict_data, *dict_index; uint32_t dict_char_count; const nvstrdesc_s *str_data; int t = threadIdx.x; if (t == 0) s->stripe = stripes[stripe_id * num_columns + col_id]; if (t == 31 * 32) { s->total_dupes = 0; } __syncthreads(); num_strings = s->stripe.num_strings; dict_data = s->stripe.dict_data; if (!dict_data) return; dict_index = s->stripe.dict_index; str_data = static_cast<const nvstrdesc_s *>(s->stripe.column_data_base); dict_char_count = 0; for (uint32_t i = 0; i < num_strings; i += block_size) { uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0; uint32_t cur_len = 0; const char *cur_ptr; bool is_dupe = false; if (i + t < num_strings) { cur_ptr = str_data[cur].ptr; cur_len = str_data[cur].count; } if (i + t != 0 && i + t < num_strings) { uint32_t prev = dict_data[i + t - 1]; is_dupe = nvstr_is_equal(cur_ptr, cur_len, str_data[prev].ptr, str_data[prev].count); } dict_char_count += (is_dupe) ? 
0 : cur_len; uint32_t dupes_in_block; uint32_t dupes_before; block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block); dupes_before += s->total_dupes; __syncthreads(); if (!t) { s->total_dupes += dupes_in_block; } if (i + t < num_strings) { dict_index[cur] = i + t - dupes_before; if (!is_dupe && dupes_before != 0) { dict_data[i + t - dupes_before] = cur; } } __syncthreads(); } dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count); if (t == 0) { stripes[stripe_id * num_columns + col_id].num_strings = num_strings - s->total_dupes; stripes[stripe_id * num_columns + col_id].dict_char_count = dict_char_count; } } /** * @brief Launches kernel for initializing dictionary chunks * * @param[in] chunks DictionaryChunk device array [rowgroup][column] * @param[in] num_columns Number of columns * @param[in] num_rowgroups Number of row groups * @param[in] stream CUDA stream to use, default 0 */ void InitDictionaryIndices(DictionaryChunk *chunks, uint32_t num_columns, uint32_t num_rowgroups, rmm::cuda_stream_view stream) { dim3 dim_block(512, 1); // 512 threads per chunk dim3 dim_grid(num_columns, num_rowgroups); hipLaunchKernelGGL(( gpuInitDictionaryIndices<512>), dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, num_columns); } /** * @brief Launches kernel for building stripe dictionaries * * @param[in] stripes StripeDictionary device array [stripe][column] * @param[in] stripes_host StripeDictionary host array [stripe][column] * @param[in] chunks DictionaryChunk device array [rowgroup][column] * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups * @param[in] num_columns Number of columns * @param[in] stream CUDA stream to use, default 0 */ void BuildStripeDictionaries(StripeDictionary *stripes, StripeDictionary *stripes_host, DictionaryChunk const *chunks, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t num_columns, rmm::cuda_stream_view stream) { dim3 dim_block(1024, 1); // 1024 threads per chunk dim3 dim_grid_build(num_columns, num_stripes); hipLaunchKernelGGL(( gpuCompactChunkDictionaries), dim3(dim_grid_build), dim3(dim_block), 0, stream.value(), stripes, chunks, num_columns); for (uint32_t i = 0; i < num_stripes * num_columns; i++) { if (stripes_host[i].dict_data != nullptr) { thrust::device_ptr<uint32_t> p = thrust::device_pointer_cast(stripes_host[i].dict_data); const nvstrdesc_s *str_data = static_cast<const nvstrdesc_s *>(stripes_host[i].column_data_base); // NOTE: Requires the --expt-extended-lambda nvcc flag thrust::sort(rmm::exec_policy(stream), p, p + stripes_host[i].num_strings, [str_data] __device__(const uint32_t &lhs, const uint32_t &rhs) { return nvstr_is_lesser(str_data[lhs].ptr, (uint32_t)str_data[lhs].count, str_data[rhs].ptr, (uint32_t)str_data[rhs].count); }); } } hipLaunchKernelGGL(( gpuBuildStripeDictionaries<1024>) , dim3(dim_grid_build), dim3(dim_block), 0, stream.value(), stripes, num_columns); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
dict_enc.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/sort.h> namespace cudf { namespace io { namespace orc { namespace gpu { constexpr uint32_t max_dict_entries = default_row_index_stride; constexpr int init_hash_bits = 12; struct dictinit_state_s { uint32_t nnz; uint32_t total_dupes; DictionaryChunk chunk; volatile uint32_t scratch_red[32]; uint32_t dict[max_dict_entries]; union { uint16_t u16[1 << (init_hash_bits)]; uint32_t u32[1 << (init_hash_bits - 1)]; } map; }; /** * @brief Return a 12-bit hash from a byte sequence */ static inline __device__ uint32_t nvstr_init_hash(char const *ptr, uint32_t len) { if (len != 0) { return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1); } else { return 0; } } /** * @brief Fill dictionary with the indices of non-null rows * * @param[in,out] s dictionary builder state * @param[in] t thread id * @param[in] temp_storage shared memory storage to scan non-null positions */ template <int block_size, typename Storage> static __device__ void LoadNonNullIndices(volatile dictinit_state_s *s, int t, Storage &temp_storage) { if (t == 0) { s->nnz = 0; } for (uint32_t i = 0; i < s->chunk.num_rows; i += 512) { const uint32_t *valid_map = s->chunk.valid_map_base; uint32_t is_valid, nz_pos; if (t < 16) { if (!valid_map) { s->scratch_red[t] = 0xffffffffu; } else { uint32_t row = s->chunk.start_row + i + t * 32; uint32_t v = (row < s->chunk.start_row + s->chunk.num_rows) ? valid_map[(row + s->chunk.column_offset) / 32] : 0; if (row & 0x1f) { uint32_t v1 = (row + 32 < s->chunk.start_row + s->chunk.num_rows) ? valid_map[((row + s->chunk.column_offset) / 32) + 1] : 0; v = __funnelshift_r(v, v1, row + s->chunk.column_offset); } s->scratch_red[t] = v; } } __syncthreads(); is_valid = (i + t < s->chunk.num_rows) ? 
(s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0; uint32_t tmp_nnz; cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>(temp_storage) .ExclusiveSum(is_valid, nz_pos, tmp_nnz); nz_pos += s->nnz; __syncthreads(); if (!t) { s->nnz += tmp_nnz; } if (is_valid) { s->dict[nz_pos] = i + t; } __syncthreads(); } } /** * @brief Gather all non-NULL string rows and compute total character data size * * @param[in] chunks DictionaryChunk device array [rowgroup][column] * @param[in] num_columns Number of columns */ // blockDim {512,1,1} template <int block_size> __global__ void __launch_bounds__(block_size, 2) gpuInitDictionaryIndices(DictionaryChunk *chunks, uint32_t num_columns) { __shared__ __align__(16) dictinit_state_s state_g; using block_reduce = cub::BlockReduce<uint32_t, block_size>; using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>; __shared__ union { typename block_reduce::TempStorage reduce_storage; typename block_scan::TempStorage scan_storage; } temp_storage; dictinit_state_s *const s = &state_g; uint32_t col_id = blockIdx.x; uint32_t group_id = blockIdx.y; const nvstrdesc_s *ck_data; uint32_t *dict_data; uint32_t nnz, start_row, dict_char_count; int t = threadIdx.x; if (t == 0) s->chunk = chunks[group_id * num_columns + col_id]; for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) { if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0; } __syncthreads(); // First, take care of NULLs, and count how many strings we have (TODO: bypass this step when // there are no nulls) LoadNonNullIndices<block_size>(s, t, temp_storage.scan_storage); // Sum the lengths of all the strings if (t == 0) { s->chunk.string_char_count = 0; s->total_dupes = 0; } nnz = s->nnz; dict_data = s->chunk.dict_data; start_row = s->chunk.start_row; ck_data = static_cast<const nvstrdesc_s *>(s->chunk.column_data_base) + start_row; for (uint32_t i = 0; i < nnz; i += block_size) { uint32_t ck_row = 0; uint32_t hash = 0; uint32_t len = 0; if (i + t < nnz) { ck_row = s->dict[i + t]; len = static_cast<uint32_t>(ck_data[ck_row].count); hash = nvstr_init_hash(ck_data[ck_row].ptr, len); } len = block_reduce(temp_storage.reduce_storage).Sum(len); if (t == 0) s->chunk.string_char_count += len; if (i + t < nnz) { atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 
16 : 0)); dict_data[i + t] = start_row + ck_row; } __syncthreads(); } // Reorder the 16-bit local indices according to the hash value of the strings static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12"); { // Cumulative sum of hash map counts uint32_t count01 = s->map.u32[t * 4 + 0]; uint32_t count23 = s->map.u32[t * 4 + 1]; uint32_t count45 = s->map.u32[t * 4 + 2]; uint32_t count67 = s->map.u32[t * 4 + 3]; uint32_t sum01 = count01 + (count01 << 16); uint32_t sum23 = count23 + (count23 << 16); uint32_t sum45 = count45 + (count45 << 16); uint32_t sum67 = count67 + (count67 << 16); sum23 += (sum01 >> 16) * 0x10001; sum45 += (sum23 >> 16) * 0x10001; sum67 += (sum45 >> 16) * 0x10001; uint32_t sum_w = sum67 >> 16; block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w); __syncthreads(); sum_w = (sum_w - (sum67 >> 16)) * 0x10001; s->map.u32[t * 4 + 0] = sum_w + sum01 - count01; s->map.u32[t * 4 + 1] = sum_w + sum23 - count23; s->map.u32[t * 4 + 2] = sum_w + sum45 - count45; s->map.u32[t * 4 + 3] = sum_w + sum67 - count67; __syncthreads(); } // Put the indices back in hash order for (uint32_t i = 0; i < nnz; i += block_size) { uint32_t ck_row = 0, pos = 0, hash = 0, pos_old, pos_new, sh, colliding_row; bool collision; if (i + t < nnz) { ck_row = dict_data[i + t] - start_row; hash = nvstr_init_hash(ck_data[ck_row].ptr, static_cast<uint32_t>(ck_data[ck_row].count)); sh = (hash & 1) ? 16 : 0; pos_old = s->map.u16[hash]; } // The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic // behavior for the first row in the hash map that will be used for early duplicate detection __syncthreads(); if (i + t < nnz) { pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff; s->dict[pos] = ck_row; } __syncthreads(); collision = false; if (i + t < nnz) { pos_new = s->map.u16[hash]; collision = (pos != pos_old && pos_new > pos_old + 1); if (collision) { colliding_row = s->dict[pos_old]; } } __syncthreads(); if (collision) { atomicMin(s->dict + pos_old, ck_row); } __syncthreads(); // Resolve collision if (collision && ck_row == s->dict[pos_old]) { s->dict[pos] = colliding_row; } } __syncthreads(); // Now that the strings are ordered by hash, compare every string with the first entry in the hash // map, the position of the first string can be inferred from the hash map counts dict_char_count = 0; for (uint32_t i = 0; i < nnz; i += block_size) { uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0; if (i + t < nnz) { const char *str1, *str2; uint32_t len1, len2, hash; ck_row = s->dict[i + t]; str1 = ck_data[ck_row].ptr; len1 = static_cast<uint32_t>(ck_data[ck_row].count); hash = nvstr_init_hash(str1, len1); ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0]; if (ck_row_ref != ck_row) { str2 = ck_data[ck_row_ref].ptr; len2 = static_cast<uint32_t>(ck_data[ck_row_ref].count); is_dupe = nvstr_is_equal(str1, len1, str2, len2); dict_char_count += (is_dupe) ? 0 : len1; } } uint32_t dupes_in_block; uint32_t dupes_before; block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block); dupes_before += s->total_dupes; __syncthreads(); if (!t) { s->total_dupes += dupes_in_block; } if (i + t < nnz) { if (!is_dupe) { dict_data[i + t - dupes_before] = ck_row + start_row; } else { s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31); } } } // temp_storage is being used twice, so make sure there is `__syncthreads()` between them // while making any future changes. 
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count); if (!t) { chunks[group_id * num_columns + col_id].num_strings = nnz; chunks[group_id * num_columns + col_id].string_char_count = s->chunk.string_char_count; chunks[group_id * num_columns + col_id].num_dict_strings = nnz - s->total_dupes; chunks[group_id * num_columns + col_id].dict_char_count = dict_char_count; } } /** * @brief In-place concatenate dictionary data for all chunks in each stripe * * @param[in] stripes StripeDictionary device array [stripe][column] * @param[in] chunks DictionaryChunk device array [rowgroup][column] * @param[in] num_columns Number of columns */ // blockDim {1024,1,1} extern "C" __global__ void __launch_bounds__(1024) gpuCompactChunkDictionaries(StripeDictionary *stripes, DictionaryChunk const *chunks, uint32_t num_columns) { __shared__ __align__(16) StripeDictionary stripe_g; __shared__ __align__(16) DictionaryChunk chunk_g; __shared__ const uint32_t *volatile ck_curptr_g; __shared__ uint32_t volatile ck_curlen_g; uint32_t col_id = blockIdx.x; uint32_t stripe_id = blockIdx.y; uint32_t chunk_len; int t = threadIdx.x; const uint32_t *src; uint32_t *dst; if (t == 0) stripe_g = stripes[stripe_id * num_columns + col_id]; __syncthreads(); if (!stripe_g.dict_data) { return; } if (t == 0) chunk_g = chunks[stripe_g.start_chunk * num_columns + col_id]; __syncthreads(); dst = stripe_g.dict_data + chunk_g.num_dict_strings; for (uint32_t g = 1; g < stripe_g.num_chunks; g++) { if (!t) { src = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].dict_data; chunk_len = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].num_dict_strings; ck_curptr_g = src; ck_curlen_g = chunk_len; } __syncthreads(); src = ck_curptr_g; chunk_len = ck_curlen_g; if (src != dst) { for (uint32_t i = 0; i < chunk_len; i += 1024) { uint32_t idx = (i + t < chunk_len) ? 
src[i + t] : 0; __syncthreads(); if (i + t < chunk_len) dst[i + t] = idx; } } dst += chunk_len; __syncthreads(); } } struct build_state_s { uint32_t total_dupes; StripeDictionary stripe; volatile uint32_t scratch_red[32]; }; /** * @brief Eliminate duplicates in-place and generate column dictionary index * * @param[in] stripes StripeDictionary device array [stripe][column] * @param[in] num_columns Number of string columns */ // NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary // blockDim {1024,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuBuildStripeDictionaries(StripeDictionary *stripes, uint32_t num_columns) { __shared__ __align__(16) build_state_s state_g; using block_reduce = cub::BlockReduce<uint32_t, block_size>; using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>; __shared__ union { typename block_reduce::TempStorage reduce_storage; typename block_scan::TempStorage scan_storage; } temp_storage; build_state_s *const s = &state_g; uint32_t col_id = blockIdx.x; uint32_t stripe_id = blockIdx.y; uint32_t num_strings; uint32_t *dict_data, *dict_index; uint32_t dict_char_count; const nvstrdesc_s *str_data; int t = threadIdx.x; if (t == 0) s->stripe = stripes[stripe_id * num_columns + col_id]; if (t == 31 * 32) { s->total_dupes = 0; } __syncthreads(); num_strings = s->stripe.num_strings; dict_data = s->stripe.dict_data; if (!dict_data) return; dict_index = s->stripe.dict_index; str_data = static_cast<const nvstrdesc_s *>(s->stripe.column_data_base); dict_char_count = 0; for (uint32_t i = 0; i < num_strings; i += block_size) { uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0; uint32_t cur_len = 0; const char *cur_ptr; bool is_dupe = false; if (i + t < num_strings) { cur_ptr = str_data[cur].ptr; cur_len = str_data[cur].count; } if (i + t != 0 && i + t < num_strings) { uint32_t prev = dict_data[i + t - 1]; is_dupe = nvstr_is_equal(cur_ptr, cur_len, str_data[prev].ptr, str_data[prev].count); } dict_char_count += (is_dupe) ? 
0 : cur_len; uint32_t dupes_in_block; uint32_t dupes_before; block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block); dupes_before += s->total_dupes; __syncthreads(); if (!t) { s->total_dupes += dupes_in_block; } if (i + t < num_strings) { dict_index[cur] = i + t - dupes_before; if (!is_dupe && dupes_before != 0) { dict_data[i + t - dupes_before] = cur; } } __syncthreads(); } dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count); if (t == 0) { stripes[stripe_id * num_columns + col_id].num_strings = num_strings - s->total_dupes; stripes[stripe_id * num_columns + col_id].dict_char_count = dict_char_count; } } /** * @brief Launches kernel for initializing dictionary chunks * * @param[in] chunks DictionaryChunk device array [rowgroup][column] * @param[in] num_columns Number of columns * @param[in] num_rowgroups Number of row groups * @param[in] stream CUDA stream to use, default 0 */ void InitDictionaryIndices(DictionaryChunk *chunks, uint32_t num_columns, uint32_t num_rowgroups, rmm::cuda_stream_view stream) { dim3 dim_block(512, 1); // 512 threads per chunk dim3 dim_grid(num_columns, num_rowgroups); gpuInitDictionaryIndices<512><<<dim_grid, dim_block, 0, stream.value()>>>(chunks, num_columns); } /** * @brief Launches kernel for building stripe dictionaries * * @param[in] stripes StripeDictionary device array [stripe][column] * @param[in] stripes_host StripeDictionary host array [stripe][column] * @param[in] chunks DictionaryChunk device array [rowgroup][column] * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups * @param[in] num_columns Number of columns * @param[in] stream CUDA stream to use, default 0 */ void BuildStripeDictionaries(StripeDictionary *stripes, StripeDictionary *stripes_host, DictionaryChunk const *chunks, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t num_columns, rmm::cuda_stream_view stream) { dim3 dim_block(1024, 1); // 1024 threads per chunk dim3 dim_grid_build(num_columns, num_stripes); gpuCompactChunkDictionaries<<<dim_grid_build, dim_block, 0, stream.value()>>>( stripes, chunks, num_columns); for (uint32_t i = 0; i < num_stripes * num_columns; i++) { if (stripes_host[i].dict_data != nullptr) { thrust::device_ptr<uint32_t> p = thrust::device_pointer_cast(stripes_host[i].dict_data); const nvstrdesc_s *str_data = static_cast<const nvstrdesc_s *>(stripes_host[i].column_data_base); // NOTE: Requires the --expt-extended-lambda nvcc flag thrust::sort(rmm::exec_policy(stream), p, p + stripes_host[i].num_strings, [str_data] __device__(const uint32_t &lhs, const uint32_t &rhs) { return nvstr_is_lesser(str_data[lhs].ptr, (uint32_t)str_data[lhs].count, str_data[rhs].ptr, (uint32_t)str_data[rhs].count); }); } } gpuBuildStripeDictionaries<1024> <<<dim_grid_build, dim_block, 0, stream.value()>>>(stripes, num_columns); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
85992a85f70b23853dc755273ecf365720e082b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! @file * \brief Descriptions and declarations for structures used in GPU * * <pre> * -- Distributed SuperLU routine (version 7.2) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley, * Georgia Institute of Technology, Oak Ridge National Laboratory * * Last update: November 14, 2021 remove dependence on CUB/scan * </pre> */ //#define GPU_DEBUG #include "superlu_defs.h" #undef Reduce //#include <thrust/system/hip/detail/hipcub/hipcub.hpp> #include "zlustruct_gpu.h" #ifdef HAVE_HIP #include "superlu_gpu_utils.hip.cpp" #endif #include "dcomplex.h" //extern "C" { // void cblas_daxpy(const int N, const double alpha, const double *X, // const int incX, double *Y, const int incY); //} // gpublasStatus_t checkGPUblas(gpublasStatus_t result) // { // #if defined(DEBUG) || defined(_DEBUG) // if (result != GPUBLAS_STATUS_SUCCESS) // { // fprintf(stderr, "GPU BLAS Runtime Error: %s\n", gpublasGetErrorString(result)); // assert(result == GPUBLAS_STATUS_SUCCESS); // } // #endif // return result; // } // #define UNIT_STRIDE #if 0 ////////// this routine is not used anymore __device__ inline void device_scatter_l (int_t thread_id, int_t nsupc, int_t temp_nbrow, int_t *usub, int_t iukp, int_t klst, doublecomplex *nzval, int_t ldv, doublecomplex *tempv, int_t nbrow, // int_t *indirect2_thread int *indirect2_thread ) { int_t segsize, jj; for (jj = 0; jj < nsupc; ++jj) { segsize = klst - usub[iukp + jj]; if (segsize) { if (thread_id < temp_nbrow) { #ifndef UNIT_STRIDE nzval[indirect2_thread[thread_id]] -= tempv[thread_id]; #else nzval[thread_id] -= tempv[thread_id]; /*making access unit strided*/ #endif } tempv += nbrow; } nzval += ldv; } } #endif ///////////// not used //#define THREAD_BLOCK_SIZE 256 /* Sherry: was 192. 
should be <= MAX_SUPER_SIZE */ __device__ inline void zdevice_scatter_l_2D (int thread_id, int nsupc, int temp_nbrow, int_t *usub, int iukp, int_t klst, doublecomplex *nzval, int ldv, const doublecomplex *tempv, int nbrow, int *indirect2_thread, int nnz_cols, int ColPerBlock, int *IndirectJ3 ) { int i; if ( thread_id < temp_nbrow * ColPerBlock ) { int thread_id_x = thread_id % temp_nbrow; int thread_id_y = thread_id / temp_nbrow; #define UNROLL_ITER 8 #pragma unroll 4 for (int col = thread_id_y; col < nnz_cols ; col += ColPerBlock) { i = ldv * IndirectJ3[col] + indirect2_thread[thread_id_x]; z_sub(&nzval[i], &nzval[i], &tempv[nbrow * col + thread_id_x]); } } } /* Sherry: this routine is not used */ #if 0 ////////////////////////////////////////////// __global__ void cub_scan_test(void) { int thread_id = threadIdx.x; typedef hipcub::BlockScan<int, MAX_SUPER_SIZE > BlockScan; /*1D int data type*/ __shared__ typename BlockScan::TempStorage temp_storage; /*storage temp*/ __shared__ int IndirectJ1[MAX_SUPER_SIZE]; __shared__ int IndirectJ2[MAX_SUPER_SIZE]; if (thread_id < MAX_SUPER_SIZE) { IndirectJ1[thread_id] = (thread_id + 1) % 2; } __syncthreads(); if (thread_id < MAX_SUPER_SIZE) BlockScan(temp_storage).InclusiveSum (IndirectJ1[thread_id], IndirectJ2[thread_id]); if (thread_id < MAX_SUPER_SIZE) printf("%d %d\n", thread_id, IndirectJ2[thread_id]); } #endif /////////////////////////////////// not used __device__ inline void device_scatter_u_2D (int thread_id, int temp_nbrow, int nsupc, doublecomplex * ucol, int_t * usub, int iukp, int_t ilst, int_t klst, int_t * index, int iuip_lib, doublecomplex * tempv, int nbrow, int *indirect, int nnz_cols, int ColPerBlock, int *IndirectJ1, int *IndirectJ3 ) { int i; if ( thread_id < temp_nbrow * ColPerBlock ) { /* 1D threads are logically arranged in 2D shape. 
*/ int thread_id_x = thread_id % temp_nbrow; int thread_id_y = thread_id / temp_nbrow; #pragma unroll 4 for (int col = thread_id_y; col < nnz_cols ; col += ColPerBlock) { i = IndirectJ1[IndirectJ3[col]]-ilst + indirect[thread_id_x]; z_sub(&ucol[i], &ucol[i], &tempv[nbrow * col + thread_id_x]); } } } __global__ void Scatter_GPU_kernel( int_t streamId, int_t ii_st, int_t ii_end, int_t jj_st, int_t jj_end, /* defines rectangular Schur block to be scatter */ int_t klst, int_t jj0, /* 0 on entry */ int_t nrows, int_t ldt, int_t npcol, int_t nprow, zLUstruct_gpu_t * A_gpu) { /* initializing pointers */ int_t *xsup = A_gpu->xsup; int_t *UrowindPtr = A_gpu->UrowindPtr; int_t *UrowindVec = A_gpu->UrowindVec; int_t *UnzvalPtr = A_gpu->UnzvalPtr; doublecomplex *UnzvalVec = A_gpu->UnzvalVec; int_t *LrowindPtr = A_gpu->LrowindPtr; int_t *LrowindVec = A_gpu->LrowindVec; int_t *LnzvalPtr = A_gpu->LnzvalPtr; doublecomplex *LnzvalVec = A_gpu->LnzvalVec; doublecomplex *bigV = A_gpu->scubufs[streamId].bigV; local_l_blk_info_t *local_l_blk_infoVec = A_gpu->local_l_blk_infoVec; local_u_blk_info_t *local_u_blk_infoVec = A_gpu->local_u_blk_infoVec; int_t *local_l_blk_infoPtr = A_gpu->local_l_blk_infoPtr; int_t *local_u_blk_infoPtr = A_gpu->local_u_blk_infoPtr; Remain_info_t *Remain_info = A_gpu->scubufs[streamId].Remain_info; Ublock_info_t *Ublock_info = A_gpu->scubufs[streamId].Ublock_info; int_t *lsub = A_gpu->scubufs[streamId].lsub; int_t *usub = A_gpu->scubufs[streamId].usub; /* thread block assignment: this thread block is assigned to block (lb, j) in 2D grid */ int lb = blockIdx.x + ii_st; int j = blockIdx.y + jj_st; extern __shared__ int s[]; int* indirect_lptr = s; /* row-wise */ int* indirect2_thread= (int*) &indirect_lptr[ldt]; /* row-wise */ int* IndirectJ1= (int*) &indirect2_thread[ldt]; /* column-wise */ int* IndirectJ3= (int*) &IndirectJ1[ldt]; /* column-wise */ //int THREAD_BLOCK_SIZE =ldt; int* pfxStorage = (int*) &IndirectJ3[ldt]; int thread_id = threadIdx.x; int iukp = Ublock_info[j].iukp; int jb = Ublock_info[j].jb; int nsupc = SuperSize (jb); int ljb = jb / npcol; typedef int pfx_dtype ; extern __device__ void incScan(pfx_dtype *inOutArr, pfx_dtype *temp, int n); doublecomplex *tempv1; if (jj_st == jj0) { tempv1 = (j == jj_st) ? bigV : bigV + Ublock_info[j - 1].full_u_cols * nrows; } else { tempv1 = (j == jj_st) ? bigV : bigV + (Ublock_info[j - 1].full_u_cols - Ublock_info[jj_st - 1].full_u_cols) * nrows; } /* # of nonzero columns in block j */ int nnz_cols = (j == 0) ? Ublock_info[j].full_u_cols : (Ublock_info[j].full_u_cols - Ublock_info[j - 1].full_u_cols); int cum_ncol = (j == 0) ? 0 : Ublock_info[j - 1].full_u_cols; int lptr = Remain_info[lb].lptr; int ib = Remain_info[lb].ib; int temp_nbrow = lsub[lptr + 1]; /* number of rows in the current L block */ lptr += LB_DESCRIPTOR; int_t cum_nrow; if (ii_st == 0) { cum_nrow = (lb == 0 ? 0 : Remain_info[lb - 1].FullRow); } else { cum_nrow = (lb == 0 ? 
0 : Remain_info[lb - 1].FullRow - Remain_info[ii_st - 1].FullRow); } tempv1 += cum_nrow; if (ib < jb) /*scatter U code */ { int ilst = FstBlockC (ib + 1); int lib = ib / nprow; /* local index of row block ib */ int_t *index = &UrowindVec[UrowindPtr[lib]]; int num_u_blocks = index[0]; int ljb = (jb) / npcol; /* local index of column block jb */ /* Each thread is responsible for one block column */ __shared__ int ljb_ind; /*do a search ljb_ind at local row lib*/ int blks_per_threads = CEILING(num_u_blocks, blockDim.x); // printf("blockDim.x =%d \n", blockDim.x); for (int i = 0; i < blks_per_threads; ++i) /* each thread is assigned a chunk of consecutive U blocks to search */ { /* only one thread finds the block index matching ljb */ if (thread_id * blks_per_threads + i < num_u_blocks && local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + thread_id * blks_per_threads + i ].ljb == ljb) { ljb_ind = thread_id * blks_per_threads + i; } } __syncthreads(); int iuip_lib = local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + ljb_ind].iuip; int ruip_lib = local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + ljb_ind].ruip; iuip_lib += UB_DESCRIPTOR; doublecomplex *Unzval_lib = &UnzvalVec[UnzvalPtr[lib]]; doublecomplex *ucol = &Unzval_lib[ruip_lib]; if (thread_id < temp_nbrow) /* row-wise */ { /* cyclically map each thread to a row */ indirect_lptr[thread_id] = (int) lsub[lptr + thread_id]; } /* column-wise: each thread is assigned one column */ if (thread_id < nnz_cols) IndirectJ3[thread_id] = A_gpu->scubufs[streamId].usub_IndirectJ3[cum_ncol + thread_id]; /* indirectJ3[j] == kk means the j-th nonzero segment points to column kk in this supernode */ __syncthreads(); /* threads are divided into multiple columns */ int ColPerBlock = blockDim.x / temp_nbrow; // if (thread_id < blockDim.x) // IndirectJ1[thread_id] = 0; if (thread_id < ldt) IndirectJ1[thread_id] = 0; if (thread_id < blockDim.x) { if (thread_id < nsupc) { /* fstnz subscript of each column in the block */ IndirectJ1[thread_id] = -index[iuip_lib + thread_id] + ilst; } } /* perform an inclusive block-wide prefix sum among all threads */ __syncthreads(); incScan(IndirectJ1, pfxStorage, nsupc); __syncthreads(); device_scatter_u_2D ( thread_id, temp_nbrow, nsupc, ucol, usub, iukp, ilst, klst, index, iuip_lib, tempv1, nrows, indirect_lptr, nnz_cols, ColPerBlock, IndirectJ1, IndirectJ3 ); } else /* ib >= jb, scatter L code */ { int rel; doublecomplex *nzval; int_t *index = &LrowindVec[LrowindPtr[ljb]]; int num_l_blocks = index[0]; int ldv = index[1]; int fnz = FstBlockC (ib); int lib = ib / nprow; __shared__ int lib_ind; /*do a search lib_ind for lib*/ int blks_per_threads = CEILING(num_l_blocks, blockDim.x); for (int i = 0; i < blks_per_threads; ++i) { if (thread_id * blks_per_threads + i < num_l_blocks && local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + thread_id * blks_per_threads + i ].lib == lib) { lib_ind = thread_id * blks_per_threads + i; } } __syncthreads(); int lptrj = local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + lib_ind].lptrj; int luptrj = local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + lib_ind].luptrj; lptrj += LB_DESCRIPTOR; int dest_nbrow = index[lptrj - 1]; if (thread_id < dest_nbrow) { rel = index[lptrj + thread_id] - fnz; indirect_lptr[rel] = thread_id; } __syncthreads(); /* can be precalculated */ if (thread_id < temp_nbrow) { rel = lsub[lptr + thread_id] - fnz; indirect2_thread[thread_id] = indirect_lptr[rel]; } if (thread_id < nnz_cols) IndirectJ3[thread_id] = (int) A_gpu->scubufs[streamId].usub_IndirectJ3[cum_ncol + thread_id]; 
__syncthreads(); int ColPerBlock = blockDim.x / temp_nbrow; nzval = &LnzvalVec[LnzvalPtr[ljb]] + luptrj; zdevice_scatter_l_2D( thread_id, nsupc, temp_nbrow, usub, iukp, klst, nzval, ldv, tempv1, nrows, indirect2_thread, nnz_cols, ColPerBlock, IndirectJ3); } /* end else ib >= jb */ } /* end Scatter_GPU_kernel */ #define GPU_2D_SCHUDT /* Not used */ int zSchurCompUpdate_GPU( int_t streamId, int_t jj_cpu, /* 0 on entry, pointing to the start of Phi part */ int_t nub, /* jj_cpu on entry, pointing to the end of the Phi part */ int_t klst, int_t knsupc, int_t Rnbrow, int_t RemainBlk, int_t Remain_lbuf_send_size, int_t bigu_send_size, int_t ldu, int_t mcb, /* num_u_blks_hi */ int_t buffer_size, int_t lsub_len, int_t usub_len, int_t ldt, int_t k0, zsluGPU_t *sluGPU, gridinfo_t *grid, SuperLUStat_t *stat ) { int SCATTER_THREAD_BLOCK_SIZE=512; zLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; zLUstruct_gpu_t * dA_gpu = sluGPU->dA_gpu; int_t nprow = grid->nprow; int_t npcol = grid->npcol; gpuStream_t FunCallStream = sluGPU->funCallStreams[streamId]; gpublasHandle_t gpublas_handle0 = sluGPU->gpublasHandles[streamId]; int_t * lsub = A_gpu->scubufs[streamId].lsub_buf; int_t * usub = A_gpu->scubufs[streamId].usub_buf; Remain_info_t *Remain_info = A_gpu->scubufs[streamId].Remain_info_host; doublecomplex * Remain_L_buff = A_gpu->scubufs[streamId].Remain_L_buff_host; Ublock_info_t *Ublock_info = A_gpu->scubufs[streamId].Ublock_info_host; doublecomplex * bigU = A_gpu->scubufs[streamId].bigU_host; stat->isOffloaded[k0] = 1; /* start by sending data to */ int_t *xsup = A_gpu->xsup_host; int_t col_back = (jj_cpu == 0) ? 0 : Ublock_info[jj_cpu - 1].full_u_cols; // if(nub<1) return; int_t ncols = Ublock_info[nub - 1].full_u_cols - col_back; /* Sherry: can get max_super_size from sp_ienv(3) */ int_t indirectJ1[MAX_SUPER_SIZE]; // 0 indicates an empry segment int_t indirectJ2[MAX_SUPER_SIZE]; // # of nonzero segments so far int_t indirectJ3[MAX_SUPER_SIZE]; /* indirectJ3[j] == k means the j-th nonzero segment points to column k in this supernode */ /* calculate usub_indirect */ for (int jj = jj_cpu; jj < nub; ++jj) { int_t iukp = Ublock_info[jj].iukp; int_t jb = Ublock_info[jj].jb; int_t nsupc = SuperSize (jb); int_t addr = (jj == 0) ? 0 : Ublock_info[jj - 1].full_u_cols - col_back; for (int_t kk = 0; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { indirectJ1[kk] = 0; } for (int_t kk = 0; kk < nsupc; ++kk) { indirectJ1[kk] = ((klst - usub[iukp + kk]) == 0) ? 0 : 1; } /*prefix sum - indicates # of nonzero segments up to column kk */ indirectJ2[0] = indirectJ1[0]; for (int_t kk = 1; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { indirectJ2[kk] = indirectJ2[kk - 1] + indirectJ1[kk]; } /* total number of nonzero segments in this supernode */ int nnz_col = indirectJ2[nsupc - 1]; // old: MAX_SUPER_SIZE /* compactation */ for (int_t kk = 0; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { if (indirectJ1[kk]) /* kk is a nonzero segment */ { /* indirectJ3[j] == kk means the j-th nonzero segment points to column kk in this supernode */ indirectJ3[indirectJ2[kk] - 1] = kk; } } for (int i = 0; i < nnz_col; ++i) { /* addr == total # of full columns before current block jj */ A_gpu->scubufs[streamId].usub_IndirectJ3_host[addr + i] = indirectJ3[i]; } } /* end for jj ... 
calculate usub_indirect */ //printf("zSchurCompUpdate_GPU[3]: jj_cpu %d, nub %d\n", jj_cpu, nub); fflush(stdout); /*sizeof RemainLbuf = Rnbuf*knsupc */ double tTmp = SuperLU_timer_(); gpuEventRecord(stat->ePCIeH2D[k0], FunCallStream); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].usub_IndirectJ3, A_gpu->scubufs[streamId].usub_IndirectJ3_host, ncols * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream)) ; checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Remain_L_buff, Remain_L_buff, Remain_lbuf_send_size * sizeof(doublecomplex), gpuMemcpyHostToDevice, FunCallStream)) ; checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].bigU, bigU, bigu_send_size * sizeof(doublecomplex), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Remain_info, Remain_info, RemainBlk * sizeof(Remain_info_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Ublock_info, Ublock_info, mcb * sizeof(Ublock_info_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].lsub, lsub, lsub_len * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].usub, usub, usub_len * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream) ); stat->tHost_PCIeH2D += SuperLU_timer_() - tTmp; stat->cPCIeH2D += Remain_lbuf_send_size * sizeof(doublecomplex) + bigu_send_size * sizeof(doublecomplex) + RemainBlk * sizeof(Remain_info_t) + mcb * sizeof(Ublock_info_t) + lsub_len * sizeof(int_t) + usub_len * sizeof(int_t); doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0}; /* The following are used in gpublasZgemm() call */ gpuDoubleComplex *cu_alpha = (gpuDoubleComplex *) &alpha; gpuDoubleComplex *cu_beta = (gpuDoubleComplex *) &beta; gpuDoubleComplex *cu_A, *cu_B, *cu_C; /* C <- A*B */ int_t ii_st = 0; int_t ii_end = 0; int_t maxGemmBlockDim = (int) sqrt(buffer_size); // int_t maxGemmBlockDim = 8000; /* Organize GEMM by blocks of [ii_st : ii_end, jj_st : jj_end] that fits in the buffer_size */ while (ii_end < RemainBlk) { ii_st = ii_end; ii_end = RemainBlk; int_t nrow_max = maxGemmBlockDim; // nrow_max = Rnbrow; int_t remaining_rows = (ii_st == 0) ? Rnbrow : Rnbrow - Remain_info[ii_st - 1].FullRow; nrow_max = (remaining_rows / nrow_max) > 0 ? remaining_rows / CEILING(remaining_rows, nrow_max) : nrow_max; int_t ResRow = (ii_st == 0) ? 0 : Remain_info[ii_st - 1].FullRow; for (int_t i = ii_st; i < RemainBlk - 1; ++i) { if ( Remain_info[i + 1].FullRow > ResRow + nrow_max) { ii_end = i; break; /* row dimension reaches nrow_max */ } } int_t nrows; /* actual row dimension for GEMM */ int_t st_row; if (ii_st > 0) { nrows = Remain_info[ii_end - 1].FullRow - Remain_info[ii_st - 1].FullRow; st_row = Remain_info[ii_st - 1].FullRow; } else { nrows = Remain_info[ii_end - 1].FullRow; st_row = 0; } int jj_st = jj_cpu; int jj_end = jj_cpu; while (jj_end < nub && nrows > 0 ) { int_t remaining_cols = (jj_st == jj_cpu) ? ncols : ncols - Ublock_info[jj_st - 1].full_u_cols; if ( remaining_cols * nrows < buffer_size) { jj_st = jj_end; jj_end = nub; } else /* C matrix cannot fit in buffer, need to break into pieces */ { int_t ncol_max = buffer_size / nrows; /** Must revisit **/ ncol_max = SUPERLU_MIN(ncol_max, maxGemmBlockDim); ncol_max = (remaining_cols / ncol_max) > 0 ? remaining_cols / CEILING(remaining_cols, ncol_max) : ncol_max; jj_st = jj_end; jj_end = nub; int_t ResCol = (jj_st == 0) ? 
0 : Ublock_info[jj_st - 1].full_u_cols; for (int_t j = jj_st; j < nub - 1; ++j) { if (Ublock_info[j + 1].full_u_cols > ResCol + ncol_max) { jj_end = j; break; } } } /* end-if-else */ int ncols; int st_col; if (jj_st > 0) { ncols = Ublock_info[jj_end - 1].full_u_cols - Ublock_info[jj_st - 1].full_u_cols; st_col = Ublock_info[jj_st - 1].full_u_cols; if (ncols == 0) exit(0); } else { ncols = Ublock_info[jj_end - 1].full_u_cols; st_col = 0; } /* none of the matrix dimension is zero. */ if (nrows > 0 && ldu > 0 && ncols > 0) { if (nrows * ncols > buffer_size) { printf("!! Matrix size %lld x %lld exceeds buffer_size %lld\n", nrows, ncols, buffer_size); fflush(stdout); } assert(nrows * ncols <= buffer_size); gpublasSetStream(gpublas_handle0, FunCallStream); gpuEventRecord(stat->GemmStart[k0], FunCallStream); cu_A = (gpuDoubleComplex*) &A_gpu->scubufs[streamId].Remain_L_buff[(knsupc - ldu) * Rnbrow + st_row]; cu_B = (gpuDoubleComplex*) &A_gpu->scubufs[streamId].bigU[st_col * ldu]; cu_C = (gpuDoubleComplex*) A_gpu->scubufs[streamId].bigV; gpublasZgemm(gpublas_handle0, GPUBLAS_OP_N, GPUBLAS_OP_N, nrows, ncols, ldu, cu_alpha, cu_A, Rnbrow, cu_B, ldu, cu_beta, cu_C, nrows); // #define SCATTER_OPT #ifdef SCATTER_OPT gpuStreamSynchronize(FunCallStream); #warning this function is synchronous #endif gpuEventRecord(stat->GemmEnd[k0], FunCallStream); stat->GemmFLOPCounter += 8.0 * (double) nrows * ncols * ldu; /* * Scattering the output */ // dim3 dimBlock(THREAD_BLOCK_SIZE); // 1d thread dim3 dimBlock(ldt); // 1d thread dim3 dimGrid(ii_end - ii_st, jj_end - jj_st); hipLaunchKernelGGL(( Scatter_GPU_kernel) , dim3(dimGrid), dim3(dimBlock), (4*ldt + 2*SCATTER_THREAD_BLOCK_SIZE)*sizeof(int), FunCallStream, streamId, ii_st, ii_end, jj_st, jj_end, klst, 0, nrows, ldt, npcol, nprow, dA_gpu); #ifdef SCATTER_OPT gpuStreamSynchronize(FunCallStream); #warning this function is synchronous #endif gpuEventRecord(stat->ScatterEnd[k0], FunCallStream); stat->ScatterMOPCounter += 3.0 * (double) nrows * ncols; } /* endif ... none of the matrix dimension is zero. */ } /* end while jj_end < nub */ } /* end while (ii_end < RemainBlk) */ return 0; } /* end zSchurCompUpdate_GPU */ static void print_occupancy() { int blockSize; // The launch configurator returned block size int minGridSize; /* The minimum grid size needed to achieve the best potential occupancy */ gpuOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, Scatter_GPU_kernel, 0, 0); printf("Occupancy: MinGridSize %d blocksize %d \n", minGridSize, blockSize); } static void printDevProp(gpuDeviceProp devProp) { size_t mfree, mtotal; gpuMemGetInfo (&mfree, &mtotal); printf("pciBusID: %d\n", devProp.pciBusID); printf("pciDeviceID: %d\n", devProp.pciDeviceID); printf("GPU Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total free memory: %zu\n", mfree); printf("Clock rate: %d\n", devProp.clockRate); return; } static size_t get_acc_memory () { size_t mfree, mtotal; gpuMemGetInfo (&mfree, &mtotal); #if 0 printf("Total memory %zu & free memory %zu\n", mtotal, mfree); #endif return (size_t) (0.9 * (double) mfree) / get_mpi_process_per_gpu (); } /* Free all the data structures allocated on GPU.
This routine is called from Host */ int zfree_LUstruct_gpu ( zsluGPU_t * sluGPU, SuperLUStat_t* stat ) { zLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; int streamId = 0; /* Free the L data structure on GPU */ checkGPU(gpuFree(A_gpu->LrowindVec)); checkGPU(gpuFree(A_gpu->LrowindPtr)); checkGPU(gpuFree(A_gpu->LnzvalVec)); checkGPU(gpuFree(A_gpu->LnzvalPtr)); free(A_gpu->LnzvalPtr_host); /*freeing the pinned memory*/ checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Remain_info_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Ublock_info_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Remain_L_buff_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].bigU_host)); checkGPU(gpuFreeHost(A_gpu->acc_L_buff)); checkGPU(gpuFreeHost(A_gpu->acc_U_buff)); checkGPU(gpuFreeHost(A_gpu->scubufs[streamId].lsub_buf)); checkGPU(gpuFreeHost(A_gpu->scubufs[streamId].usub_buf)); SUPERLU_FREE(stat->isOffloaded); // changed to SUPERLU_MALLOC/SUPERLU_FREE SUPERLU_FREE(stat->GemmStart); SUPERLU_FREE(stat->GemmEnd); SUPERLU_FREE(stat->ScatterEnd); SUPERLU_FREE(stat->ePCIeH2D); SUPERLU_FREE(stat->ePCIeD2H_Start); SUPERLU_FREE(stat->ePCIeD2H_End); /* Free the U data structure on GPU */ checkGPU(gpuFree(A_gpu->UrowindVec)); checkGPU(gpuFree(A_gpu->UrowindPtr)); //free(A_gpu->UrowindPtr_host); // Sherry: this is NOT allocated checkGPU(gpuFree(A_gpu->UnzvalVec)); checkGPU(gpuFree(A_gpu->UnzvalPtr)); checkGPU(gpuFree(A_gpu->grid)); /* Free the Schur complement structure on GPU */ checkGPU(gpuFree(A_gpu->scubufs[streamId].bigV)); checkGPU(gpuFree(A_gpu->scubufs[streamId].bigU)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Remain_L_buff)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Ublock_info)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Remain_info)); // checkGPU(gpuFree(A_gpu->indirect)); // checkGPU(gpuFree(A_gpu->indirect2)); checkGPU(gpuFree(A_gpu->xsup)); checkGPU(gpuFree(A_gpu->scubufs[streamId].lsub)); checkGPU(gpuFree(A_gpu->scubufs[streamId].usub)); checkGPU(gpuFree(A_gpu->local_l_blk_infoVec)); checkGPU(gpuFree(A_gpu->local_l_blk_infoPtr)); checkGPU(gpuFree(A_gpu->jib_lookupVec)); checkGPU(gpuFree(A_gpu->jib_lookupPtr)); checkGPU(gpuFree(A_gpu->local_u_blk_infoVec)); checkGPU(gpuFree(A_gpu->local_u_blk_infoPtr)); /* Destroy all the meta-structures associated with the streams. */ gpuStreamDestroy(sluGPU->CopyStream); for (streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { gpuStreamDestroy(sluGPU->funCallStreams[streamId]); gpublasDestroy(sluGPU->gpublasHandles[streamId]); } return 0; } /* end zfree_LUstruct_gpu */ void zPrint_matrix( char *desc, int_t m, int_t n, doublecomplex * dA, int_t lda ) { doublecomplex *cPtr = (doublecomplex *) malloc(sizeof(doublecomplex) * lda * n); checkGPU(gpuMemcpy( cPtr, dA, lda * n * sizeof(doublecomplex), gpuMemcpyDeviceToHost)) ; int_t i, j; printf( "\n %s\n", desc ); for ( i = 0; i < m; i++ ) { for ( j = 0; j < n; j++ ) printf( " %.3e", cPtr[i + j * lda] ); printf( "\n" ); } free(cPtr); } /* Initialize the GPU side of the data structure. 
*/ int zinitSluGPU3D_t( zsluGPU_t *sluGPU, // LU structures on GPU, see zlustruct_gpu.h zLUstruct_t *LUstruct, gridinfo3d_t * grid3d, int_t* perm_c_supno, int_t n, int_t buffer_size, /* read from env variable SUPERLU_MAX_BUFFER_SIZE */ int_t bigu_size, int_t ldt, /* SUPERLU_MAXSUP read from sp_ienv(3) */ SuperLUStat_t *stat ) { checkGPUErrors(gpuDeviceReset ()); Glu_persist_t *Glu_persist = LUstruct->Glu_persist; zLocalLU_t *Llu = LUstruct->Llu; int* isNodeInMyGrid = sluGPU->isNodeInMyGrid; sluGPU->nGPUStreams = getnGPUStreams(); int SCATTER_THREAD_BLOCK_SIZE = ldt; if(getenv("SCATTER_THREAD_BLOCK_SIZE")) { int stbs = atoi(getenv("SCATTER_THREAD_BLOCK_SIZE")); if(stbs>=ldt) { SCATTER_THREAD_BLOCK_SIZE = stbs; } } if (grid3d->iam == 0) { printf("dinitSluGPU3D_t: Using hardware acceleration, with %d gpu streams \n", sluGPU->nGPUStreams); fflush(stdout); printf("dinitSluGPU3D_t: Using %d threads per block for scatter \n", SCATTER_THREAD_BLOCK_SIZE); if ( MAX_SUPER_SIZE < ldt ) { ABORT("MAX_SUPER_SIZE smaller than requested NSUP"); } } gpuStreamCreate(&(sluGPU->CopyStream)); for (int streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { gpuStreamCreate(&(sluGPU->funCallStreams[streamId])); gpublasCreate(&(sluGPU->gpublasHandles[streamId])); sluGPU->lastOffloadStream[streamId] = -1; } sluGPU->A_gpu = (zLUstruct_gpu_t *) malloc (sizeof(zLUstruct_gpu_t)); sluGPU->A_gpu->perm_c_supno = perm_c_supno; /* Allocate GPU memory for the LU data structures, and copy the host LU structure to GPU side. */ zCopyLUToGPU3D ( isNodeInMyGrid, Llu, /* referred to as A_host */ sluGPU, Glu_persist, n, grid3d, buffer_size, bigu_size, ldt, stat ); return 0; } /* end zinitSluGPU3D_t */ int zinitD2Hreduce( int next_k, d2Hreduce_t* d2Hred, int last_flag, HyP_t* HyP, zsluGPU_t *sluGPU, gridinfo_t *grid, zLUstruct_t *LUstruct, SCT_t* SCT ) { Glu_persist_t *Glu_persist = LUstruct->Glu_persist; zLocalLU_t *Llu = LUstruct->Llu; int_t* xsup = Glu_persist->xsup; int_t iam = grid->iam; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t** Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; int_t** Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; // int_t next_col = SUPERLU_MIN (k0 + num_look_aheads + 1, nsupers - 1); // int_t next_k = perm_c_supno[next_col]; /* global block number for next colum*/ int_t mkcol, mkrow; int_t kljb = LBj( next_k, grid ); /*local block number for next block*/ int_t kijb = LBi( next_k, grid ); /*local block number for next block*/ int_t *kindexL ; /*for storing index vectors*/ int_t *kindexU ; mkrow = PROW (next_k, grid); mkcol = PCOL (next_k, grid); int_t ksup_size = SuperSize(next_k); int_t copyL_kljb = 0; int_t copyU_kljb = 0; int_t l_copy_len = 0; int_t u_copy_len = 0; if (mkcol == mycol && Lrowind_bc_ptr[kljb] != NULL && last_flag) { if (HyP->Lblock_dirty_bit[kljb] > -1) { copyL_kljb = 1; int_t lastk0 = HyP->Lblock_dirty_bit[kljb]; int_t streamIdk0Offload = lastk0 % sluGPU->nGPUStreams; if (sluGPU->lastOffloadStream[streamIdk0Offload] == lastk0 && lastk0 != -1) { // printf("Waiting for Offload =%d to finish StreamId=%d\n", lastk0, streamIdk0Offload); double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamIdk0Offload]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamIdk0Offload] = -1; } } kindexL = Lrowind_bc_ptr[kljb]; l_copy_len = kindexL[1] * ksup_size; } if ( mkrow == myrow && Ufstnz_br_ptr[kijb] != NULL && last_flag ) { if (HyP->Ublock_dirty_bit[kijb] > -1) { copyU_kljb = 1; int_t lastk0 = HyP->Ublock_dirty_bit[kijb]; int_t 
streamIdk0Offload = lastk0 % sluGPU->nGPUStreams; if (sluGPU->lastOffloadStream[streamIdk0Offload] == lastk0 && lastk0 != -1) { // printf("Waiting for Offload =%d to finish StreamId=%d\n", lastk0, streamIdk0Offload); double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamIdk0Offload]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamIdk0Offload] = -1; } } // copyU_kljb = HyP->Ublock_dirty_bit[kijb]>-1? 1: 0; kindexU = Ufstnz_br_ptr[kijb]; u_copy_len = kindexU[1]; } // wait for streams if they have not been finished // d2Hred->next_col = next_col; d2Hred->next_k = next_k; d2Hred->kljb = kljb; d2Hred->kijb = kijb; d2Hred->copyL_kljb = copyL_kljb; d2Hred->copyU_kljb = copyU_kljb; d2Hred->l_copy_len = l_copy_len; d2Hred->u_copy_len = u_copy_len; d2Hred->kindexU = kindexU; d2Hred->kindexL = kindexL; d2Hred->mkrow = mkrow; d2Hred->mkcol = mkcol; d2Hred->ksup_size = ksup_size; return 0; } /* zinitD2Hreduce */ int zreduceGPUlu( int last_flag, d2Hreduce_t* d2Hred, zsluGPU_t *sluGPU, SCT_t *SCT, gridinfo_t *grid, zLUstruct_t *LUstruct ) { zLocalLU_t *Llu = LUstruct->Llu; int iam = grid->iam; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t** Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; doublecomplex** Lnzval_bc_ptr = Llu->Lnzval_bc_ptr; int_t** Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; doublecomplex** Unzval_br_ptr = Llu->Unzval_br_ptr; gpuStream_t CopyStream; zLUstruct_gpu_t *A_gpu; A_gpu = sluGPU->A_gpu; CopyStream = sluGPU->CopyStream; int_t kljb = d2Hred->kljb; int_t kijb = d2Hred->kijb; int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; int_t mkrow = d2Hred->mkrow; int_t mkcol = d2Hred->mkcol; int_t ksup_size = d2Hred->ksup_size; int_t *kindex; if ((copyL_kljb || copyU_kljb) && last_flag ) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(CopyStream); SCT->PhiWaitTimer_2 += SuperLU_timer_() - ttx; } double tt_start = SuperLU_timer_(); if (last_flag) { if (mkcol == mycol && Lrowind_bc_ptr[kljb] != NULL ) { kindex = Lrowind_bc_ptr[kljb]; int_t len = kindex[1]; if (copyL_kljb) { doublecomplex *nzval_host; nzval_host = Lnzval_bc_ptr[kljb]; int_t llen = ksup_size * len; doublecomplex alpha = {1.0, 0.0}; superlu_zaxpy (llen, alpha, A_gpu->acc_L_buff, 1, nzval_host, 1); } } } if (last_flag) { if (mkrow == myrow && Ufstnz_br_ptr[kijb] != NULL ) { kindex = Ufstnz_br_ptr[kijb]; int_t len = kindex[1]; if (copyU_kljb) { doublecomplex *nzval_host; nzval_host = Unzval_br_ptr[kijb]; doublecomplex alpha = {1.0, 0.0}; superlu_zaxpy (len, alpha, A_gpu->acc_U_buff, 1, nzval_host, 1); } } } double tt_end = SuperLU_timer_(); SCT->AssemblyTimer += tt_end - tt_start; return 0; } /* zreduceGPUlu */ int zwaitGPUscu(int streamId, zsluGPU_t *sluGPU, SCT_t *SCT) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; return 0; } int zsendLUpanelGPU2HOST( int_t k0, d2Hreduce_t* d2Hred, zsluGPU_t *sluGPU, SuperLUStat_t *stat ) { int_t kljb = d2Hred->kljb; int_t kijb = d2Hred->kijb; int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; int_t l_copy_len = d2Hred->l_copy_len; int_t u_copy_len = d2Hred->u_copy_len; gpuStream_t CopyStream = sluGPU->CopyStream;; zLUstruct_gpu_t *A_gpu = sluGPU->A_gpu; double tty = SuperLU_timer_(); gpuEventRecord(stat->ePCIeD2H_Start[k0], CopyStream); if (copyL_kljb) checkGPU(gpuMemcpyAsync(A_gpu->acc_L_buff, &A_gpu->LnzvalVec[A_gpu->LnzvalPtr_host[kljb]], l_copy_len * sizeof(doublecomplex), 
gpuMemcpyDeviceToHost, CopyStream ) ); if (copyU_kljb) checkGPU(gpuMemcpyAsync(A_gpu->acc_U_buff, &A_gpu->UnzvalVec[A_gpu->UnzvalPtr_host[kijb]], u_copy_len * sizeof(doublecomplex), gpuMemcpyDeviceToHost, CopyStream ) ); gpuEventRecord(stat->ePCIeD2H_End[k0], CopyStream); stat->tHost_PCIeD2H += SuperLU_timer_() - tty; stat->cPCIeD2H += u_copy_len * sizeof(doublecomplex) + l_copy_len * sizeof(doublecomplex); return 0; } /* end zsendLUpanelGPU2HOST */ /* Copy L and U panel data structures from host to the host part of the data structures in A_gpu. GPU is not involved in this routine. */ int zsendSCUdataHost2GPU( int_t streamId, int_t* lsub, int_t* usub, doublecomplex* bigU, int_t bigu_send_size, int_t Remain_lbuf_send_size, zsluGPU_t *sluGPU, HyP_t* HyP ) { //{printf("....[enter] zsendSCUdataHost2GPU, bigu_send_size %d\n", bigu_send_size); fflush(stdout);} int_t usub_len = usub[2]; int_t lsub_len = lsub[1] + BC_HEADER + lsub[0] * LB_DESCRIPTOR; //{printf("....[2] in zsendSCUdataHost2GPU, lsub_len %d\n", lsub_len); fflush(stdout);} zLUstruct_gpu_t *A_gpu = sluGPU->A_gpu; memcpy(A_gpu->scubufs[streamId].lsub_buf, lsub, sizeof(int_t)*lsub_len); memcpy(A_gpu->scubufs[streamId].usub_buf, usub, sizeof(int_t)*usub_len); memcpy(A_gpu->scubufs[streamId].Remain_info_host, HyP->Remain_info, sizeof(Remain_info_t)*HyP->RemainBlk); memcpy(A_gpu->scubufs[streamId].Ublock_info_host, HyP->Ublock_info_Phi, sizeof(Ublock_info_t)*HyP->num_u_blks_Phi); memcpy(A_gpu->scubufs[streamId].Remain_L_buff_host, HyP->Remain_L_buff, sizeof(doublecomplex)*Remain_lbuf_send_size); memcpy(A_gpu->scubufs[streamId].bigU_host, bigU, sizeof(doublecomplex)*bigu_send_size); return 0; } /* Allocate GPU memory for the LU data structures, and copy the host LU structure to GPU side. After factorization, the GPU LU structure should be freed by calling zfree_LUstruct_gpu(). */ void zCopyLUToGPU3D ( int* isNodeInMyGrid, zLocalLU_t *A_host, /* distributed LU structure on host */ zsluGPU_t *sluGPU, /* hold LU structure on GPU */ Glu_persist_t *Glu_persist, int_t n, gridinfo3d_t *grid3d, int_t buffer_size, /* bigV size on GPU for Schur complement update */ int_t bigu_size, int_t ldt, SuperLUStat_t *stat ) { gridinfo_t* grid = &(grid3d->grid2d); zLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; zLUstruct_gpu_t **dA_gpu = &(sluGPU->dA_gpu); #if ( PRNTlevel>=1 ) if ( grid3d->iam == 0 ) print_occupancy(); #endif #ifdef GPU_DEBUG // if ( grid3d->iam == 0 ) { gpuDeviceProp devProp; gpuGetDeviceProperties(&devProp, 0); printDevProp(devProp); } #endif int_t *xsup ; xsup = Glu_persist->xsup; int iam = grid->iam; int nsupers = Glu_persist->supno[n - 1] + 1; int_t Pc = grid->npcol; int_t Pr = grid->nprow; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t mrb = (nsupers + Pr - 1) / Pr; int_t mcb = (nsupers + Pc - 1) / Pc; int_t remain_l_max = A_host->bufmax[1]; /*copies of scalars for easy access*/ A_gpu->nsupers = nsupers; stat->ScatterMOPCounter = 0; stat->GemmFLOPCounter = 0; stat->cPCIeH2D = 0; stat->cPCIeD2H = 0; stat->tHost_PCIeH2D = 0; stat->tHost_PCIeD2H = 0; /*initializing memory*/ size_t max_gpu_memory = get_acc_memory (); size_t gpu_mem_used = 0; void *tmp_ptr; A_gpu->xsup_host = xsup; int_t nGPUStreams = sluGPU->nGPUStreams; /*pinned memory allocations. 
Paged-locked memory by gpuMallocHost is accessible to the device.*/ for (int streamId = 0; streamId < nGPUStreams; streamId++ ) { void *tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, (n) * sizeof(int_t) )) ; A_gpu->scubufs[streamId].usub_IndirectJ3_host = (int_t*) tmp_ptr; checkGPUErrors(gpuMalloc( &tmp_ptr, ( n) * sizeof(int_t) )); A_gpu->scubufs[streamId].usub_IndirectJ3 = (int_t*) tmp_ptr; gpu_mem_used += ( n) * sizeof(int_t); checkGPUErrors(gpuMallocHost( &tmp_ptr, mrb * sizeof(Remain_info_t) )) ; A_gpu->scubufs[streamId].Remain_info_host = (Remain_info_t*)tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, mcb * sizeof(Ublock_info_t) )) ; A_gpu->scubufs[streamId].Ublock_info_host = (Ublock_info_t*)tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, remain_l_max * sizeof(doublecomplex) )) ; A_gpu->scubufs[streamId].Remain_L_buff_host = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, bigu_size * sizeof(doublecomplex) )) ; A_gpu->scubufs[streamId].bigU_host = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(doublecomplex) * (A_host->bufmax[1]))); A_gpu->acc_L_buff = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(doublecomplex) * (A_host->bufmax[3]))); A_gpu->acc_U_buff = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(int_t) * (A_host->bufmax[0]))); A_gpu->scubufs[streamId].lsub_buf = (int_t *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(int_t) * (A_host->bufmax[2]))); A_gpu->scubufs[streamId].usub_buf = (int_t *) tmp_ptr; checkGPUErrors(gpuMalloc( &tmp_ptr, remain_l_max * sizeof(doublecomplex) )) ; A_gpu->scubufs[streamId].Remain_L_buff = (doublecomplex *) tmp_ptr; gpu_mem_used += remain_l_max * sizeof(doublecomplex); checkGPUErrors(gpuMalloc( &tmp_ptr, bigu_size * sizeof(doublecomplex) )) ; A_gpu->scubufs[streamId].bigU = (doublecomplex *) tmp_ptr; gpu_mem_used += bigu_size * sizeof(doublecomplex); checkGPUErrors(gpuMalloc( &tmp_ptr, mcb * sizeof(Ublock_info_t) )) ; A_gpu->scubufs[streamId].Ublock_info = (Ublock_info_t *) tmp_ptr; gpu_mem_used += mcb * sizeof(Ublock_info_t); checkGPUErrors(gpuMalloc( &tmp_ptr, mrb * sizeof(Remain_info_t) )) ; A_gpu->scubufs[streamId].Remain_info = (Remain_info_t *) tmp_ptr; gpu_mem_used += mrb * sizeof(Remain_info_t); checkGPUErrors(gpuMalloc( &tmp_ptr, buffer_size * sizeof(doublecomplex))) ; A_gpu->scubufs[streamId].bigV = (doublecomplex *) tmp_ptr; gpu_mem_used += buffer_size * sizeof(doublecomplex); checkGPUErrors(gpuMalloc( &tmp_ptr, A_host->bufmax[0]*sizeof(int_t))) ; A_gpu->scubufs[streamId].lsub = (int_t *) tmp_ptr; gpu_mem_used += A_host->bufmax[0] * sizeof(int_t); checkGPUErrors(gpuMalloc( &tmp_ptr, A_host->bufmax[2]*sizeof(int_t))) ; A_gpu->scubufs[streamId].usub = (int_t *) tmp_ptr; gpu_mem_used += A_host->bufmax[2] * sizeof(int_t); } /* endfor streamID ... 
allocate paged-locked memory */ stat->isOffloaded = (int *) SUPERLU_MALLOC (sizeof(int) * nsupers); stat->GemmStart = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->GemmEnd = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->ScatterEnd = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->ePCIeH2D = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->ePCIeD2H_Start = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->ePCIeD2H_End = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); for (int i = 0; i < nsupers; ++i) { stat->isOffloaded[i] = 0; checkGPUErrors(gpuEventCreate(&(stat->GemmStart[i]))); checkGPUErrors(gpuEventCreate(&(stat->GemmEnd[i]))); checkGPUErrors(gpuEventCreate(&(stat->ScatterEnd[i]))); checkGPUErrors(gpuEventCreate(&(stat->ePCIeH2D[i]))); checkGPUErrors(gpuEventCreate(&(stat->ePCIeD2H_Start[i]))); checkGPUErrors(gpuEventCreate(&(stat->ePCIeD2H_End[i]))); } /*---- Copy L data structure to GPU ----*/ /*pointers and address of local blocks for easy accessibility */ local_l_blk_info_t *local_l_blk_infoVec; int_t * local_l_blk_infoPtr; local_l_blk_infoPtr = (int_t *) malloc( CEILING(nsupers, Pc) * sizeof(int_t ) ); /* First pass: count total L blocks */ int_t cum_num_l_blocks = 0; /* total number of L blocks I own */ for (int_t i = 0; i < CEILING(nsupers, Pc); ++i) { /* going through each block column I own */ if (A_host->Lrowind_bc_ptr[i] != NULL && isNodeInMyGrid[i * Pc + mycol] == 1) { int_t *index = A_host->Lrowind_bc_ptr[i]; int_t num_l_blocks = index[0]; cum_num_l_blocks += num_l_blocks; } } /*allocating memory*/ local_l_blk_infoVec = (local_l_blk_info_t *) malloc(cum_num_l_blocks * sizeof(local_l_blk_info_t)); /* Second pass: set up the meta-data for the L structure */ cum_num_l_blocks = 0; /*initialzing vectors */ for (int_t i = 0; i < CEILING(nsupers, Pc); ++i) { if (A_host->Lrowind_bc_ptr[i] != NULL && isNodeInMyGrid[i * Pc + mycol] == 1) { int_t *index = A_host->Lrowind_bc_ptr[i]; int_t num_l_blocks = index[0]; /* # L blocks in this column */ if (num_l_blocks > 0) { local_l_blk_info_t *local_l_blk_info_i = local_l_blk_infoVec + cum_num_l_blocks; local_l_blk_infoPtr[i] = cum_num_l_blocks; int_t lptrj = BC_HEADER; int_t luptrj = 0; for (int_t j = 0; j < num_l_blocks ; ++j) { int_t ijb = index[lptrj]; local_l_blk_info_i[j].lib = ijb / Pr; local_l_blk_info_i[j].lptrj = lptrj; local_l_blk_info_i[j].luptrj = luptrj; luptrj += index[lptrj + 1]; lptrj += LB_DESCRIPTOR + index[lptrj + 1]; } } cum_num_l_blocks += num_l_blocks; } } /* endfor all block columns */ /* Allocate L memory on GPU, and copy the values from CPU to GPU */ checkGPUErrors(gpuMalloc( &tmp_ptr, cum_num_l_blocks * sizeof(local_l_blk_info_t))) ; A_gpu->local_l_blk_infoVec = (local_l_blk_info_t *) tmp_ptr; gpu_mem_used += cum_num_l_blocks * sizeof(local_l_blk_info_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_l_blk_infoVec), local_l_blk_infoVec, cum_num_l_blocks * sizeof(local_l_blk_info_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, CEILING(nsupers, Pc)*sizeof(int_t))) ; A_gpu->local_l_blk_infoPtr = (int_t *) tmp_ptr; gpu_mem_used += CEILING(nsupers, Pc) * sizeof(int_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_l_blk_infoPtr), local_l_blk_infoPtr, CEILING(nsupers, Pc)*sizeof(int_t), gpuMemcpyHostToDevice)) ; /*---- Copy U data structure to GPU ----*/ local_u_blk_info_t *local_u_blk_infoVec; int_t * local_u_blk_infoPtr; local_u_blk_infoPtr = (int_t *) malloc( CEILING(nsupers, Pr) * sizeof(int_t ) ); /* 
First pass: count total U blocks */ int_t cum_num_u_blocks = 0; for (int_t i = 0; i < CEILING(nsupers, Pr); ++i) { if (A_host->Ufstnz_br_ptr[i] != NULL && isNodeInMyGrid[i * Pr + myrow] == 1) { int_t *index = A_host->Ufstnz_br_ptr[i]; int_t num_u_blocks = index[0]; cum_num_u_blocks += num_u_blocks; } } local_u_blk_infoVec = (local_u_blk_info_t *) malloc(cum_num_u_blocks * sizeof(local_u_blk_info_t)); /* Second pass: set up the meta-data for the U structure */ cum_num_u_blocks = 0; for (int_t i = 0; i < CEILING(nsupers, Pr); ++i) { if (A_host->Ufstnz_br_ptr[i] != NULL && isNodeInMyGrid[i * Pr + myrow] == 1) { int_t *index = A_host->Ufstnz_br_ptr[i]; int_t num_u_blocks = index[0]; if (num_u_blocks > 0) { local_u_blk_info_t *local_u_blk_info_i = local_u_blk_infoVec + cum_num_u_blocks; local_u_blk_infoPtr[i] = cum_num_u_blocks; int_t iuip_lib, ruip_lib; iuip_lib = BR_HEADER; ruip_lib = 0; for (int_t j = 0; j < num_u_blocks ; ++j) { int_t ijb = index[iuip_lib]; local_u_blk_info_i[j].ljb = ijb / Pc; local_u_blk_info_i[j].iuip = iuip_lib; local_u_blk_info_i[j].ruip = ruip_lib; ruip_lib += index[iuip_lib + 1]; iuip_lib += UB_DESCRIPTOR + SuperSize (ijb); } } cum_num_u_blocks += num_u_blocks; } } checkGPUErrors(gpuMalloc( &tmp_ptr, cum_num_u_blocks * sizeof(local_u_blk_info_t))) ; A_gpu->local_u_blk_infoVec = (local_u_blk_info_t *) tmp_ptr; gpu_mem_used += cum_num_u_blocks * sizeof(local_u_blk_info_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_u_blk_infoVec), local_u_blk_infoVec, cum_num_u_blocks * sizeof(local_u_blk_info_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, CEILING(nsupers, Pr)*sizeof(int_t))) ; A_gpu->local_u_blk_infoPtr = (int_t *) tmp_ptr; gpu_mem_used += CEILING(nsupers, Pr) * sizeof(int_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_u_blk_infoPtr), local_u_blk_infoPtr, CEILING(nsupers, Pr)*sizeof(int_t), gpuMemcpyHostToDevice)) ; /* Copy the actual L indices and values */ int_t l_k = CEILING( nsupers, grid->npcol ); /* # of local block columns */ int_t *temp_LrowindPtr = (int_t *) malloc(sizeof(int_t) * l_k); int_t *temp_LnzvalPtr = (int_t *) malloc(sizeof(int_t) * l_k); int_t *Lnzval_size = (int_t *) malloc(sizeof(int_t) * l_k); int_t l_ind_len = 0; int_t l_val_len = 0; for (int_t jb = 0; jb < nsupers; ++jb) /* for each block column ... */ { int_t pc = PCOL( jb, grid ); if (mycol == pc && isNodeInMyGrid[jb] == 1) { int_t ljb = LBj( jb, grid ); /* Local block number */ int_t *index_host; index_host = A_host->Lrowind_bc_ptr[ljb]; temp_LrowindPtr[ljb] = l_ind_len; temp_LnzvalPtr[ljb] = l_val_len; // ### Lnzval_size[ljb] = 0; //### if (index_host != NULL) { int_t nrbl = index_host[0]; /* number of L blocks */ int_t len = index_host[1]; /* LDA of the nzval[] */ int_t len1 = len + BC_HEADER + nrbl * LB_DESCRIPTOR; /* Global block number is mycol + ljb*Pc */ int_t nsupc = SuperSize(jb); l_ind_len += len1; l_val_len += len * nsupc; Lnzval_size[ljb] = len * nsupc ; // ### } else { Lnzval_size[ljb] = 0 ; // ### } } } /* endfor jb = 0 ... 
*/ /* Copy the actual U indices and values */ int_t u_k = CEILING( nsupers, grid->nprow ); /* Number of local block rows */ int_t *temp_UrowindPtr = (int_t *) malloc(sizeof(int_t) * u_k); int_t *temp_UnzvalPtr = (int_t *) malloc(sizeof(int_t) * u_k); int_t *Unzval_size = (int_t *) malloc(sizeof(int_t) * u_k); int_t u_ind_len = 0; int_t u_val_len = 0; for ( int_t lb = 0; lb < u_k; ++lb) { int_t *index_host; index_host = A_host->Ufstnz_br_ptr[lb]; temp_UrowindPtr[lb] = u_ind_len; temp_UnzvalPtr[lb] = u_val_len; Unzval_size[lb] = 0; if (index_host != NULL && isNodeInMyGrid[lb * Pr + myrow] == 1) { int_t len = index_host[1]; int_t len1 = index_host[2]; u_ind_len += len1; u_val_len += len; Unzval_size[lb] = len; } else { Unzval_size[lb] = 0; } } gpu_mem_used += l_ind_len * sizeof(int_t); gpu_mem_used += 2 * l_k * sizeof(int_t); gpu_mem_used += u_ind_len * sizeof(int_t); gpu_mem_used += 2 * u_k * sizeof(int_t); /*left memory shall be divided among the two */ for (int_t i = 0; i < l_k; ++i) { temp_LnzvalPtr[i] = -1; } for (int_t i = 0; i < u_k; ++i) { temp_UnzvalPtr[i] = -1; } /*setting these pointers back */ l_val_len = 0; u_val_len = 0; int_t num_gpu_l_blocks = 0; int_t num_gpu_u_blocks = 0; size_t mem_l_block, mem_u_block; /* Find the trailing matrix size that can fit into GPU memory */ for (int_t i = nsupers - 1; i > -1; --i) { /* walk the elimination tree backwards */ /* bottom up ordering */ int_t i_sup = A_gpu->perm_c_supno[i]; int_t pc = PCOL( i_sup, grid ); if (isNodeInMyGrid[i_sup] == 1) { if (mycol == pc ) { int_t ljb = LBj(i_sup, grid); mem_l_block = sizeof(doublecomplex) * Lnzval_size[ljb]; if (gpu_mem_used + mem_l_block > max_gpu_memory) { break; } else { gpu_mem_used += mem_l_block; temp_LnzvalPtr[ljb] = l_val_len; l_val_len += Lnzval_size[ljb]; num_gpu_l_blocks++; A_gpu->first_l_block_gpu = i; } } int_t pr = PROW( i_sup, grid ); if (myrow == pr) { int_t lib = LBi(i_sup, grid); mem_u_block = sizeof(doublecomplex) * Unzval_size[lib]; if (gpu_mem_used + mem_u_block > max_gpu_memory) { break; } else { gpu_mem_used += mem_u_block; temp_UnzvalPtr[lib] = u_val_len; u_val_len += Unzval_size[lib]; num_gpu_u_blocks++; A_gpu->first_u_block_gpu = i; } } } /* endif */ } /* endfor i .... nsupers */ #if (PRNTlevel>=2) printf("(%d) Number of L blocks in GPU %d, U blocks %d\n", grid3d->iam, num_gpu_l_blocks, num_gpu_u_blocks ); printf("(%d) elimination order of first block in GPU: L block %d, U block %d\n", grid3d->iam, A_gpu->first_l_block_gpu, A_gpu->first_u_block_gpu); printf("(%d) Memory of L %.1f GB, memory for U %.1f GB, Total device memory used %.1f GB, Memory allowed %.1f GB \n", grid3d->iam, l_val_len * sizeof(doublecomplex) * 1e-9, u_val_len * sizeof(doublecomplex) * 1e-9, gpu_mem_used * 1e-9, max_gpu_memory * 1e-9); fflush(stdout); #endif /* Assemble index vector on temp */ int_t *indtemp = (int_t *) malloc(sizeof(int_t) * l_ind_len); for (int_t jb = 0; jb < nsupers; ++jb) /* for each block column ...
*/ { int_t pc = PCOL( jb, grid ); if (mycol == pc && isNodeInMyGrid[jb] == 1) { int_t ljb = LBj( jb, grid ); /* Local block number */ int_t *index_host; index_host = A_host->Lrowind_bc_ptr[ljb]; if (index_host != NULL) { int_t nrbl = index_host[0]; /* number of L blocks */ int_t len = index_host[1]; /* LDA of the nzval[] */ int_t len1 = len + BC_HEADER + nrbl * LB_DESCRIPTOR; memcpy(&indtemp[temp_LrowindPtr[ljb]] , index_host, len1 * sizeof(int_t)) ; } } } checkGPUErrors(gpuMalloc( &tmp_ptr, l_ind_len * sizeof(int_t))) ; A_gpu->LrowindVec = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LrowindVec), indtemp, l_ind_len * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, l_val_len * sizeof(doublecomplex))); A_gpu->LnzvalVec = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMemset( (A_gpu->LnzvalVec), 0, l_val_len * sizeof(doublecomplex))); checkGPUErrors(gpuMalloc( &tmp_ptr, l_k * sizeof(int_t))) ; A_gpu->LrowindPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LrowindPtr), temp_LrowindPtr, l_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, l_k * sizeof(int_t))) ; A_gpu->LnzvalPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LnzvalPtr), temp_LnzvalPtr, l_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; A_gpu->LnzvalPtr_host = temp_LnzvalPtr; int_t *indtemp1 = (int_t *) malloc(sizeof(int_t) * u_ind_len); for ( int_t lb = 0; lb < u_k; ++lb) { int_t *index_host; index_host = A_host->Ufstnz_br_ptr[lb]; if (index_host != NULL && isNodeInMyGrid[lb * Pr + myrow] == 1) { int_t len1 = index_host[2]; memcpy(&indtemp1[temp_UrowindPtr[lb]] , index_host, sizeof(int_t)*len1); } } checkGPUErrors(gpuMalloc( &tmp_ptr, u_ind_len * sizeof(int_t))) ; A_gpu->UrowindVec = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UrowindVec), indtemp1, u_ind_len * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, u_val_len * sizeof(doublecomplex))); A_gpu->UnzvalVec = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMemset( (A_gpu->UnzvalVec), 0, u_val_len * sizeof(doublecomplex))); checkGPUErrors(gpuMalloc( &tmp_ptr, u_k * sizeof(int_t))) ; A_gpu->UrowindPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UrowindPtr), temp_UrowindPtr, u_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; A_gpu->UnzvalPtr_host = temp_UnzvalPtr; checkGPUErrors(gpuMalloc( &tmp_ptr, u_k * sizeof(int_t))) ; A_gpu->UnzvalPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UnzvalPtr), temp_UnzvalPtr, u_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, (nsupers + 1)*sizeof(int_t))) ; A_gpu->xsup = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->xsup), xsup, (nsupers + 1)*sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, sizeof(zLUstruct_gpu_t))) ; *dA_gpu = (zLUstruct_gpu_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( *dA_gpu, A_gpu, sizeof(zLUstruct_gpu_t), gpuMemcpyHostToDevice)) ; free (temp_LrowindPtr); free (temp_UrowindPtr); free (indtemp1); free (indtemp); } /* end zCopyLUToGPU3D */ int zreduceAllAncestors3d_GPU ( int_t ilvl, int_t* myNodeCount, int_t** treePerm, zLUValSubBuf_t*LUvsb, zLUstruct_t* LUstruct, gridinfo3d_t* grid3d, zsluGPU_t *sluGPU, d2Hreduce_t* d2Hred, factStat_t *factStat, HyP_t* HyP, SCT_t* SCT, SuperLUStat_t *stat ) { // first synchronize all gpu streams int superlu_acc_offload = HyP->superlu_acc_offload; int_t maxLvl = log2i( (int_t) grid3d->zscp.Np) + 1; int_t myGrid = grid3d->zscp.Iam; gridinfo_t* grid = &(grid3d->grid2d); int_t* gpuLUreduced = 
factStat->gpuLUreduced; int_t sender; if ((myGrid % (1 << (ilvl + 1))) == 0) { sender = myGrid + (1 << ilvl); } else { sender = myGrid; } /*Reduce all the ancestors from the GPU*/ if (myGrid == sender && superlu_acc_offload) { for (int_t streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamId] = -1; } for (int_t alvl = ilvl + 1; alvl < maxLvl; ++alvl) { /* code */ // int_t atree = myTreeIdxs[alvl]; int_t nsAncestor = myNodeCount[alvl]; int_t* cAncestorList = treePerm[alvl]; for (int_t node = 0; node < nsAncestor; node++ ) { int_t k = cAncestorList[node]; if (!gpuLUreduced[k]) { zinitD2Hreduce(k, d2Hred, 1, HyP, sluGPU, grid, LUstruct, SCT); int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; double tt_start1 = SuperLU_timer_(); SCT->PhiMemCpyTimer += SuperLU_timer_() - tt_start1; if (copyL_kljb || copyU_kljb) SCT->PhiMemCpyCounter++; zsendLUpanelGPU2HOST(k, d2Hred, sluGPU, stat); /* Reduce the LU panels from GPU */ zreduceGPUlu(1, d2Hred, sluGPU, SCT, grid, LUstruct); gpuLUreduced[k] = 1; } } } } /*if (myGrid == sender)*/ zreduceAllAncestors3d(ilvl, myNodeCount, treePerm, LUvsb, LUstruct, grid3d, SCT ); return 0; } /* zreduceAllAncestors3d_GPU */ void zsyncAllfunCallStreams(zsluGPU_t* sluGPU, SCT_t* SCT) { for (int streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamId] = -1; } }
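The sender/receiver pairing inside zreduceAllAncestors3d_GPU above is easy to lose in the surrounding stream handling. The standalone host sketch below is not SuperLU_DIST code; npz is a made-up example value standing in for grid3d->zscp.Np. It reproduces only the bit arithmetic, showing which 2-D grid is the "sender" of ancestor contributions at each level of the binary reduction tree (the real routine additionally checks superlu_acc_offload before flushing GPU-resident panels to the host).
/* Illustrative sketch only -- mirrors the sender selection used in
 * zreduceAllAncestors3d_GPU; npz is an assumed example value. */
#include <stdio.h>

int main(void)
{
    int npz = 8;                 /* assumed number of 2-D grids (grid3d->zscp.Np) */
    int maxLvl = 1;              /* log2i(npz) + 1, computed by hand here */
    for (int t = npz; t > 1; t >>= 1) ++maxLvl;

    for (int ilvl = 0; ilvl + 1 < maxLvl; ++ilvl) {
        printf("level %d:\n", ilvl);
        for (int myGrid = 0; myGrid < npz; ++myGrid) {
            int sender;
            if ((myGrid % (1 << (ilvl + 1))) == 0)
                sender = myGrid + (1 << ilvl);  /* my partner is the sender */
            else
                sender = myGrid;                /* I am the sender at this level */
            printf("  grid %d: sender = %d%s\n", myGrid, sender,
                   (myGrid == sender) ?
                   "  (would flush its GPU ancestors to the host before reducing)" : "");
        }
    }
    return 0;
}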
85992a85f70b23853dc755273ecf365720e082b6.cu
/*! @file * \brief Descriptions and declarations for structures used in GPU * * <pre> * -- Distributed SuperLU routine (version 7.2) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley, * Georgia Institute of Technology, Oak Ridge National Laboratory * * Last update: November 14, 2021 remove dependence on CUB/scan * </pre> */ //#define GPU_DEBUG #include "superlu_defs.h" #undef Reduce //#include <thrust/system/cuda/detail/cub/cub.cuh> #include "zlustruct_gpu.h" #ifdef HAVE_HIP #include "superlu_gpu_utils.hip.cpp" #endif #include "dcomplex.h" //extern "C" { // void cblas_daxpy(const int N, const double alpha, const double *X, // const int incX, double *Y, const int incY); //} // gpublasStatus_t checkGPUblas(gpublasStatus_t result) // { // #if defined(DEBUG) || defined(_DEBUG) // if (result != GPUBLAS_STATUS_SUCCESS) // { // fprintf(stderr, "GPU BLAS Runtime Error: %s\n", gpublasGetErrorString(result)); // assert(result == GPUBLAS_STATUS_SUCCESS); // } // #endif // return result; // } // #define UNIT_STRIDE #if 0 ////////// this routine is not used anymore __device__ inline void device_scatter_l (int_t thread_id, int_t nsupc, int_t temp_nbrow, int_t *usub, int_t iukp, int_t klst, doublecomplex *nzval, int_t ldv, doublecomplex *tempv, int_t nbrow, // int_t *indirect2_thread int *indirect2_thread ) { int_t segsize, jj; for (jj = 0; jj < nsupc; ++jj) { segsize = klst - usub[iukp + jj]; if (segsize) { if (thread_id < temp_nbrow) { #ifndef UNIT_STRIDE nzval[indirect2_thread[thread_id]] -= tempv[thread_id]; #else nzval[thread_id] -= tempv[thread_id]; /*making access unit strided*/ #endif } tempv += nbrow; } nzval += ldv; } } #endif ///////////// not used //#define THREAD_BLOCK_SIZE 256 /* Sherry: was 192. should be <= MAX_SUPER_SIZE */ __device__ inline void zdevice_scatter_l_2D (int thread_id, int nsupc, int temp_nbrow, int_t *usub, int iukp, int_t klst, doublecomplex *nzval, int ldv, const doublecomplex *tempv, int nbrow, int *indirect2_thread, int nnz_cols, int ColPerBlock, int *IndirectJ3 ) { int i; if ( thread_id < temp_nbrow * ColPerBlock ) { int thread_id_x = thread_id % temp_nbrow; int thread_id_y = thread_id / temp_nbrow; #define UNROLL_ITER 8 #pragma unroll 4 for (int col = thread_id_y; col < nnz_cols ; col += ColPerBlock) { i = ldv * IndirectJ3[col] + indirect2_thread[thread_id_x]; z_sub(&nzval[i], &nzval[i], &tempv[nbrow * col + thread_id_x]); } } } /* Sherry: this routine is not used */ #if 0 ////////////////////////////////////////////// __global__ void cub_scan_test(void) { int thread_id = threadIdx.x; typedef cub::BlockScan<int, MAX_SUPER_SIZE > BlockScan; /*1D int data type*/ __shared__ typename BlockScan::TempStorage temp_storage; /*storage temp*/ __shared__ int IndirectJ1[MAX_SUPER_SIZE]; __shared__ int IndirectJ2[MAX_SUPER_SIZE]; if (thread_id < MAX_SUPER_SIZE) { IndirectJ1[thread_id] = (thread_id + 1) % 2; } __syncthreads(); if (thread_id < MAX_SUPER_SIZE) BlockScan(temp_storage).InclusiveSum (IndirectJ1[thread_id], IndirectJ2[thread_id]); if (thread_id < MAX_SUPER_SIZE) printf("%d %d\n", thread_id, IndirectJ2[thread_id]); } #endif /////////////////////////////////// not used __device__ inline void device_scatter_u_2D (int thread_id, int temp_nbrow, int nsupc, doublecomplex * ucol, int_t * usub, int iukp, int_t ilst, int_t klst, int_t * index, int iuip_lib, doublecomplex * tempv, int nbrow, int *indirect, int nnz_cols, int ColPerBlock, int *IndirectJ1, int *IndirectJ3 ) { int i; if ( thread_id < temp_nbrow * ColPerBlock ) { /* 1D threads are logically arranged in 
2D shape. */ int thread_id_x = thread_id % temp_nbrow; int thread_id_y = thread_id / temp_nbrow; #pragma unroll 4 for (int col = thread_id_y; col < nnz_cols ; col += ColPerBlock) { i = IndirectJ1[IndirectJ3[col]]-ilst + indirect[thread_id_x]; z_sub(&ucol[i], &ucol[i], &tempv[nbrow * col + thread_id_x]); } } } __global__ void Scatter_GPU_kernel( int_t streamId, int_t ii_st, int_t ii_end, int_t jj_st, int_t jj_end, /* defines rectangular Schur block to be scatter */ int_t klst, int_t jj0, /* 0 on entry */ int_t nrows, int_t ldt, int_t npcol, int_t nprow, zLUstruct_gpu_t * A_gpu) { /* initializing pointers */ int_t *xsup = A_gpu->xsup; int_t *UrowindPtr = A_gpu->UrowindPtr; int_t *UrowindVec = A_gpu->UrowindVec; int_t *UnzvalPtr = A_gpu->UnzvalPtr; doublecomplex *UnzvalVec = A_gpu->UnzvalVec; int_t *LrowindPtr = A_gpu->LrowindPtr; int_t *LrowindVec = A_gpu->LrowindVec; int_t *LnzvalPtr = A_gpu->LnzvalPtr; doublecomplex *LnzvalVec = A_gpu->LnzvalVec; doublecomplex *bigV = A_gpu->scubufs[streamId].bigV; local_l_blk_info_t *local_l_blk_infoVec = A_gpu->local_l_blk_infoVec; local_u_blk_info_t *local_u_blk_infoVec = A_gpu->local_u_blk_infoVec; int_t *local_l_blk_infoPtr = A_gpu->local_l_blk_infoPtr; int_t *local_u_blk_infoPtr = A_gpu->local_u_blk_infoPtr; Remain_info_t *Remain_info = A_gpu->scubufs[streamId].Remain_info; Ublock_info_t *Ublock_info = A_gpu->scubufs[streamId].Ublock_info; int_t *lsub = A_gpu->scubufs[streamId].lsub; int_t *usub = A_gpu->scubufs[streamId].usub; /* thread block assignment: this thread block is assigned to block (lb, j) in 2D grid */ int lb = blockIdx.x + ii_st; int j = blockIdx.y + jj_st; extern __shared__ int s[]; int* indirect_lptr = s; /* row-wise */ int* indirect2_thread= (int*) &indirect_lptr[ldt]; /* row-wise */ int* IndirectJ1= (int*) &indirect2_thread[ldt]; /* column-wise */ int* IndirectJ3= (int*) &IndirectJ1[ldt]; /* column-wise */ //int THREAD_BLOCK_SIZE =ldt; int* pfxStorage = (int*) &IndirectJ3[ldt]; int thread_id = threadIdx.x; int iukp = Ublock_info[j].iukp; int jb = Ublock_info[j].jb; int nsupc = SuperSize (jb); int ljb = jb / npcol; typedef int pfx_dtype ; extern __device__ void incScan(pfx_dtype *inOutArr, pfx_dtype *temp, int n); doublecomplex *tempv1; if (jj_st == jj0) { tempv1 = (j == jj_st) ? bigV : bigV + Ublock_info[j - 1].full_u_cols * nrows; } else { tempv1 = (j == jj_st) ? bigV : bigV + (Ublock_info[j - 1].full_u_cols - Ublock_info[jj_st - 1].full_u_cols) * nrows; } /* # of nonzero columns in block j */ int nnz_cols = (j == 0) ? Ublock_info[j].full_u_cols : (Ublock_info[j].full_u_cols - Ublock_info[j - 1].full_u_cols); int cum_ncol = (j == 0) ? 0 : Ublock_info[j - 1].full_u_cols; int lptr = Remain_info[lb].lptr; int ib = Remain_info[lb].ib; int temp_nbrow = lsub[lptr + 1]; /* number of rows in the current L block */ lptr += LB_DESCRIPTOR; int_t cum_nrow; if (ii_st == 0) { cum_nrow = (lb == 0 ? 0 : Remain_info[lb - 1].FullRow); } else { cum_nrow = (lb == 0 ? 
0 : Remain_info[lb - 1].FullRow - Remain_info[ii_st - 1].FullRow); } tempv1 += cum_nrow; if (ib < jb) /*scatter U code */ { int ilst = FstBlockC (ib + 1); int lib = ib / nprow; /* local index of row block ib */ int_t *index = &UrowindVec[UrowindPtr[lib]]; int num_u_blocks = index[0]; int ljb = (jb) / npcol; /* local index of column block jb */ /* Each thread is responsible for one block column */ __shared__ int ljb_ind; /*do a search ljb_ind at local row lib*/ int blks_per_threads = CEILING(num_u_blocks, blockDim.x); // printf("blockDim.x =%d \n", blockDim.x); for (int i = 0; i < blks_per_threads; ++i) /* each thread is assigned a chunk of consecutive U blocks to search */ { /* only one thread finds the block index matching ljb */ if (thread_id * blks_per_threads + i < num_u_blocks && local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + thread_id * blks_per_threads + i ].ljb == ljb) { ljb_ind = thread_id * blks_per_threads + i; } } __syncthreads(); int iuip_lib = local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + ljb_ind].iuip; int ruip_lib = local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + ljb_ind].ruip; iuip_lib += UB_DESCRIPTOR; doublecomplex *Unzval_lib = &UnzvalVec[UnzvalPtr[lib]]; doublecomplex *ucol = &Unzval_lib[ruip_lib]; if (thread_id < temp_nbrow) /* row-wise */ { /* cyclically map each thread to a row */ indirect_lptr[thread_id] = (int) lsub[lptr + thread_id]; } /* column-wise: each thread is assigned one column */ if (thread_id < nnz_cols) IndirectJ3[thread_id] = A_gpu->scubufs[streamId].usub_IndirectJ3[cum_ncol + thread_id]; /* indirectJ3[j] == kk means the j-th nonzero segment points to column kk in this supernode */ __syncthreads(); /* threads are divided into multiple columns */ int ColPerBlock = blockDim.x / temp_nbrow; // if (thread_id < blockDim.x) // IndirectJ1[thread_id] = 0; if (thread_id < ldt) IndirectJ1[thread_id] = 0; if (thread_id < blockDim.x) { if (thread_id < nsupc) { /* fstnz subscript of each column in the block */ IndirectJ1[thread_id] = -index[iuip_lib + thread_id] + ilst; } } /* perform an inclusive block-wide prefix sum among all threads */ __syncthreads(); incScan(IndirectJ1, pfxStorage, nsupc); __syncthreads(); device_scatter_u_2D ( thread_id, temp_nbrow, nsupc, ucol, usub, iukp, ilst, klst, index, iuip_lib, tempv1, nrows, indirect_lptr, nnz_cols, ColPerBlock, IndirectJ1, IndirectJ3 ); } else /* ib >= jb, scatter L code */ { int rel; doublecomplex *nzval; int_t *index = &LrowindVec[LrowindPtr[ljb]]; int num_l_blocks = index[0]; int ldv = index[1]; int fnz = FstBlockC (ib); int lib = ib / nprow; __shared__ int lib_ind; /*do a search lib_ind for lib*/ int blks_per_threads = CEILING(num_l_blocks, blockDim.x); for (int i = 0; i < blks_per_threads; ++i) { if (thread_id * blks_per_threads + i < num_l_blocks && local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + thread_id * blks_per_threads + i ].lib == lib) { lib_ind = thread_id * blks_per_threads + i; } } __syncthreads(); int lptrj = local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + lib_ind].lptrj; int luptrj = local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + lib_ind].luptrj; lptrj += LB_DESCRIPTOR; int dest_nbrow = index[lptrj - 1]; if (thread_id < dest_nbrow) { rel = index[lptrj + thread_id] - fnz; indirect_lptr[rel] = thread_id; } __syncthreads(); /* can be precalculated */ if (thread_id < temp_nbrow) { rel = lsub[lptr + thread_id] - fnz; indirect2_thread[thread_id] = indirect_lptr[rel]; } if (thread_id < nnz_cols) IndirectJ3[thread_id] = (int) A_gpu->scubufs[streamId].usub_IndirectJ3[cum_ncol + thread_id]; 
__syncthreads(); int ColPerBlock = blockDim.x / temp_nbrow; nzval = &LnzvalVec[LnzvalPtr[ljb]] + luptrj; zdevice_scatter_l_2D( thread_id, nsupc, temp_nbrow, usub, iukp, klst, nzval, ldv, tempv1, nrows, indirect2_thread, nnz_cols, ColPerBlock, IndirectJ3); } /* end else ib >= jb */ } /* end Scatter_GPU_kernel */ #define GPU_2D_SCHUDT /* Not used */ int zSchurCompUpdate_GPU( int_t streamId, int_t jj_cpu, /* 0 on entry, pointing to the start of Phi part */ int_t nub, /* jj_cpu on entry, pointing to the end of the Phi part */ int_t klst, int_t knsupc, int_t Rnbrow, int_t RemainBlk, int_t Remain_lbuf_send_size, int_t bigu_send_size, int_t ldu, int_t mcb, /* num_u_blks_hi */ int_t buffer_size, int_t lsub_len, int_t usub_len, int_t ldt, int_t k0, zsluGPU_t *sluGPU, gridinfo_t *grid, SuperLUStat_t *stat ) { int SCATTER_THREAD_BLOCK_SIZE=512; zLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; zLUstruct_gpu_t * dA_gpu = sluGPU->dA_gpu; int_t nprow = grid->nprow; int_t npcol = grid->npcol; gpuStream_t FunCallStream = sluGPU->funCallStreams[streamId]; gpublasHandle_t gpublas_handle0 = sluGPU->gpublasHandles[streamId]; int_t * lsub = A_gpu->scubufs[streamId].lsub_buf; int_t * usub = A_gpu->scubufs[streamId].usub_buf; Remain_info_t *Remain_info = A_gpu->scubufs[streamId].Remain_info_host; doublecomplex * Remain_L_buff = A_gpu->scubufs[streamId].Remain_L_buff_host; Ublock_info_t *Ublock_info = A_gpu->scubufs[streamId].Ublock_info_host; doublecomplex * bigU = A_gpu->scubufs[streamId].bigU_host; stat->isOffloaded[k0] = 1; /* start by sending data to */ int_t *xsup = A_gpu->xsup_host; int_t col_back = (jj_cpu == 0) ? 0 : Ublock_info[jj_cpu - 1].full_u_cols; // if(nub<1) return; int_t ncols = Ublock_info[nub - 1].full_u_cols - col_back; /* Sherry: can get max_super_size from sp_ienv(3) */ int_t indirectJ1[MAX_SUPER_SIZE]; // 0 indicates an empry segment int_t indirectJ2[MAX_SUPER_SIZE]; // # of nonzero segments so far int_t indirectJ3[MAX_SUPER_SIZE]; /* indirectJ3[j] == k means the j-th nonzero segment points to column k in this supernode */ /* calculate usub_indirect */ for (int jj = jj_cpu; jj < nub; ++jj) { int_t iukp = Ublock_info[jj].iukp; int_t jb = Ublock_info[jj].jb; int_t nsupc = SuperSize (jb); int_t addr = (jj == 0) ? 0 : Ublock_info[jj - 1].full_u_cols - col_back; for (int_t kk = 0; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { indirectJ1[kk] = 0; } for (int_t kk = 0; kk < nsupc; ++kk) { indirectJ1[kk] = ((klst - usub[iukp + kk]) == 0) ? 0 : 1; } /*prefix sum - indicates # of nonzero segments up to column kk */ indirectJ2[0] = indirectJ1[0]; for (int_t kk = 1; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { indirectJ2[kk] = indirectJ2[kk - 1] + indirectJ1[kk]; } /* total number of nonzero segments in this supernode */ int nnz_col = indirectJ2[nsupc - 1]; // old: MAX_SUPER_SIZE /* compactation */ for (int_t kk = 0; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { if (indirectJ1[kk]) /* kk is a nonzero segment */ { /* indirectJ3[j] == kk means the j-th nonzero segment points to column kk in this supernode */ indirectJ3[indirectJ2[kk] - 1] = kk; } } for (int i = 0; i < nnz_col; ++i) { /* addr == total # of full columns before current block jj */ A_gpu->scubufs[streamId].usub_IndirectJ3_host[addr + i] = indirectJ3[i]; } } /* end for jj ... 
calculate usub_indirect */ //printf("zSchurCompUpdate_GPU[3]: jj_cpu %d, nub %d\n", jj_cpu, nub); fflush(stdout); /*sizeof RemainLbuf = Rnbuf*knsupc */ double tTmp = SuperLU_timer_(); gpuEventRecord(stat->ePCIeH2D[k0], FunCallStream); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].usub_IndirectJ3, A_gpu->scubufs[streamId].usub_IndirectJ3_host, ncols * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream)) ; checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Remain_L_buff, Remain_L_buff, Remain_lbuf_send_size * sizeof(doublecomplex), gpuMemcpyHostToDevice, FunCallStream)) ; checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].bigU, bigU, bigu_send_size * sizeof(doublecomplex), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Remain_info, Remain_info, RemainBlk * sizeof(Remain_info_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Ublock_info, Ublock_info, mcb * sizeof(Ublock_info_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].lsub, lsub, lsub_len * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].usub, usub, usub_len * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream) ); stat->tHost_PCIeH2D += SuperLU_timer_() - tTmp; stat->cPCIeH2D += Remain_lbuf_send_size * sizeof(doublecomplex) + bigu_send_size * sizeof(doublecomplex) + RemainBlk * sizeof(Remain_info_t) + mcb * sizeof(Ublock_info_t) + lsub_len * sizeof(int_t) + usub_len * sizeof(int_t); doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0}; /* The following are used in gpublasZgemm() call */ gpuDoubleComplex *cu_alpha = (gpuDoubleComplex *) &alpha; gpuDoubleComplex *cu_beta = (gpuDoubleComplex *) &beta; gpuDoubleComplex *cu_A, *cu_B, *cu_C; /* C <- A*B */ int_t ii_st = 0; int_t ii_end = 0; int_t maxGemmBlockDim = (int) sqrt(buffer_size); // int_t maxGemmBlockDim = 8000; /* Organize GEMM by blocks of [ii_st : ii_end, jj_st : jj_end] that fits in the buffer_size */ while (ii_end < RemainBlk) { ii_st = ii_end; ii_end = RemainBlk; int_t nrow_max = maxGemmBlockDim; // nrow_max = Rnbrow; int_t remaining_rows = (ii_st == 0) ? Rnbrow : Rnbrow - Remain_info[ii_st - 1].FullRow; nrow_max = (remaining_rows / nrow_max) > 0 ? remaining_rows / CEILING(remaining_rows, nrow_max) : nrow_max; int_t ResRow = (ii_st == 0) ? 0 : Remain_info[ii_st - 1].FullRow; for (int_t i = ii_st; i < RemainBlk - 1; ++i) { if ( Remain_info[i + 1].FullRow > ResRow + nrow_max) { ii_end = i; break; /* row dimension reaches nrow_max */ } } int_t nrows; /* actual row dimension for GEMM */ int_t st_row; if (ii_st > 0) { nrows = Remain_info[ii_end - 1].FullRow - Remain_info[ii_st - 1].FullRow; st_row = Remain_info[ii_st - 1].FullRow; } else { nrows = Remain_info[ii_end - 1].FullRow; st_row = 0; } int jj_st = jj_cpu; int jj_end = jj_cpu; while (jj_end < nub && nrows > 0 ) { int_t remaining_cols = (jj_st == jj_cpu) ? ncols : ncols - Ublock_info[jj_st - 1].full_u_cols; if ( remaining_cols * nrows < buffer_size) { jj_st = jj_end; jj_end = nub; } else /* C matrix cannot fit in buffer, need to break into pieces */ { int_t ncol_max = buffer_size / nrows; /** Must revisit **/ ncol_max = SUPERLU_MIN(ncol_max, maxGemmBlockDim); ncol_max = (remaining_cols / ncol_max) > 0 ? remaining_cols / CEILING(remaining_cols, ncol_max) : ncol_max; jj_st = jj_end; jj_end = nub; int_t ResCol = (jj_st == 0) ? 
0 : Ublock_info[jj_st - 1].full_u_cols; for (int_t j = jj_st; j < nub - 1; ++j) { if (Ublock_info[j + 1].full_u_cols > ResCol + ncol_max) { jj_end = j; break; } } } /* end-if-else */ int ncols; int st_col; if (jj_st > 0) { ncols = Ublock_info[jj_end - 1].full_u_cols - Ublock_info[jj_st - 1].full_u_cols; st_col = Ublock_info[jj_st - 1].full_u_cols; if (ncols == 0) exit(0); } else { ncols = Ublock_info[jj_end - 1].full_u_cols; st_col = 0; } /* none of the matrix dimension is zero. */ if (nrows > 0 && ldu > 0 && ncols > 0) { if (nrows * ncols > buffer_size) { printf("!! Matrix size %lld x %lld exceeds buffer_size %lld\n", nrows, ncols, buffer_size); fflush(stdout); } assert(nrows * ncols <= buffer_size); gpublasSetStream(gpublas_handle0, FunCallStream); gpuEventRecord(stat->GemmStart[k0], FunCallStream); cu_A = (gpuDoubleComplex*) &A_gpu->scubufs[streamId].Remain_L_buff[(knsupc - ldu) * Rnbrow + st_row]; cu_B = (gpuDoubleComplex*) &A_gpu->scubufs[streamId].bigU[st_col * ldu]; cu_C = (gpuDoubleComplex*) A_gpu->scubufs[streamId].bigV; gpublasZgemm(gpublas_handle0, GPUBLAS_OP_N, GPUBLAS_OP_N, nrows, ncols, ldu, cu_alpha, cu_A, Rnbrow, cu_B, ldu, cu_beta, cu_C, nrows); // #define SCATTER_OPT #ifdef SCATTER_OPT gpuStreamSynchronize(FunCallStream); #warning this function is synchronous #endif gpuEventRecord(stat->GemmEnd[k0], FunCallStream); stat->GemmFLOPCounter += 8.0 * (double) nrows * ncols * ldu; /* * Scattering the output */ // dim3 dimBlock(THREAD_BLOCK_SIZE); // 1d thread dim3 dimBlock(ldt); // 1d thread dim3 dimGrid(ii_end - ii_st, jj_end - jj_st); Scatter_GPU_kernel <<< dimGrid, dimBlock, (4*ldt + 2*SCATTER_THREAD_BLOCK_SIZE)*sizeof(int), FunCallStream>>> (streamId, ii_st, ii_end, jj_st, jj_end, klst, 0, nrows, ldt, npcol, nprow, dA_gpu); #ifdef SCATTER_OPT gpuStreamSynchronize(FunCallStream); #warning this function is synchrnous #endif gpuEventRecord(stat->ScatterEnd[k0], FunCallStream); stat->ScatterMOPCounter += 3.0 * (double) nrows * ncols; } /* endif ... none of the matrix dimension is zero. */ } /* end while jj_end < nub */ } /* end while (ii_end < RemainBlk) */ return 0; } /* end zSchurCompUpdate_GPU */ static void print_occupancy() { int blockSize; // The launch configurator returned block size int minGridSize; /* The minimum grid size needed to achieve the best potential occupancy */ gpuOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, Scatter_GPU_kernel, 0, 0); printf("Occupancy: MinGridSize %d blocksize %d \n", minGridSize, blockSize); } static void printDevProp(gpuDeviceProp devProp) { size_t mfree, mtotal; gpuMemGetInfo (&mfree, &mtotal); printf("pciBusID: %d\n", devProp.pciBusID); printf("pciDeviceID: %d\n", devProp.pciDeviceID); printf("GPU Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total free memory: %zu\n", mfree); printf("Clock rate: %d\n", devProp.clockRate); return; } static size_t get_acc_memory () { size_t mfree, mtotal; gpuMemGetInfo (&mfree, &mtotal); #if 0 printf("Total memory %zu & free memory %zu\n", mtotal, mfree); #endif return (size_t) (0.9 * (double) mfree) / get_mpi_process_per_gpu (); } /* Free all the data structures allocated on GPU. 
This routine is called from Host */ int zfree_LUstruct_gpu ( zsluGPU_t * sluGPU, SuperLUStat_t* stat ) { zLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; int streamId = 0; /* Free the L data structure on GPU */ checkGPU(gpuFree(A_gpu->LrowindVec)); checkGPU(gpuFree(A_gpu->LrowindPtr)); checkGPU(gpuFree(A_gpu->LnzvalVec)); checkGPU(gpuFree(A_gpu->LnzvalPtr)); free(A_gpu->LnzvalPtr_host); /*freeing the pinned memory*/ checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Remain_info_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Ublock_info_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Remain_L_buff_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].bigU_host)); checkGPU(gpuFreeHost(A_gpu->acc_L_buff)); checkGPU(gpuFreeHost(A_gpu->acc_U_buff)); checkGPU(gpuFreeHost(A_gpu->scubufs[streamId].lsub_buf)); checkGPU(gpuFreeHost(A_gpu->scubufs[streamId].usub_buf)); SUPERLU_FREE(stat->isOffloaded); // changed to SUPERLU_MALLOC/SUPERLU_FREE SUPERLU_FREE(stat->GemmStart); SUPERLU_FREE(stat->GemmEnd); SUPERLU_FREE(stat->ScatterEnd); SUPERLU_FREE(stat->ePCIeH2D); SUPERLU_FREE(stat->ePCIeD2H_Start); SUPERLU_FREE(stat->ePCIeD2H_End); /* Free the U data structure on GPU */ checkGPU(gpuFree(A_gpu->UrowindVec)); checkGPU(gpuFree(A_gpu->UrowindPtr)); //free(A_gpu->UrowindPtr_host); // Sherry: this is NOT allocated checkGPU(gpuFree(A_gpu->UnzvalVec)); checkGPU(gpuFree(A_gpu->UnzvalPtr)); checkGPU(gpuFree(A_gpu->grid)); /* Free the Schur complement structure on GPU */ checkGPU(gpuFree(A_gpu->scubufs[streamId].bigV)); checkGPU(gpuFree(A_gpu->scubufs[streamId].bigU)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Remain_L_buff)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Ublock_info)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Remain_info)); // checkGPU(gpuFree(A_gpu->indirect)); // checkGPU(gpuFree(A_gpu->indirect2)); checkGPU(gpuFree(A_gpu->xsup)); checkGPU(gpuFree(A_gpu->scubufs[streamId].lsub)); checkGPU(gpuFree(A_gpu->scubufs[streamId].usub)); checkGPU(gpuFree(A_gpu->local_l_blk_infoVec)); checkGPU(gpuFree(A_gpu->local_l_blk_infoPtr)); checkGPU(gpuFree(A_gpu->jib_lookupVec)); checkGPU(gpuFree(A_gpu->jib_lookupPtr)); checkGPU(gpuFree(A_gpu->local_u_blk_infoVec)); checkGPU(gpuFree(A_gpu->local_u_blk_infoPtr)); /* Destroy all the meta-structures associated with the streams. */ gpuStreamDestroy(sluGPU->CopyStream); for (streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { gpuStreamDestroy(sluGPU->funCallStreams[streamId]); gpublasDestroy(sluGPU->gpublasHandles[streamId]); } return 0; } /* end zfree_LUstruct_gpu */ void zPrint_matrix( char *desc, int_t m, int_t n, doublecomplex * dA, int_t lda ) { doublecomplex *cPtr = (doublecomplex *) malloc(sizeof(doublecomplex) * lda * n); checkGPU(gpuMemcpy( cPtr, dA, lda * n * sizeof(doublecomplex), gpuMemcpyDeviceToHost)) ; int_t i, j; printf( "\n %s\n", desc ); for ( i = 0; i < m; i++ ) { for ( j = 0; j < n; j++ ) printf( " %.3e", cPtr[i + j * lda] ); printf( "\n" ); } free(cPtr); } /* Initialize the GPU side of the data structure. 
*/ int zinitSluGPU3D_t( zsluGPU_t *sluGPU, // LU structures on GPU, see zlustruct_gpu.h zLUstruct_t *LUstruct, gridinfo3d_t * grid3d, int_t* perm_c_supno, int_t n, int_t buffer_size, /* read from env variable SUPERLU_MAX_BUFFER_SIZE */ int_t bigu_size, int_t ldt, /* SUPERLU_MAXSUP read from sp_ienv(3) */ SuperLUStat_t *stat ) { checkGPUErrors(gpuDeviceReset ()); Glu_persist_t *Glu_persist = LUstruct->Glu_persist; zLocalLU_t *Llu = LUstruct->Llu; int* isNodeInMyGrid = sluGPU->isNodeInMyGrid; sluGPU->nGPUStreams = getnGPUStreams(); int SCATTER_THREAD_BLOCK_SIZE = ldt; if(getenv("SCATTER_THREAD_BLOCK_SIZE")) { int stbs = atoi(getenv("SCATTER_THREAD_BLOCK_SIZE")); if(stbs>=ldt) { SCATTER_THREAD_BLOCK_SIZE = stbs; } } if (grid3d->iam == 0) { printf("dinitSluGPU3D_t: Using hardware acceleration, with %d gpu streams \n", sluGPU->nGPUStreams); fflush(stdout); printf("dinitSluGPU3D_t: Using %d threads per block for scatter \n", SCATTER_THREAD_BLOCK_SIZE); if ( MAX_SUPER_SIZE < ldt ) { ABORT("MAX_SUPER_SIZE smaller than requested NSUP"); } } gpuStreamCreate(&(sluGPU->CopyStream)); for (int streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { gpuStreamCreate(&(sluGPU->funCallStreams[streamId])); gpublasCreate(&(sluGPU->gpublasHandles[streamId])); sluGPU->lastOffloadStream[streamId] = -1; } sluGPU->A_gpu = (zLUstruct_gpu_t *) malloc (sizeof(zLUstruct_gpu_t)); sluGPU->A_gpu->perm_c_supno = perm_c_supno; /* Allocate GPU memory for the LU data structures, and copy the host LU structure to GPU side. */ zCopyLUToGPU3D ( isNodeInMyGrid, Llu, /* referred to as A_host */ sluGPU, Glu_persist, n, grid3d, buffer_size, bigu_size, ldt, stat ); return 0; } /* end zinitSluGPU3D_t */ int zinitD2Hreduce( int next_k, d2Hreduce_t* d2Hred, int last_flag, HyP_t* HyP, zsluGPU_t *sluGPU, gridinfo_t *grid, zLUstruct_t *LUstruct, SCT_t* SCT ) { Glu_persist_t *Glu_persist = LUstruct->Glu_persist; zLocalLU_t *Llu = LUstruct->Llu; int_t* xsup = Glu_persist->xsup; int_t iam = grid->iam; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t** Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; int_t** Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; // int_t next_col = SUPERLU_MIN (k0 + num_look_aheads + 1, nsupers - 1); // int_t next_k = perm_c_supno[next_col]; /* global block number for next colum*/ int_t mkcol, mkrow; int_t kljb = LBj( next_k, grid ); /*local block number for next block*/ int_t kijb = LBi( next_k, grid ); /*local block number for next block*/ int_t *kindexL ; /*for storing index vectors*/ int_t *kindexU ; mkrow = PROW (next_k, grid); mkcol = PCOL (next_k, grid); int_t ksup_size = SuperSize(next_k); int_t copyL_kljb = 0; int_t copyU_kljb = 0; int_t l_copy_len = 0; int_t u_copy_len = 0; if (mkcol == mycol && Lrowind_bc_ptr[kljb] != NULL && last_flag) { if (HyP->Lblock_dirty_bit[kljb] > -1) { copyL_kljb = 1; int_t lastk0 = HyP->Lblock_dirty_bit[kljb]; int_t streamIdk0Offload = lastk0 % sluGPU->nGPUStreams; if (sluGPU->lastOffloadStream[streamIdk0Offload] == lastk0 && lastk0 != -1) { // printf("Waiting for Offload =%d to finish StreamId=%d\n", lastk0, streamIdk0Offload); double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamIdk0Offload]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamIdk0Offload] = -1; } } kindexL = Lrowind_bc_ptr[kljb]; l_copy_len = kindexL[1] * ksup_size; } if ( mkrow == myrow && Ufstnz_br_ptr[kijb] != NULL && last_flag ) { if (HyP->Ublock_dirty_bit[kijb] > -1) { copyU_kljb = 1; int_t lastk0 = HyP->Ublock_dirty_bit[kijb]; int_t 
streamIdk0Offload = lastk0 % sluGPU->nGPUStreams; if (sluGPU->lastOffloadStream[streamIdk0Offload] == lastk0 && lastk0 != -1) { // printf("Waiting for Offload =%d to finish StreamId=%d\n", lastk0, streamIdk0Offload); double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamIdk0Offload]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamIdk0Offload] = -1; } } // copyU_kljb = HyP->Ublock_dirty_bit[kijb]>-1? 1: 0; kindexU = Ufstnz_br_ptr[kijb]; u_copy_len = kindexU[1]; } // wait for streams if they have not been finished // d2Hred->next_col = next_col; d2Hred->next_k = next_k; d2Hred->kljb = kljb; d2Hred->kijb = kijb; d2Hred->copyL_kljb = copyL_kljb; d2Hred->copyU_kljb = copyU_kljb; d2Hred->l_copy_len = l_copy_len; d2Hred->u_copy_len = u_copy_len; d2Hred->kindexU = kindexU; d2Hred->kindexL = kindexL; d2Hred->mkrow = mkrow; d2Hred->mkcol = mkcol; d2Hred->ksup_size = ksup_size; return 0; } /* zinitD2Hreduce */ int zreduceGPUlu( int last_flag, d2Hreduce_t* d2Hred, zsluGPU_t *sluGPU, SCT_t *SCT, gridinfo_t *grid, zLUstruct_t *LUstruct ) { zLocalLU_t *Llu = LUstruct->Llu; int iam = grid->iam; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t** Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; doublecomplex** Lnzval_bc_ptr = Llu->Lnzval_bc_ptr; int_t** Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; doublecomplex** Unzval_br_ptr = Llu->Unzval_br_ptr; gpuStream_t CopyStream; zLUstruct_gpu_t *A_gpu; A_gpu = sluGPU->A_gpu; CopyStream = sluGPU->CopyStream; int_t kljb = d2Hred->kljb; int_t kijb = d2Hred->kijb; int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; int_t mkrow = d2Hred->mkrow; int_t mkcol = d2Hred->mkcol; int_t ksup_size = d2Hred->ksup_size; int_t *kindex; if ((copyL_kljb || copyU_kljb) && last_flag ) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(CopyStream); SCT->PhiWaitTimer_2 += SuperLU_timer_() - ttx; } double tt_start = SuperLU_timer_(); if (last_flag) { if (mkcol == mycol && Lrowind_bc_ptr[kljb] != NULL ) { kindex = Lrowind_bc_ptr[kljb]; int_t len = kindex[1]; if (copyL_kljb) { doublecomplex *nzval_host; nzval_host = Lnzval_bc_ptr[kljb]; int_t llen = ksup_size * len; doublecomplex alpha = {1.0, 0.0}; superlu_zaxpy (llen, alpha, A_gpu->acc_L_buff, 1, nzval_host, 1); } } } if (last_flag) { if (mkrow == myrow && Ufstnz_br_ptr[kijb] != NULL ) { kindex = Ufstnz_br_ptr[kijb]; int_t len = kindex[1]; if (copyU_kljb) { doublecomplex *nzval_host; nzval_host = Unzval_br_ptr[kijb]; doublecomplex alpha = {1.0, 0.0}; superlu_zaxpy (len, alpha, A_gpu->acc_U_buff, 1, nzval_host, 1); } } } double tt_end = SuperLU_timer_(); SCT->AssemblyTimer += tt_end - tt_start; return 0; } /* zreduceGPUlu */ int zwaitGPUscu(int streamId, zsluGPU_t *sluGPU, SCT_t *SCT) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; return 0; } int zsendLUpanelGPU2HOST( int_t k0, d2Hreduce_t* d2Hred, zsluGPU_t *sluGPU, SuperLUStat_t *stat ) { int_t kljb = d2Hred->kljb; int_t kijb = d2Hred->kijb; int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; int_t l_copy_len = d2Hred->l_copy_len; int_t u_copy_len = d2Hred->u_copy_len; gpuStream_t CopyStream = sluGPU->CopyStream;; zLUstruct_gpu_t *A_gpu = sluGPU->A_gpu; double tty = SuperLU_timer_(); gpuEventRecord(stat->ePCIeD2H_Start[k0], CopyStream); if (copyL_kljb) checkGPU(gpuMemcpyAsync(A_gpu->acc_L_buff, &A_gpu->LnzvalVec[A_gpu->LnzvalPtr_host[kljb]], l_copy_len * sizeof(doublecomplex), 
gpuMemcpyDeviceToHost, CopyStream ) ); if (copyU_kljb) checkGPU(gpuMemcpyAsync(A_gpu->acc_U_buff, &A_gpu->UnzvalVec[A_gpu->UnzvalPtr_host[kijb]], u_copy_len * sizeof(doublecomplex), gpuMemcpyDeviceToHost, CopyStream ) ); gpuEventRecord(stat->ePCIeD2H_End[k0], CopyStream); stat->tHost_PCIeD2H += SuperLU_timer_() - tty; stat->cPCIeD2H += u_copy_len * sizeof(doublecomplex) + l_copy_len * sizeof(doublecomplex); return 0; } /* end zsendLUpanelGPU2HOST */ /* Copy L and U panel data structures from host to the host part of the data structures in A_gpu. GPU is not involved in this routine. */ int zsendSCUdataHost2GPU( int_t streamId, int_t* lsub, int_t* usub, doublecomplex* bigU, int_t bigu_send_size, int_t Remain_lbuf_send_size, zsluGPU_t *sluGPU, HyP_t* HyP ) { //{printf("....[enter] zsendSCUdataHost2GPU, bigu_send_size %d\n", bigu_send_size); fflush(stdout);} int_t usub_len = usub[2]; int_t lsub_len = lsub[1] + BC_HEADER + lsub[0] * LB_DESCRIPTOR; //{printf("....[2] in zsendSCUdataHost2GPU, lsub_len %d\n", lsub_len); fflush(stdout);} zLUstruct_gpu_t *A_gpu = sluGPU->A_gpu; memcpy(A_gpu->scubufs[streamId].lsub_buf, lsub, sizeof(int_t)*lsub_len); memcpy(A_gpu->scubufs[streamId].usub_buf, usub, sizeof(int_t)*usub_len); memcpy(A_gpu->scubufs[streamId].Remain_info_host, HyP->Remain_info, sizeof(Remain_info_t)*HyP->RemainBlk); memcpy(A_gpu->scubufs[streamId].Ublock_info_host, HyP->Ublock_info_Phi, sizeof(Ublock_info_t)*HyP->num_u_blks_Phi); memcpy(A_gpu->scubufs[streamId].Remain_L_buff_host, HyP->Remain_L_buff, sizeof(doublecomplex)*Remain_lbuf_send_size); memcpy(A_gpu->scubufs[streamId].bigU_host, bigU, sizeof(doublecomplex)*bigu_send_size); return 0; } /* Allocate GPU memory for the LU data structures, and copy the host LU structure to GPU side. After factorization, the GPU LU structure should be freed by calling zfree_LUstruct_gpu(). */ void zCopyLUToGPU3D ( int* isNodeInMyGrid, zLocalLU_t *A_host, /* distributed LU structure on host */ zsluGPU_t *sluGPU, /* hold LU structure on GPU */ Glu_persist_t *Glu_persist, int_t n, gridinfo3d_t *grid3d, int_t buffer_size, /* bigV size on GPU for Schur complement update */ int_t bigu_size, int_t ldt, SuperLUStat_t *stat ) { gridinfo_t* grid = &(grid3d->grid2d); zLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; zLUstruct_gpu_t **dA_gpu = &(sluGPU->dA_gpu); #if ( PRNTlevel>=1 ) if ( grid3d->iam == 0 ) print_occupancy(); #endif #ifdef GPU_DEBUG // if ( grid3d->iam == 0 ) { gpuDeviceProp devProp; gpuGetDeviceProperties(&devProp, 0); printDevProp(devProp); } #endif int_t *xsup ; xsup = Glu_persist->xsup; int iam = grid->iam; int nsupers = Glu_persist->supno[n - 1] + 1; int_t Pc = grid->npcol; int_t Pr = grid->nprow; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t mrb = (nsupers + Pr - 1) / Pr; int_t mcb = (nsupers + Pc - 1) / Pc; int_t remain_l_max = A_host->bufmax[1]; /*copies of scalars for easy access*/ A_gpu->nsupers = nsupers; stat->ScatterMOPCounter = 0; stat->GemmFLOPCounter = 0; stat->cPCIeH2D = 0; stat->cPCIeD2H = 0; stat->tHost_PCIeH2D = 0; stat->tHost_PCIeD2H = 0; /*initializing memory*/ size_t max_gpu_memory = get_acc_memory (); size_t gpu_mem_used = 0; void *tmp_ptr; A_gpu->xsup_host = xsup; int_t nGPUStreams = sluGPU->nGPUStreams; /*pinned memory allocations. 
Paged-locked memory by gpuMallocHost is accessible to the device.*/ for (int streamId = 0; streamId < nGPUStreams; streamId++ ) { void *tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, (n) * sizeof(int_t) )) ; A_gpu->scubufs[streamId].usub_IndirectJ3_host = (int_t*) tmp_ptr; checkGPUErrors(gpuMalloc( &tmp_ptr, ( n) * sizeof(int_t) )); A_gpu->scubufs[streamId].usub_IndirectJ3 = (int_t*) tmp_ptr; gpu_mem_used += ( n) * sizeof(int_t); checkGPUErrors(gpuMallocHost( &tmp_ptr, mrb * sizeof(Remain_info_t) )) ; A_gpu->scubufs[streamId].Remain_info_host = (Remain_info_t*)tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, mcb * sizeof(Ublock_info_t) )) ; A_gpu->scubufs[streamId].Ublock_info_host = (Ublock_info_t*)tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, remain_l_max * sizeof(doublecomplex) )) ; A_gpu->scubufs[streamId].Remain_L_buff_host = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, bigu_size * sizeof(doublecomplex) )) ; A_gpu->scubufs[streamId].bigU_host = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(doublecomplex) * (A_host->bufmax[1]))); A_gpu->acc_L_buff = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(doublecomplex) * (A_host->bufmax[3]))); A_gpu->acc_U_buff = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(int_t) * (A_host->bufmax[0]))); A_gpu->scubufs[streamId].lsub_buf = (int_t *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(int_t) * (A_host->bufmax[2]))); A_gpu->scubufs[streamId].usub_buf = (int_t *) tmp_ptr; checkGPUErrors(gpuMalloc( &tmp_ptr, remain_l_max * sizeof(doublecomplex) )) ; A_gpu->scubufs[streamId].Remain_L_buff = (doublecomplex *) tmp_ptr; gpu_mem_used += remain_l_max * sizeof(doublecomplex); checkGPUErrors(gpuMalloc( &tmp_ptr, bigu_size * sizeof(doublecomplex) )) ; A_gpu->scubufs[streamId].bigU = (doublecomplex *) tmp_ptr; gpu_mem_used += bigu_size * sizeof(doublecomplex); checkGPUErrors(gpuMalloc( &tmp_ptr, mcb * sizeof(Ublock_info_t) )) ; A_gpu->scubufs[streamId].Ublock_info = (Ublock_info_t *) tmp_ptr; gpu_mem_used += mcb * sizeof(Ublock_info_t); checkGPUErrors(gpuMalloc( &tmp_ptr, mrb * sizeof(Remain_info_t) )) ; A_gpu->scubufs[streamId].Remain_info = (Remain_info_t *) tmp_ptr; gpu_mem_used += mrb * sizeof(Remain_info_t); checkGPUErrors(gpuMalloc( &tmp_ptr, buffer_size * sizeof(doublecomplex))) ; A_gpu->scubufs[streamId].bigV = (doublecomplex *) tmp_ptr; gpu_mem_used += buffer_size * sizeof(doublecomplex); checkGPUErrors(gpuMalloc( &tmp_ptr, A_host->bufmax[0]*sizeof(int_t))) ; A_gpu->scubufs[streamId].lsub = (int_t *) tmp_ptr; gpu_mem_used += A_host->bufmax[0] * sizeof(int_t); checkGPUErrors(gpuMalloc( &tmp_ptr, A_host->bufmax[2]*sizeof(int_t))) ; A_gpu->scubufs[streamId].usub = (int_t *) tmp_ptr; gpu_mem_used += A_host->bufmax[2] * sizeof(int_t); } /* endfor streamID ... 
allocate paged-locked memory */ stat->isOffloaded = (int *) SUPERLU_MALLOC (sizeof(int) * nsupers); stat->GemmStart = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->GemmEnd = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->ScatterEnd = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->ePCIeH2D = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->ePCIeD2H_Start = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); stat->ePCIeD2H_End = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); for (int i = 0; i < nsupers; ++i) { stat->isOffloaded[i] = 0; checkGPUErrors(gpuEventCreate(&(stat->GemmStart[i]))); checkGPUErrors(gpuEventCreate(&(stat->GemmEnd[i]))); checkGPUErrors(gpuEventCreate(&(stat->ScatterEnd[i]))); checkGPUErrors(gpuEventCreate(&(stat->ePCIeH2D[i]))); checkGPUErrors(gpuEventCreate(&(stat->ePCIeD2H_Start[i]))); checkGPUErrors(gpuEventCreate(&(stat->ePCIeD2H_End[i]))); } /*---- Copy L data structure to GPU ----*/ /*pointers and address of local blocks for easy accessibility */ local_l_blk_info_t *local_l_blk_infoVec; int_t * local_l_blk_infoPtr; local_l_blk_infoPtr = (int_t *) malloc( CEILING(nsupers, Pc) * sizeof(int_t ) ); /* First pass: count total L blocks */ int_t cum_num_l_blocks = 0; /* total number of L blocks I own */ for (int_t i = 0; i < CEILING(nsupers, Pc); ++i) { /* going through each block column I own */ if (A_host->Lrowind_bc_ptr[i] != NULL && isNodeInMyGrid[i * Pc + mycol] == 1) { int_t *index = A_host->Lrowind_bc_ptr[i]; int_t num_l_blocks = index[0]; cum_num_l_blocks += num_l_blocks; } } /*allocating memory*/ local_l_blk_infoVec = (local_l_blk_info_t *) malloc(cum_num_l_blocks * sizeof(local_l_blk_info_t)); /* Second pass: set up the meta-data for the L structure */ cum_num_l_blocks = 0; /*initialzing vectors */ for (int_t i = 0; i < CEILING(nsupers, Pc); ++i) { if (A_host->Lrowind_bc_ptr[i] != NULL && isNodeInMyGrid[i * Pc + mycol] == 1) { int_t *index = A_host->Lrowind_bc_ptr[i]; int_t num_l_blocks = index[0]; /* # L blocks in this column */ if (num_l_blocks > 0) { local_l_blk_info_t *local_l_blk_info_i = local_l_blk_infoVec + cum_num_l_blocks; local_l_blk_infoPtr[i] = cum_num_l_blocks; int_t lptrj = BC_HEADER; int_t luptrj = 0; for (int_t j = 0; j < num_l_blocks ; ++j) { int_t ijb = index[lptrj]; local_l_blk_info_i[j].lib = ijb / Pr; local_l_blk_info_i[j].lptrj = lptrj; local_l_blk_info_i[j].luptrj = luptrj; luptrj += index[lptrj + 1]; lptrj += LB_DESCRIPTOR + index[lptrj + 1]; } } cum_num_l_blocks += num_l_blocks; } } /* endfor all block columns */ /* Allocate L memory on GPU, and copy the values from CPU to GPU */ checkGPUErrors(gpuMalloc( &tmp_ptr, cum_num_l_blocks * sizeof(local_l_blk_info_t))) ; A_gpu->local_l_blk_infoVec = (local_l_blk_info_t *) tmp_ptr; gpu_mem_used += cum_num_l_blocks * sizeof(local_l_blk_info_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_l_blk_infoVec), local_l_blk_infoVec, cum_num_l_blocks * sizeof(local_l_blk_info_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, CEILING(nsupers, Pc)*sizeof(int_t))) ; A_gpu->local_l_blk_infoPtr = (int_t *) tmp_ptr; gpu_mem_used += CEILING(nsupers, Pc) * sizeof(int_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_l_blk_infoPtr), local_l_blk_infoPtr, CEILING(nsupers, Pc)*sizeof(int_t), gpuMemcpyHostToDevice)) ; /*---- Copy U data structure to GPU ----*/ local_u_blk_info_t *local_u_blk_infoVec; int_t * local_u_blk_infoPtr; local_u_blk_infoPtr = (int_t *) malloc( CEILING(nsupers, Pr) * sizeof(int_t ) ); /* 
First pass: count total U blocks */ int_t cum_num_u_blocks = 0; for (int_t i = 0; i < CEILING(nsupers, Pr); ++i) { if (A_host->Ufstnz_br_ptr[i] != NULL && isNodeInMyGrid[i * Pr + myrow] == 1) { int_t *index = A_host->Ufstnz_br_ptr[i]; int_t num_u_blocks = index[0]; cum_num_u_blocks += num_u_blocks; } } local_u_blk_infoVec = (local_u_blk_info_t *) malloc(cum_num_u_blocks * sizeof(local_u_blk_info_t)); /* Second pass: set up the meta-data for the U structure */ cum_num_u_blocks = 0; for (int_t i = 0; i < CEILING(nsupers, Pr); ++i) { if (A_host->Ufstnz_br_ptr[i] != NULL && isNodeInMyGrid[i * Pr + myrow] == 1) { int_t *index = A_host->Ufstnz_br_ptr[i]; int_t num_u_blocks = index[0]; if (num_u_blocks > 0) { local_u_blk_info_t *local_u_blk_info_i = local_u_blk_infoVec + cum_num_u_blocks; local_u_blk_infoPtr[i] = cum_num_u_blocks; int_t iuip_lib, ruip_lib; iuip_lib = BR_HEADER; ruip_lib = 0; for (int_t j = 0; j < num_u_blocks ; ++j) { int_t ijb = index[iuip_lib]; local_u_blk_info_i[j].ljb = ijb / Pc; local_u_blk_info_i[j].iuip = iuip_lib; local_u_blk_info_i[j].ruip = ruip_lib; ruip_lib += index[iuip_lib + 1]; iuip_lib += UB_DESCRIPTOR + SuperSize (ijb); } } cum_num_u_blocks += num_u_blocks; } } checkGPUErrors(gpuMalloc( &tmp_ptr, cum_num_u_blocks * sizeof(local_u_blk_info_t))) ; A_gpu->local_u_blk_infoVec = (local_u_blk_info_t *) tmp_ptr; gpu_mem_used += cum_num_u_blocks * sizeof(local_u_blk_info_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_u_blk_infoVec), local_u_blk_infoVec, cum_num_u_blocks * sizeof(local_u_blk_info_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, CEILING(nsupers, Pr)*sizeof(int_t))) ; A_gpu->local_u_blk_infoPtr = (int_t *) tmp_ptr; gpu_mem_used += CEILING(nsupers, Pr) * sizeof(int_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_u_blk_infoPtr), local_u_blk_infoPtr, CEILING(nsupers, Pr)*sizeof(int_t), gpuMemcpyHostToDevice)) ; /* Copy the actual L indices and values */ int_t l_k = CEILING( nsupers, grid->npcol ); /* # of local block columns */ int_t *temp_LrowindPtr = (int_t *) malloc(sizeof(int_t) * l_k); int_t *temp_LnzvalPtr = (int_t *) malloc(sizeof(int_t) * l_k); int_t *Lnzval_size = (int_t *) malloc(sizeof(int_t) * l_k); int_t l_ind_len = 0; int_t l_val_len = 0; for (int_t jb = 0; jb < nsupers; ++jb) /* for each block column ... */ { int_t pc = PCOL( jb, grid ); if (mycol == pc && isNodeInMyGrid[jb] == 1) { int_t ljb = LBj( jb, grid ); /* Local block number */ int_t *index_host; index_host = A_host->Lrowind_bc_ptr[ljb]; temp_LrowindPtr[ljb] = l_ind_len; temp_LnzvalPtr[ljb] = l_val_len; // ### Lnzval_size[ljb] = 0; //### if (index_host != NULL) { int_t nrbl = index_host[0]; /* number of L blocks */ int_t len = index_host[1]; /* LDA of the nzval[] */ int_t len1 = len + BC_HEADER + nrbl * LB_DESCRIPTOR; /* Global block number is mycol + ljb*Pc */ int_t nsupc = SuperSize(jb); l_ind_len += len1; l_val_len += len * nsupc; Lnzval_size[ljb] = len * nsupc ; // ### } else { Lnzval_size[ljb] = 0 ; // ### } } } /* endfor jb = 0 ... 
*/ /* Copy the actual U indices and values */ int_t u_k = CEILING( nsupers, grid->nprow ); /* Number of local block rows */ int_t *temp_UrowindPtr = (int_t *) malloc(sizeof(int_t) * u_k); int_t *temp_UnzvalPtr = (int_t *) malloc(sizeof(int_t) * u_k); int_t *Unzval_size = (int_t *) malloc(sizeof(int_t) * u_k); int_t u_ind_len = 0; int_t u_val_len = 0; for ( int_t lb = 0; lb < u_k; ++lb) { int_t *index_host; index_host = A_host->Ufstnz_br_ptr[lb]; temp_UrowindPtr[lb] = u_ind_len; temp_UnzvalPtr[lb] = u_val_len; Unzval_size[lb] = 0; if (index_host != NULL && isNodeInMyGrid[lb * Pr + myrow] == 1) { int_t len = index_host[1]; int_t len1 = index_host[2]; u_ind_len += len1; u_val_len += len; Unzval_size[lb] = len; } else { Unzval_size[lb] = 0; } } gpu_mem_used += l_ind_len * sizeof(int_t); gpu_mem_used += 2 * l_k * sizeof(int_t); gpu_mem_used += u_ind_len * sizeof(int_t); gpu_mem_used += 2 * u_k * sizeof(int_t); /*left memory shall be divided among the two */ for (int_t i = 0; i < l_k; ++i) { temp_LnzvalPtr[i] = -1; } for (int_t i = 0; i < u_k; ++i) { temp_UnzvalPtr[i] = -1; } /*setting these pointers back */ l_val_len = 0; u_val_len = 0; int_t num_gpu_l_blocks = 0; int_t num_gpu_u_blocks = 0; size_t mem_l_block, mem_u_block; /* Find the trailing matrix size that can fit into GPU memory */ for (int_t i = nsupers - 1; i > -1; --i) { /* ulte se chalte hai eleimination tree */ /* bottom up ordering */ int_t i_sup = A_gpu->perm_c_supno[i]; int_t pc = PCOL( i_sup, grid ); if (isNodeInMyGrid[i_sup] == 1) { if (mycol == pc ) { int_t ljb = LBj(i_sup, grid); mem_l_block = sizeof(doublecomplex) * Lnzval_size[ljb]; if (gpu_mem_used + mem_l_block > max_gpu_memory) { break; } else { gpu_mem_used += mem_l_block; temp_LnzvalPtr[ljb] = l_val_len; l_val_len += Lnzval_size[ljb]; num_gpu_l_blocks++; A_gpu->first_l_block_gpu = i; } } int_t pr = PROW( i_sup, grid ); if (myrow == pr) { int_t lib = LBi(i_sup, grid); mem_u_block = sizeof(doublecomplex) * Unzval_size[lib]; if (gpu_mem_used + mem_u_block > max_gpu_memory) { break; } else { gpu_mem_used += mem_u_block; temp_UnzvalPtr[lib] = u_val_len; u_val_len += Unzval_size[lib]; num_gpu_u_blocks++; A_gpu->first_u_block_gpu = i; } } } /* endif */ } /* endfor i .... nsupers */ #if (PRNTlevel>=2) printf("(%d) Number of L blocks in GPU %d, U blocks %d\n", grid3d->iam, num_gpu_l_blocks, num_gpu_u_blocks ); printf("(%d) elimination order of first block in GPU: L block %d, U block %d\n", grid3d->iam, A_gpu->first_l_block_gpu, A_gpu->first_u_block_gpu); printf("(%d) Memory of L %.1f GB, memory for U %.1f GB, Total device memory used %.1f GB, Memory allowed %.1f GB \n", grid3d->iam, l_val_len * sizeof(doublecomplex) * 1e-9, u_val_len * sizeof(doublecomplex) * 1e-9, gpu_mem_used * 1e-9, max_gpu_memory * 1e-9); fflush(stdout); #endif /* Assemble index vector on temp */ int_t *indtemp = (int_t *) malloc(sizeof(int_t) * l_ind_len); for (int_t jb = 0; jb < nsupers; ++jb) /* for each block column ... 
*/ { int_t pc = PCOL( jb, grid ); if (mycol == pc && isNodeInMyGrid[jb] == 1) { int_t ljb = LBj( jb, grid ); /* Local block number */ int_t *index_host; index_host = A_host->Lrowind_bc_ptr[ljb]; if (index_host != NULL) { int_t nrbl = index_host[0]; /* number of L blocks */ int_t len = index_host[1]; /* LDA of the nzval[] */ int_t len1 = len + BC_HEADER + nrbl * LB_DESCRIPTOR; memcpy(&indtemp[temp_LrowindPtr[ljb]] , index_host, len1 * sizeof(int_t)) ; } } } checkGPUErrors(gpuMalloc( &tmp_ptr, l_ind_len * sizeof(int_t))) ; A_gpu->LrowindVec = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LrowindVec), indtemp, l_ind_len * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, l_val_len * sizeof(doublecomplex))); A_gpu->LnzvalVec = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMemset( (A_gpu->LnzvalVec), 0, l_val_len * sizeof(doublecomplex))); checkGPUErrors(gpuMalloc( &tmp_ptr, l_k * sizeof(int_t))) ; A_gpu->LrowindPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LrowindPtr), temp_LrowindPtr, l_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, l_k * sizeof(int_t))) ; A_gpu->LnzvalPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LnzvalPtr), temp_LnzvalPtr, l_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; A_gpu->LnzvalPtr_host = temp_LnzvalPtr; int_t *indtemp1 = (int_t *) malloc(sizeof(int_t) * u_ind_len); for ( int_t lb = 0; lb < u_k; ++lb) { int_t *index_host; index_host = A_host->Ufstnz_br_ptr[lb]; if (index_host != NULL && isNodeInMyGrid[lb * Pr + myrow] == 1) { int_t len1 = index_host[2]; memcpy(&indtemp1[temp_UrowindPtr[lb]] , index_host, sizeof(int_t)*len1); } } checkGPUErrors(gpuMalloc( &tmp_ptr, u_ind_len * sizeof(int_t))) ; A_gpu->UrowindVec = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UrowindVec), indtemp1, u_ind_len * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, u_val_len * sizeof(doublecomplex))); A_gpu->UnzvalVec = (doublecomplex *) tmp_ptr; checkGPUErrors(gpuMemset( (A_gpu->UnzvalVec), 0, u_val_len * sizeof(doublecomplex))); checkGPUErrors(gpuMalloc( &tmp_ptr, u_k * sizeof(int_t))) ; A_gpu->UrowindPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UrowindPtr), temp_UrowindPtr, u_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; A_gpu->UnzvalPtr_host = temp_UnzvalPtr; checkGPUErrors(gpuMalloc( &tmp_ptr, u_k * sizeof(int_t))) ; A_gpu->UnzvalPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UnzvalPtr), temp_UnzvalPtr, u_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, (nsupers + 1)*sizeof(int_t))) ; A_gpu->xsup = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->xsup), xsup, (nsupers + 1)*sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, sizeof(zLUstruct_gpu_t))) ; *dA_gpu = (zLUstruct_gpu_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( *dA_gpu, A_gpu, sizeof(zLUstruct_gpu_t), gpuMemcpyHostToDevice)) ; free (temp_LrowindPtr); free (temp_UrowindPtr); free (indtemp1); free (indtemp); } /* end zCopyLUToGPU3D */ int zreduceAllAncestors3d_GPU ( int_t ilvl, int_t* myNodeCount, int_t** treePerm, zLUValSubBuf_t*LUvsb, zLUstruct_t* LUstruct, gridinfo3d_t* grid3d, zsluGPU_t *sluGPU, d2Hreduce_t* d2Hred, factStat_t *factStat, HyP_t* HyP, SCT_t* SCT, SuperLUStat_t *stat ) { // first synchronize all gpu streams int superlu_acc_offload = HyP->superlu_acc_offload; int_t maxLvl = log2i( (int_t) grid3d->zscp.Np) + 1; int_t myGrid = grid3d->zscp.Iam; gridinfo_t* grid = &(grid3d->grid2d); int_t* gpuLUreduced = 
factStat->gpuLUreduced; int_t sender; if ((myGrid % (1 << (ilvl + 1))) == 0) { sender = myGrid + (1 << ilvl); } else { sender = myGrid; } /*Reduce all the ancestors from the GPU*/ if (myGrid == sender && superlu_acc_offload) { for (int_t streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamId] = -1; } for (int_t alvl = ilvl + 1; alvl < maxLvl; ++alvl) { /* code */ // int_t atree = myTreeIdxs[alvl]; int_t nsAncestor = myNodeCount[alvl]; int_t* cAncestorList = treePerm[alvl]; for (int_t node = 0; node < nsAncestor; node++ ) { int_t k = cAncestorList[node]; if (!gpuLUreduced[k]) { zinitD2Hreduce(k, d2Hred, 1, HyP, sluGPU, grid, LUstruct, SCT); int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; double tt_start1 = SuperLU_timer_(); SCT->PhiMemCpyTimer += SuperLU_timer_() - tt_start1; if (copyL_kljb || copyU_kljb) SCT->PhiMemCpyCounter++; zsendLUpanelGPU2HOST(k, d2Hred, sluGPU, stat); /* Reduce the LU panels from GPU */ zreduceGPUlu(1, d2Hred, sluGPU, SCT, grid, LUstruct); gpuLUreduced[k] = 1; } } } } /*if (myGrid == sender)*/ zreduceAllAncestors3d(ilvl, myNodeCount, treePerm, LUvsb, LUstruct, grid3d, SCT ); return 0; } /* zreduceAllAncestors3d_GPU */ void zsyncAllfunCallStreams(zsluGPU_t* sluGPU, SCT_t* SCT) { for (int streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamId] = -1; } }
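The usub_IndirectJ3 construction in zSchurCompUpdate_GPU above compresses the columns of a U supernode down to its nonzero segments with a flag / prefix-sum / compaction pass. The short host-only sketch below is not library code; klst, usub, and nsupc are toy values chosen only to show how indirectJ1/J2/J3 end up mapping the j-th nonzero segment back to its source column.
/* Illustrative sketch only -- same flag/prefix-sum/compaction idea as the
 * host loop in zSchurCompUpdate_GPU; all input values here are made up. */
#include <stdio.h>

int main(void)
{
    int klst = 10;
    int usub[] = {10, 7, 10, 9, 10, 6};   /* fstnz per column; == klst means empty */
    int nsupc = 6;
    int J1[6], J2[6], J3[6];

    for (int kk = 0; kk < nsupc; ++kk)     /* 1 = nonzero segment, 0 = empty */
        J1[kk] = ((klst - usub[kk]) == 0) ? 0 : 1;

    J2[0] = J1[0];                         /* inclusive prefix sum */
    for (int kk = 1; kk < nsupc; ++kk)
        J2[kk] = J2[kk - 1] + J1[kk];

    int nnz_col = J2[nsupc - 1];           /* # nonzero segments in this supernode */

    for (int kk = 0; kk < nsupc; ++kk)     /* compaction: j-th segment -> column kk */
        if (J1[kk])
            J3[J2[kk] - 1] = kk;

    printf("nnz_col = %d, columns kept:", nnz_col);
    for (int j = 0; j < nnz_col; ++j)
        printf(" %d", J3[j]);
    printf("\n");                          /* prints: columns kept: 1 3 5 */
    return 0;
}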
0fb58dbd4703bbc966df9152d2bf41e47b895673.hip
// !!! This is a file automatically generated by hipify!!! #include<utility> #include<stdio.h> #include<assert.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <ostream> #include <iostream> #include <cstdio> #include <cstdlib> #include <cmath> #include <fstream> #include <omp.h> #include <time.h> #include <string.h> #include <utility> #include <time.h> // HEADER FILES #include "Helper/createFileStrings.h" #include "Helper/fillThreadsPerBlock.h" #include "Helper/jacobi.h" #include "Helper/residual.h" #include "Helper/solution_error.h" #include "Helper/setGPU.h" // Header file for shared jacobi code #include "jacobi-1D-shared.h" int main(int argc, char *argv[]) { // INPUTS /////////////////////////////////////////////////////////////// // SET CUDA DEVICE TO USE (IMPORTANT FOR ENDEAVOUR WHICH HAS 2!) // NAVIER-STOKES GPUs: "Quadro K420" // ENDEAVOUR GPUs: "TITAN V" OR "GeForce GTX 1080 Ti" std::string gpuToUse = "TITAN V"; setGPU(gpuToUse); // PARSE INPUTS const int nDim = atoi(argv[1]); const int residual_convergence_metric_flag = atoi(argv[2]); const double tolerance_value = atof(argv[3]); const int tolerance_reduction_flag = atoi(argv[4]); const int relaxation_flag = 1; // DEFAULT PARAMETERS const int numTrials = 20; int threadsPerBlock, subIterations; // INITIALIZE ARRAYS int nGrids = nDim + 2; double * initX = new double[nGrids]; double * rhs = new double[nGrids]; // 1D POISSON MATRIX for (int iGrid = 0; iGrid < nGrids; ++iGrid) { if (iGrid == 0 || iGrid == nGrids-1) { initX[iGrid] = 0.0f; } else { initX[iGrid] = 1.0f; } rhs[iGrid] = 1.0f; } // LOAD EXACT SOLUTION IF SOLUTION ERROR IS THE CRITERION FOR CONVERGENCE double * solution_exact = new double[nGrids]; double initSolutionError; if (residual_convergence_metric_flag == 0) { std::string SOLUTIONEXACT_FILENAME = "solution_exact_N32.txt"; loadSolutionExact(solution_exact, SOLUTIONEXACT_FILENAME, nGrids); initSolutionError = solutionError1DPoisson(initX, solution_exact, nGrids); } // COMPUTE TOLERANCE BASED ON RESIDUAL/ERROR AND INPUTS FROM PYTHON double TOL; double initResidual = residual1DPoisson(initX, rhs, nGrids); if (tolerance_reduction_flag == 0) { TOL = tolerance_value; } else if (tolerance_reduction_flag == 1 && residual_convergence_metric_flag == 1) { TOL = initResidual / tolerance_value; } else if (tolerance_reduction_flag == 1 && residual_convergence_metric_flag == 0) { // TOL = initSolutionError / tolerance_value; TOL = (1.0 - 0.01 * tolerance_value) * initSolutionError; } // THREADS PER BLOCK VALUES int* threadsPerBlock_array = new int[6]; fillThreadsPerBlockArray(threadsPerBlock_array); // DEFINE CUDA EVENTS hipEvent_t start_sh, stop_sh; hipEventCreate(&start_sh); hipEventCreate(&stop_sh); // PRINTOUT // Print parameters of the problem to screen printf("===============INFORMATION============================\n"); printf("Number of unknowns: %d\n", nDim); printf("Number of Trials: %d\n", numTrials); if (residual_convergence_metric_flag == 1) { printf("Residual of initial solution: %f\n", initResidual); } else if (residual_convergence_metric_flag == 0) { printf("Solution Error of initial solution: %f\n", initSolutionError); } printf("Desired TOL of residual/solution error: %f\n", TOL); printf("======================================================\n"); // NECESSARY CONTAINERS hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); int OVERLAP; int numIterations; float sharedJacobiTime; float totalTime = 0.0; std::string SHARED_FILE_NAME; std::ofstream timings_sh; for (int tpb_idx = 0; tpb_idx < 
6; tpb_idx = tpb_idx + 1) { threadsPerBlock = threadsPerBlock_array[tpb_idx]; numIterations = threadsPerBlock / 2; subIterations = threadsPerBlock / 2; SHARED_FILE_NAME = createFileStringOverlap(nDim, threadsPerBlock, residual_convergence_metric_flag, tolerance_value, tolerance_reduction_flag, relaxation_flag); std::cout << SHARED_FILE_NAME << std::endl; timings_sh.open(SHARED_FILE_NAME.c_str(), std::ios_base::app); int * sharedCycles = new int[numIterations]; double * sharedJacobiTimeArray = new double[numIterations]; double * sharedJacobiResidual = new double[numIterations]; double * sharedJacobiSolutionError = new double[numIterations]; double * solutionJacobiShared = new double[nGrids]; for (int i = 0; i < numIterations; i++) { // OBTAIN NUMBER OF CYCLES TO CONVERGE FOR GIVEN OVERLAP OVERLAP = 2*i; if (residual_convergence_metric_flag == 1) { sharedCycles[i] = jacobiSharedIterationCountResidual(initX, rhs, nGrids, TOL, threadsPerBlock, OVERLAP, subIterations); } else if (residual_convergence_metric_flag == 0) { sharedCycles[i] = jacobiSharedIterationCountSolutionError(initX, rhs, nGrids, TOL, threadsPerBlock, OVERLAP, subIterations, solution_exact); } for (int iter = 0; iter < numTrials; iter++) { //if (threadsPerBlock == innerSubdomainLength) { // GET FINAL SOLUTION hipEventRecord(start_sh, 0); solutionJacobiShared = jacobiShared(initX, rhs, nGrids, sharedCycles[i], threadsPerBlock, OVERLAP, subIterations); // OBTAIN FINAL TIMES REQUIRED hipEventRecord(stop_sh, 0); hipEventSynchronize(stop_sh); hipEventElapsedTime(&sharedJacobiTime, start_sh, stop_sh); totalTime = totalTime + sharedJacobiTime; printf("THREADS PER BLOCK: %d, OVERLAP = %d/%d, TRIAL %d/%d\n", threadsPerBlock, OVERLAP, threadsPerBlock-2, iter, numTrials); } sharedJacobiTimeArray[i] = totalTime / numTrials; printf("Number of Cycles: %d (subiterations = %d) \n", sharedCycles[i], threadsPerBlock/2); printf("Time needed for the Jacobi Shared: %f ms\n", sharedJacobiTimeArray[i]); if (residual_convergence_metric_flag == 1) { sharedJacobiResidual[i] = residual1DPoisson(solutionJacobiShared, rhs, nGrids); printf("Residual is %f\n", sharedJacobiResidual[i]); timings_sh << OVERLAP << " " << sharedCycles[i] << " " << sharedJacobiTimeArray[i] << " " << sharedJacobiResidual[i] << " " << numTrials << " " << "\n"; } else if (residual_convergence_metric_flag == 0) { sharedJacobiSolutionError[i] = solutionError1DPoisson(solutionJacobiShared, solution_exact, nGrids); printf("Solution Error is %f\n", sharedJacobiSolutionError[i]); timings_sh << OVERLAP << " " << sharedCycles[i] << " " << sharedJacobiTimeArray[i] << " " << sharedJacobiSolutionError[i] << " " << numTrials << " " << "\n"; } printf("================================================\n"); totalTime = 0.0; } timings_sh.close(); delete[] sharedCycles; delete[] sharedJacobiTimeArray; delete[] sharedJacobiResidual; delete[] sharedJacobiSolutionError; delete[] solutionJacobiShared; } // FREE MEMORY delete[] initX; delete[] rhs; delete[] solution_exact; delete[] threadsPerBlock_array; return 0; }
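The kernels timed by the driver above live in jacobi-1D-shared.h, which is not shown here, so the sketch below is only a plain CPU reference for the same 1-D Poisson setup (zero Dirichlet boundaries, interior guess 1.0, rhs = 1). The unit-spacing stencil and residual formula are assumptions, not the helpers' exact definitions; the snippet just illustrates what one Jacobi sweep and the residual-based stopping test look like.
/* Illustrative sketch only -- CPU Jacobi on the driver's 1-D Poisson setup.
 * Stencil and residual below are assumed (h = 1), not the library's formulas. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

int main(void)
{
    const int nDim = 32, nGrids = nDim + 2;
    double *x    = (double *) calloc(nGrids, sizeof(double));
    double *xNew = (double *) calloc(nGrids, sizeof(double));
    double *rhs  = (double *) malloc(nGrids * sizeof(double));

    for (int i = 0; i < nGrids; ++i) {
        rhs[i] = 1.0;
        x[i] = (i == 0 || i == nGrids - 1) ? 0.0 : 1.0;   /* same initial guess as the driver */
    }

    const double TOL = 1e-6;
    double res = 1e30;
    int iter = 0;
    while (res > TOL && iter < 100000) {
        for (int i = 1; i < nGrids - 1; ++i)               /* assumed stencil: -u'' = f, h = 1 */
            xNew[i] = 0.5 * (x[i - 1] + x[i + 1] + rhs[i]);
        for (int i = 1; i < nGrids - 1; ++i)
            x[i] = xNew[i];
        res = 0.0;                                          /* assumed 2-norm residual */
        for (int i = 1; i < nGrids - 1; ++i) {
            double r = rhs[i] - (2.0 * x[i] - x[i - 1] - x[i + 1]);
            res += r * r;
        }
        res = sqrt(res);
        ++iter;
    }
    printf("converged to residual %.3e in %d sweeps\n", res, iter);
    free(x); free(xNew); free(rhs);
    return 0;
}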
0fb58dbd4703bbc966df9152d2bf41e47b895673.cu
#include<utility> #include<stdio.h> #include<assert.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <ostream> #include <iostream> #include <cstdio> #include <cstdlib> #include <cmath> #include <fstream> #include <omp.h> #include <time.h> #include <string.h> #include <utility> #include <time.h> // HEADER FILES #include "Helper/createFileStrings.h" #include "Helper/fillThreadsPerBlock.h" #include "Helper/jacobi.h" #include "Helper/residual.h" #include "Helper/solution_error.h" #include "Helper/setGPU.h" // Header file for shared jacobi code #include "jacobi-1D-shared.h" int main(int argc, char *argv[]) { // INPUTS /////////////////////////////////////////////////////////////// // SET CUDA DEVICE TO USE (IMPORTANT FOR ENDEAVOUR WHICH HAS 2!) // NAVIER-STOKES GPUs: "Quadro K420" // ENDEAVOUR GPUs: "TITAN V" OR "GeForce GTX 1080 Ti" std::string gpuToUse = "TITAN V"; setGPU(gpuToUse); // PARSE INPUTS const int nDim = atoi(argv[1]); const int residual_convergence_metric_flag = atoi(argv[2]); const double tolerance_value = atof(argv[3]); const int tolerance_reduction_flag = atoi(argv[4]); const int relaxation_flag = 1; // DEFAULT PARAMETERS const int numTrials = 20; int threadsPerBlock, subIterations; // INITIALIZE ARRAYS int nGrids = nDim + 2; double * initX = new double[nGrids]; double * rhs = new double[nGrids]; // 1D POISSON MATRIX for (int iGrid = 0; iGrid < nGrids; ++iGrid) { if (iGrid == 0 || iGrid == nGrids-1) { initX[iGrid] = 0.0f; } else { initX[iGrid] = 1.0f; } rhs[iGrid] = 1.0f; } // LOAD EXACT SOLUTION IF SOLUTION ERROR IS THE CRITERION FOR CONVERGENCE double * solution_exact = new double[nGrids]; double initSolutionError; if (residual_convergence_metric_flag == 0) { std::string SOLUTIONEXACT_FILENAME = "solution_exact_N32.txt"; loadSolutionExact(solution_exact, SOLUTIONEXACT_FILENAME, nGrids); initSolutionError = solutionError1DPoisson(initX, solution_exact, nGrids); } // COMPUTE TOLERANCE BASED ON RESIDUAL/ERROR AND INPUTS FROM PYTHON double TOL; double initResidual = residual1DPoisson(initX, rhs, nGrids); if (tolerance_reduction_flag == 0) { TOL = tolerance_value; } else if (tolerance_reduction_flag == 1 && residual_convergence_metric_flag == 1) { TOL = initResidual / tolerance_value; } else if (tolerance_reduction_flag == 1 && residual_convergence_metric_flag == 0) { // TOL = initSolutionError / tolerance_value; TOL = (1.0 - 0.01 * tolerance_value) * initSolutionError; } // THREADS PER BLOCK VALUES int* threadsPerBlock_array = new int[6]; fillThreadsPerBlockArray(threadsPerBlock_array); // DEFINE CUDA EVENTS cudaEvent_t start_sh, stop_sh; cudaEventCreate(&start_sh); cudaEventCreate(&stop_sh); // PRINTOUT // Print parameters of the problem to screen printf("===============INFORMATION============================\n"); printf("Number of unknowns: %d\n", nDim); printf("Number of Trials: %d\n", numTrials); if (residual_convergence_metric_flag == 1) { printf("Residual of initial solution: %f\n", initResidual); } else if (residual_convergence_metric_flag == 0) { printf("Solution Error of initial solution: %f\n", initSolutionError); } printf("Desired TOL of residual/solution error: %f\n", TOL); printf("======================================================\n"); // NECESSARY CONTAINERS cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); int OVERLAP; int numIterations; float sharedJacobiTime; float totalTime = 0.0; std::string SHARED_FILE_NAME; std::ofstream timings_sh; for (int tpb_idx = 0; tpb_idx < 6; tpb_idx = tpb_idx + 1) { threadsPerBlock = 
threadsPerBlock_array[tpb_idx]; numIterations = threadsPerBlock / 2; subIterations = threadsPerBlock / 2; SHARED_FILE_NAME = createFileStringOverlap(nDim, threadsPerBlock, residual_convergence_metric_flag, tolerance_value, tolerance_reduction_flag, relaxation_flag); std::cout << SHARED_FILE_NAME << std::endl; timings_sh.open(SHARED_FILE_NAME.c_str(), std::ios_base::app); int * sharedCycles = new int[numIterations]; double * sharedJacobiTimeArray = new double[numIterations]; double * sharedJacobiResidual = new double[numIterations]; double * sharedJacobiSolutionError = new double[numIterations]; double * solutionJacobiShared = new double[nGrids]; for (int i = 0; i < numIterations; i++) { // OBTAIN NUMBER OF CYCLES TO CONVERGE FOR GIVEN OVERLAP OVERLAP = 2*i; if (residual_convergence_metric_flag == 1) { sharedCycles[i] = jacobiSharedIterationCountResidual(initX, rhs, nGrids, TOL, threadsPerBlock, OVERLAP, subIterations); } else if (residual_convergence_metric_flag == 0) { sharedCycles[i] = jacobiSharedIterationCountSolutionError(initX, rhs, nGrids, TOL, threadsPerBlock, OVERLAP, subIterations, solution_exact); } for (int iter = 0; iter < numTrials; iter++) { //if (threadsPerBlock == innerSubdomainLength) { // GET FINAL SOLUTION cudaEventRecord(start_sh, 0); solutionJacobiShared = jacobiShared(initX, rhs, nGrids, sharedCycles[i], threadsPerBlock, OVERLAP, subIterations); // OBTAIN FINAL TIMES REQUIRED cudaEventRecord(stop_sh, 0); cudaEventSynchronize(stop_sh); cudaEventElapsedTime(&sharedJacobiTime, start_sh, stop_sh); totalTime = totalTime + sharedJacobiTime; printf("THREADS PER BLOCK: %d, OVERLAP = %d/%d, TRIAL %d/%d\n", threadsPerBlock, OVERLAP, threadsPerBlock-2, iter, numTrials); } sharedJacobiTimeArray[i] = totalTime / numTrials; printf("Number of Cycles: %d (subiterations = %d) \n", sharedCycles[i], threadsPerBlock/2); printf("Time needed for the Jacobi Shared: %f ms\n", sharedJacobiTimeArray[i]); if (residual_convergence_metric_flag == 1) { sharedJacobiResidual[i] = residual1DPoisson(solutionJacobiShared, rhs, nGrids); printf("Residual is %f\n", sharedJacobiResidual[i]); timings_sh << OVERLAP << " " << sharedCycles[i] << " " << sharedJacobiTimeArray[i] << " " << sharedJacobiResidual[i] << " " << numTrials << " " << "\n"; } else if (residual_convergence_metric_flag == 0) { sharedJacobiSolutionError[i] = solutionError1DPoisson(solutionJacobiShared, solution_exact, nGrids); printf("Solution Error is %f\n", sharedJacobiSolutionError[i]); timings_sh << OVERLAP << " " << sharedCycles[i] << " " << sharedJacobiTimeArray[i] << " " << sharedJacobiSolutionError[i] << " " << numTrials << " " << "\n"; } printf("================================================\n"); totalTime = 0.0; } timings_sh.close(); delete[] sharedCycles; delete[] sharedJacobiTimeArray; delete[] sharedJacobiResidual; delete[] sharedJacobiSolutionError; delete[] solutionJacobiShared; } // FREE MEMORY delete[] initX; delete[] rhs; delete[] solution_exact; delete[] threadsPerBlock_array; return 0; }
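The Jacobi driver above times every trial with cudaEventRecord / cudaEventSynchronize / cudaEventElapsedTime and averages over numTrials. A minimal standalone sketch of that timing pattern follows; the dummyKernel, the array size, and the launch configuration are illustrative placeholders and are not taken from the file above.

// Minimal sketch of the event-timing loop used by the driver above (assumed
// placeholders: dummyKernel, n, block size; only the event pattern is the point).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = 0.5f * x[i] + 1.0f;   // stand-in for one Jacobi sweep
}

int main()
{
    const int n = 1 << 20;
    const int numTrials = 20;               // same trial count as the driver above
    float *d_x;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    float totalMs = 0.0f;
    for (int trial = 0; trial < numTrials; ++trial) {
        cudaEventRecord(start, 0);
        dummyKernel<<<(n + 255) / 256, 256>>>(d_x, n);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);          // wait so the elapsed time is valid
        float ms = 0.0f;
        cudaEventElapsedTime(&ms, start, stop);
        totalMs += ms;
    }
    printf("average kernel time: %f ms\n", totalMs / numTrials);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}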
15942721b0be6bfc5d8de8d26c08e57fd8512e21.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <string> #include <stdlib.h> #include <hip/hip_runtime.h> using namespace std; struct BLOCK { int x; int y; int z; int width; int height; int depth; float temp; }; struct CONF { int type; float k; int timestep; int x; int y; int z; float def_temp; //default starting temperature for nodes vector<BLOCK> Heater; }; #define THREAD_SIZE 8 #define DIVIDE(a,b) ((a) + (b) - 1)/(b) //the position of (x0,y0,z0) in 1D array #define POS(x0,y0,z0) ((x0) + (y0) * data->x + (z0) * data->x * data->y) #define POS1(x0,y0,z0) ((x0) + (y0) * data.x + (z0) * data.x * data.y) void ReadFile(string & file_path, CONF & data) { fstream fin(file_path, ios_base::in); string tmp; while (getline(fin, tmp)) { if (tmp.find('#') != string::npos) { //this line should be ignored continue; } else break; } int flag = 0; //read type if (tmp.find("2D") != string::npos) { data.type = 2; } else if (tmp.find("3D") != string::npos) { data.type = 3; } flag = 1; while (getline(fin, tmp)) { if (tmp.find('#') != string::npos || tmp[0] == '\r') { //this line should be ignored continue; } else if (flag == 1) { //read k data.k = stof(tmp); flag = 2; } else if (flag == 2) { //read timesteps data.timestep = stoi(tmp); flag = 3; } else if (flag == 3) { //read x,y,z stringstream ss(tmp); ss.str(tmp); char c; if (data.type == 2) { ss >> data.x >> c >> data.y; data.z = 1; } else if (data.type == 3) { ss >> data.x >> c >> data.y >> c >> data.z; } flag = 4; } else if (flag == 4) { //read default starting temperature data.def_temp = stof(tmp); flag = 5; } else if (flag == 5) { //read temperature blocks stringstream ss(tmp); ss.str(tmp); char c; BLOCK b; if (data.type == 2) { ss >> b.x >> c >> b.y >> c >> b.width >> c >> b.height >> c >> b.temp; b.z = 0; b.depth = 1; } else if (data.type == 3) { ss >> b.x >> c >> b.y >> c >> b.z >> c >> b.width >> c >> b.height >> c >> b.depth >> c >> b.temp; } data.Heater.push_back(b); } } fin.close(); } void PrintConf(CONF & data) { cout.setf(ios::fixed); cout << "Dimension: " << data.type << "D" << endl; cout << "k = " << data.k << endl; cout << "timestep = " << data.timestep << endl; cout << "grid axis: \n" << "x = " << data.x << ", y = " << data.y << ", z = " << data.z << endl; cout << "default temperature = " << data.def_temp << endl; cout << "fixed blocks: " << endl; for (auto &i : data.Heater) { cout << "(" << i.x << ',' << i.y << ',' << i.z << "): width = " << i.width << " height = " << i.height << " depth = " << i.depth << " temp = " << i.temp << endl; } cout << endl; } void initialize(CONF &data, vector<float> &u) { // set all nodes to default temperature u.assign(data.x * data.y * data.z, data.def_temp); // set the heater blocks' temperature for (int i = 0; i < data.Heater.size(); i++) { BLOCK b = data.Heater[i]; for (int m = b.z; m < b.z + b.depth; m++) { for (int j = b.y; j < b.y + b.height; j++) { for (int k = b.x; k < b.x + b.width; k++) { u[POS1(k, j, m)] = b.temp; } } } } } void print(const CONF &data, const vector<float> &u) { ofstream ofp; ofp.open("output.csv", ios_base::out); cout.setf(ios::fixed); for (int m = 0; m < data.z; m++) { for (int j = 0; j < data.y; j++) { for (int k = 0; k < data.x - 1; k++) { //cout << u[POS1(k, j, m)] << " "; ofp << u[POS1(k, j, m)] << ','; } //cout << u[POS1(data.x - 1, j, m)] << endl; ofp << u[POS1(data.x - 1, j, m)] << endl; } //cout << endl; ofp << endl; } //cout << endl; ofp.close(); } // if node belongs to the fixed 
blocks, return true; otherwise return false. __device__ bool srcBlock(int x0, int y0, int z0, BLOCK* &Heater, int count) { for (int i = 0; i < count; i++) { if (x0 >= Heater[i].x && x0 < Heater[i].x + Heater[i].width && y0 >= Heater[i].y && y0 < Heater[i].y + Heater[i].height && z0 >= Heater[i].z && z0 < Heater[i].z + Heater[i].depth) return true; } return false; } __global__ void heat2D3D(float *u, float *u_new, CONF *data, BLOCK *pHeater, int count) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z; //2D: Tnew = Told + k (Ttop + Tbottom + Tleft + Tright 4 Told) //3D: Tnew = Told + k (Tfront + Tback + Ttop + Tbottom + Tleft + Tright 6 Told) if (x < data->x && y < data->y && z < data->z) { if (srcBlock(x, y, z, pHeater, count)) { // fixed blocks, u_new = u u_new[POS(x, y, z)] = u[POS(x, y, z)]; } else if (data->type == 2) { if (x == 0) { if (y == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] - 2 * u[POS(x, y, z)]); } else if (y == data->y - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] - 2 * u[POS(x, y, z)]); } else { //x = 0, 0 < y < Y-1 u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] - 3 * u[POS(x, y, z)]); } } else if (x == data->x - 1) { if (y == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y + 1, z)] - 2 * u[POS(x, y, z)]); } else if (y == data->y - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] - 2 * u[POS(x, y, z)]); } else { //x = X-1, 0 < y < Y-1 u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] - 3 * u[POS(x, y, z)]); } } else { //0 < x < X-1 if (y == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] - 3 * u[POS(x, y, z)]); } else if (y == data->y - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] - 4 * u[POS(x, y, z)]); } } } else if (data->type == 3) { if (x == 0) { if (y == 0) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 3 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } } else if (y == data->y - 1) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z + 1)] - 3 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } } else { //x = 0, 0 < y < Y-1 if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * 
(u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 4 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, z)]); } } } else if (x == data->x - 1) { if (y == 0) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 3 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } } else if (y == data->y - 1) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z + 1)] - 3 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } } else { //x = X-1, 0 < y < Y-1 if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 4 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, z)]); } } } else { //0 < x < X-1 if (y == 0) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 4 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, z)]); } } else if (y == data->y - 1) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] - 4 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, z)]); } } else { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, 
z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 5 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 6 * u[POS(x, y, z)]); } } } } } float *temp = u_new; u_new = u; u = temp; } //glocal variables CONF data; int main(int argc, char* argv[]) { if (argc != 2) { cout << "Input parameters error!" << endl; return 1; } /*****Read Configuration File*****/ string file_path = argv[1]; //string file_path = "C:\\Users\\leyic\\Documents\\Sublime\\6122-p2\\3D.conf"; ReadFile(file_path, data); PrintConf(data); /*****Initialize Data*****/ vector<float> u; vector<float> u_new; initialize(data, u); u_new = u; //print(data, u); // for (auto &i : u) { // cout << i << " "; // } cout << endl; /******Cuda Initialize*****/ dim3 threads_num = dim3(THREAD_SIZE, THREAD_SIZE); dim3 blocks_num = dim3(DIVIDE(data.x, THREAD_SIZE), DIVIDE(data.y, THREAD_SIZE), data.z); //allocate space for device copies float *d_u, *d_u_new, *temp; CONF* d_data; int size = data.x * data.y * data.z; BLOCK* d_heater; int count = data.Heater.size(); hipMalloc((void **)&d_u, size * sizeof(float)); hipMalloc((void **)&d_u_new, size * sizeof(float)); hipMalloc((void **)&d_data, sizeof(CONF)); hipMalloc((void **)&d_heater, count * sizeof(BLOCK)); //copy inputs to device hipMemcpy(d_u_new, &u_new[0], size * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_data, &data, sizeof(CONF), hipMemcpyHostToDevice); hipMemcpy(d_heater, data.Heater.data(), count * sizeof(BLOCK), hipMemcpyHostToDevice); hipMemcpy(d_u, &u[0], size * sizeof(float), hipMemcpyHostToDevice); /*****Compute Heat Diffusion*****/ for (int i = 0; i < data.timestep; i++) { hipLaunchKernelGGL(( heat2D3D) , dim3(blocks_num), dim3(threads_num), 0, 0, d_u, d_u_new, d_data, d_heater, count); temp = d_u_new; d_u_new = d_u; d_u = temp; } hipMemcpy(&u[0], d_u, size * sizeof(float), hipMemcpyDeviceToHost); print(data, u); /*****Free*****/ hipFree(d_u); hipFree(d_u_new); hipFree(d_data); hipFree(d_heater); return 0; }
15942721b0be6bfc5d8de8d26c08e57fd8512e21.cu
#include <iostream> #include <fstream> #include <sstream> #include <vector> #include <string> #include <stdlib.h> #include <cuda.h> using namespace std; struct BLOCK { int x; int y; int z; int width; int height; int depth; float temp; }; struct CONF { int type; float k; int timestep; int x; int y; int z; float def_temp; //default starting temperature for nodes vector<BLOCK> Heater; }; #define THREAD_SIZE 8 #define DIVIDE(a,b) ((a) + (b) - 1)/(b) //the position of (x0,y0,z0) in 1D array #define POS(x0,y0,z0) ((x0) + (y0) * data->x + (z0) * data->x * data->y) #define POS1(x0,y0,z0) ((x0) + (y0) * data.x + (z0) * data.x * data.y) void ReadFile(string & file_path, CONF & data) { fstream fin(file_path, ios_base::in); string tmp; while (getline(fin, tmp)) { if (tmp.find('#') != string::npos) { //this line should be ignored continue; } else break; } int flag = 0; //read type if (tmp.find("2D") != string::npos) { data.type = 2; } else if (tmp.find("3D") != string::npos) { data.type = 3; } flag = 1; while (getline(fin, tmp)) { if (tmp.find('#') != string::npos || tmp[0] == '\r') { //this line should be ignored continue; } else if (flag == 1) { //read k data.k = stof(tmp); flag = 2; } else if (flag == 2) { //read timesteps data.timestep = stoi(tmp); flag = 3; } else if (flag == 3) { //read x,y,z stringstream ss(tmp); ss.str(tmp); char c; if (data.type == 2) { ss >> data.x >> c >> data.y; data.z = 1; } else if (data.type == 3) { ss >> data.x >> c >> data.y >> c >> data.z; } flag = 4; } else if (flag == 4) { //read default starting temperature data.def_temp = stof(tmp); flag = 5; } else if (flag == 5) { //read temperature blocks stringstream ss(tmp); ss.str(tmp); char c; BLOCK b; if (data.type == 2) { ss >> b.x >> c >> b.y >> c >> b.width >> c >> b.height >> c >> b.temp; b.z = 0; b.depth = 1; } else if (data.type == 3) { ss >> b.x >> c >> b.y >> c >> b.z >> c >> b.width >> c >> b.height >> c >> b.depth >> c >> b.temp; } data.Heater.push_back(b); } } fin.close(); } void PrintConf(CONF & data) { cout.setf(ios::fixed); cout << "Dimension: " << data.type << "D" << endl; cout << "k = " << data.k << endl; cout << "timestep = " << data.timestep << endl; cout << "grid axis: \n" << "x = " << data.x << ", y = " << data.y << ", z = " << data.z << endl; cout << "default temperature = " << data.def_temp << endl; cout << "fixed blocks: " << endl; for (auto &i : data.Heater) { cout << "(" << i.x << ',' << i.y << ',' << i.z << "): width = " << i.width << " height = " << i.height << " depth = " << i.depth << " temp = " << i.temp << endl; } cout << endl; } void initialize(CONF &data, vector<float> &u) { // set all nodes to default temperature u.assign(data.x * data.y * data.z, data.def_temp); // set the heater blocks' temperature for (int i = 0; i < data.Heater.size(); i++) { BLOCK b = data.Heater[i]; for (int m = b.z; m < b.z + b.depth; m++) { for (int j = b.y; j < b.y + b.height; j++) { for (int k = b.x; k < b.x + b.width; k++) { u[POS1(k, j, m)] = b.temp; } } } } } void print(const CONF &data, const vector<float> &u) { ofstream ofp; ofp.open("output.csv", ios_base::out); cout.setf(ios::fixed); for (int m = 0; m < data.z; m++) { for (int j = 0; j < data.y; j++) { for (int k = 0; k < data.x - 1; k++) { //cout << u[POS1(k, j, m)] << " "; ofp << u[POS1(k, j, m)] << ','; } //cout << u[POS1(data.x - 1, j, m)] << endl; ofp << u[POS1(data.x - 1, j, m)] << endl; } //cout << endl; ofp << endl; } //cout << endl; ofp.close(); } // if node belongs to the fixed blocks, return true; otherwise return false. 
__device__ bool srcBlock(int x0, int y0, int z0, BLOCK* &Heater, int count) { for (int i = 0; i < count; i++) { if (x0 >= Heater[i].x && x0 < Heater[i].x + Heater[i].width && y0 >= Heater[i].y && y0 < Heater[i].y + Heater[i].height && z0 >= Heater[i].z && z0 < Heater[i].z + Heater[i].depth) return true; } return false; } __global__ void heat2D3D(float *u, float *u_new, CONF *data, BLOCK *pHeater, int count) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int z = blockDim.z * blockIdx.z; //2D: Tnew = Told + k ∗ (Ttop + Tbottom + Tleft + Tright − 4 ∗ Told) //3D: Tnew = Told + k ∗ (Tfront + Tback + Ttop + Tbottom + Tleft + Tright − 6 ∗ Told) if (x < data->x && y < data->y && z < data->z) { if (srcBlock(x, y, z, pHeater, count)) { // fixed blocks, u_new = u u_new[POS(x, y, z)] = u[POS(x, y, z)]; } else if (data->type == 2) { if (x == 0) { if (y == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] - 2 * u[POS(x, y, z)]); } else if (y == data->y - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] - 2 * u[POS(x, y, z)]); } else { //x = 0, 0 < y < Y-1 u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] - 3 * u[POS(x, y, z)]); } } else if (x == data->x - 1) { if (y == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y + 1, z)] - 2 * u[POS(x, y, z)]); } else if (y == data->y - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] - 2 * u[POS(x, y, z)]); } else { //x = X-1, 0 < y < Y-1 u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] - 3 * u[POS(x, y, z)]); } } else { //0 < x < X-1 if (y == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] - 3 * u[POS(x, y, z)]); } else if (y == data->y - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] - 4 * u[POS(x, y, z)]); } } } else if (data->type == 3) { if (x == 0) { if (y == 0) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 3 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } } else if (y == data->y - 1) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z + 1)] - 3 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } } else { //x = 0, 0 < y < Y-1 if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y 
- 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 4 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, z)]); } } } else if (x == data->x - 1) { if (y == 0) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 3 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } } else if (y == data->y - 1) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z + 1)] - 3 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] - 3 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } } else { //x = X-1, 0 < y < Y-1 if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 4 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, z)]); } } } else { //0 < x < X-1 if (y == 0) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 4 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, z)]); } } else if (y == data->y - 1) { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z + 1)] - 4 * u[POS(x, y, z)]); } else if (z == data->z - 1) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] - 4 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, z)]); } } else { if (z == 0) { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z + 1)] - 5 * u[POS(x, y, z)]); } else if (z == data->z - 1) { 
u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] - 5 * u[POS(x, y, z)]); } else { u_new[POS(x, y, z)] = u[POS(x, y, z)] + data->k * (u[POS(x - 1, y, z)] + u[POS(x + 1, y, z)] + u[POS(x, y - 1, z)] + u[POS(x, y + 1, z)] + u[POS(x, y, z - 1)] + u[POS(x, y, z + 1)] - 6 * u[POS(x, y, z)]); } } } } } float *temp = u_new; u_new = u; u = temp; } //glocal variables CONF data; int main(int argc, char* argv[]) { if (argc != 2) { cout << "Input parameters error!" << endl; return 1; } /*****Read Configuration File*****/ string file_path = argv[1]; //string file_path = "C:\\Users\\leyic\\Documents\\Sublime\\6122-p2\\3D.conf"; ReadFile(file_path, data); PrintConf(data); /*****Initialize Data*****/ vector<float> u; vector<float> u_new; initialize(data, u); u_new = u; //print(data, u); // for (auto &i : u) { // cout << i << " "; // } cout << endl; /******Cuda Initialize*****/ dim3 threads_num = dim3(THREAD_SIZE, THREAD_SIZE); dim3 blocks_num = dim3(DIVIDE(data.x, THREAD_SIZE), DIVIDE(data.y, THREAD_SIZE), data.z); //allocate space for device copies float *d_u, *d_u_new, *temp; CONF* d_data; int size = data.x * data.y * data.z; BLOCK* d_heater; int count = data.Heater.size(); cudaMalloc((void **)&d_u, size * sizeof(float)); cudaMalloc((void **)&d_u_new, size * sizeof(float)); cudaMalloc((void **)&d_data, sizeof(CONF)); cudaMalloc((void **)&d_heater, count * sizeof(BLOCK)); //copy inputs to device cudaMemcpy(d_u_new, &u_new[0], size * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_data, &data, sizeof(CONF), cudaMemcpyHostToDevice); cudaMemcpy(d_heater, data.Heater.data(), count * sizeof(BLOCK), cudaMemcpyHostToDevice); cudaMemcpy(d_u, &u[0], size * sizeof(float), cudaMemcpyHostToDevice); /*****Compute Heat Diffusion*****/ for (int i = 0; i < data.timestep; i++) { heat2D3D <<< blocks_num, threads_num>>>(d_u, d_u_new, d_data, d_heater, count); temp = d_u_new; d_u_new = d_u; d_u = temp; } cudaMemcpy(&u[0], d_u, size * sizeof(float), cudaMemcpyDeviceToHost); print(data, u); /*****Free*****/ cudaFree(d_u); cudaFree(d_u_new); cudaFree(d_data); cudaFree(d_heater); return 0; }
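The heat-diffusion pair above states its 2D update rule only in a kernel comment, Tnew = Told + k * (Ttop + Tbottom + Tleft + Tright - 4 * Told), and then unrolls it per boundary case. A small host-only sketch of one such interior step follows; the grid size, k, and the single hot node are illustrative assumptions, not values read from the configuration file the program parses.

// Host-side sketch of the interior 2D update quoted in the kernel comment above:
//   Tnew = Told + k * (Ttop + Tbottom + Tleft + Tright - 4 * Told)
// Grid size, k, and the hot node below are assumed example values only.
#include <cstdio>
#include <vector>

int main()
{
    const int X = 8, Y = 8;
    const float k = 0.1f;
    std::vector<float> u(X * Y, 0.0f), u_new(X * Y, 0.0f);
    u[(Y / 2) * X + X / 2] = 100.0f;          // one hot node in the middle

    // one explicit time step over interior nodes (boundaries left untouched here)
    for (int y = 1; y < Y - 1; ++y) {
        for (int x = 1; x < X - 1; ++x) {
            int c = y * X + x;                // row-major index, X columns per row
            u_new[c] = u[c] + k * (u[c - X] + u[c + X] + u[c - 1] + u[c + 1]
                                   - 4.0f * u[c]);
        }
    }

    printf("center after one step: %f\n", u_new[(Y / 2) * X + X / 2]);
    return 0;
}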
cd1c20423a84a57e766e93b356cdf5bece692327.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <math.h> #include <hdf5.h> #define Npml 10 const float light_velocity = 2.99792458e8; // m s- const float ep0 = 8.85418781762038920e-12; // F m-1 (permittivity at vacuum) const float mu0 = 1.25663706143591730e-6; // N A-2 (permeability at vacuum) const float imp0 = sqrt( mu0/ep0 ); // (impedance at vacuum) const float pi = 3.14159265358979323846; // Allocate constant memory for CPML __constant__ float rcmbE[2*Npml]; __constant__ float rcmaE[2*Npml]; __constant__ float rcmbH[2*Npml]; __constant__ float rcmaH[2*Npml]; typedef struct N3 { int x, y, z; } N3; typedef struct P3F3 { float ***x, ***y, ***z; } P3F3; typedef struct P1F3 { float *x, *y, *z; } P1F3; typedef struct P1F2 { float *f, *b; } P1F2; typedef struct P1F6 { P1F2 x, y, z; } P1F6; __host__ int selectTPB(int Nx, int Ny) { int Ntot = Nx*Ny; int TPB=1; if ( Ntot%32 == 0 ) TPB = 512; else if ( Ntot%16 == 0 ) TPB = 256; else if ( Ntot%8 == 0 ) TPB = 128; else printf("(%d,%d) mismatched TPB!\n", Nx, Ny); return TPB; } __host__ void verify_over_TPB(int TPB) { if ( TPB > 512 ) { printf("Error: An excessive number of threads per block.\n"); exit(0); } } __host__ void verify_over_BPG(int BPG) { if ( BPG > 65535 ) { printf("Error: An excessive number of blocks per grid.\n"); exit(0); } } __host__ void updateTimer(time_t t0, int tstep, char str[]) { int elapsedTime=(int)(time(0)-t0); sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60); } __host__ void exec(char *format, ...) { char str[1024]; va_list ap; va_start(ap, format); vsprintf(str, format, ap); system(str); } __host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) { char filename[1024]; va_list ap; va_start(ap, format); vsprintf(filename, format, ap); hid_t file, dataset, filespace, memspace; hsize_t dimsm[3] = { Ni, Nj, Nk }; hsize_t start[3] = { is, js, ks }; hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke }; memspace = H5Screate_simple(3, dimsm, 0); filespace = H5Screate_simple(3, count, 0); file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0); H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]); H5Dclose(dataset); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(file); } __host__ void print_array(N3 N, float ***a) { int j,k; for (j=0; j<N.y; j++) { for (k=0; k<N.z; k++) { printf("%1.4f\t", a[N.x/2][j][k]); } printf("\n"); } printf("\n"); } __host__ float ***makeArray(N3 N) { float ***f; f = (float ***) calloc (N.x, sizeof(float **)); f[0] = (float **) calloc (N.y*N.x, sizeof(float *)); f[0][0] = (float *) calloc (N.z*N.y*N.x, sizeof(float)); for (int i=0; i<N.x; i++) f[i] = f[0] + i*N.y; for (int i=0; i<N.y*N.x; i++) f[0][i] = f[0][0] + i*N.z; return f; } __host__ void set_geometry(N3 N, P3F3 CE) { int i,j,k; for (i=0; i<N.x; i++) { for (j=0; j<N.y; j++) { for (k=0; k<N.z; k++) { CE.x[i][j][k] = 0.5; CE.y[i][j][k] = 0.5; CE.z[i][j][k] = 0.5; } } } } __global__ void initArray(int Ntot, float *a) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if ( idx < Ntot ) a[idx] = 0; } __host__ void initMainArrays(N3 N, int Nzpit, P1F3 F) { int TPB=512; int Ntot = (N.x+1)*N.y*Nzpit; int BPG = Ntot%TPB == 0 ? 
Ntot/TPB : Ntot/TPB + 1; verify_over_BPG( BPG ); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, F.x); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, F.y); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, F.z); } __host__ void initPsiArrays(N3 N, int Nzpit, int Npmlpit, P1F6 psix, P1F6 psiy, P1F6 psiz) { int TPB=512; int Ntot, BPG; Ntot = Npml*N.y*Nzpit; BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; verify_over_BPG( BPG ); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psix.y.f); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psix.y.b); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psix.z.f); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psix.z.b); Ntot = N.x*Npml*Nzpit; BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; verify_over_BPG( BPG ); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psiy.z.f); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psiy.z.b); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psiy.x.f); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psiy.x.b); Ntot = N.x*N.y*Npmlpit; BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; verify_over_BPG( BPG ); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psiz.x.f); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psiz.x.b); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psiz.y.f); hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, Ntot, psiz.y.b); } __host__ void freeMainArrays(P1F3 F) { hipFree(F.x); hipFree(F.y); hipFree(F.z); } __host__ void freePsiArrays(P1F6 psix, P1F6 psiy, P1F6 psiz) { hipFree(psix.y.f); hipFree(psix.y.b); hipFree(psix.z.f); hipFree(psix.z.b); hipFree(psiy.z.f); hipFree(psiy.z.b); hipFree(psiy.x.f); hipFree(psiy.x.b); hipFree(psiz.x.f); hipFree(psiz.x.b); hipFree(psiz.y.f); hipFree(psiz.y.b); } __global__ void updateE(int Nzpit, int Nyzpit, int TPB, P1F3 E, P1F3 H, P1F3 CE) { int tk = threadIdx.x; int idx = blockIdx.x*TPB + tk; int eidx = idx + Nyzpit; extern __shared__ float hs[]; float* hx = (float*) hs; float* hy = (float*) &hx[TPB+1]; float* hz = (float*) &hy[TPB+1]; hx[tk] = H.x[idx]; hy[tk] = H.y[idx]; hz[tk] = H.z[idx]; if ( tk==TPB-1 ) { hx[tk+1] = H.x[idx+1]; hy[tk+1] = H.y[idx+1]; } __syncthreads(); E.x[eidx] += CE.x[idx]*( H.z[idx+Nzpit] - hz[tk] - hy[tk+1] + hy[tk] ); E.y[eidx] += CE.y[idx]*( hx[tk+1] - hx[tk] - H.z[idx+Nyzpit] + hz[tk] ); E.z[eidx] += CE.z[idx]*( H.y[idx+Nyzpit] - hy[tk] - H.x[idx+Nzpit] + hx[tk] ); } __global__ void updateH(int Nzpit, int Nyzpit, int TPB, P1F3 E, P1F3 H) { int tk = threadIdx.x; int idx = blockIdx.x*TPB + tk; int eidx = idx + Nyzpit; extern __shared__ float es[]; float* ex = (float*) es; float* ey = (float*) &ex[TPB+1]; float* ez = (float*) &ey[TPB+1]; ex[tk+1] = E.x[eidx]; ey[tk+1] = E.y[eidx]; ez[tk] = E.z[eidx]; if ( tk==0 ) { ex[0] = E.x[eidx-1]; ey[0] = E.y[eidx-1]; } __syncthreads(); H.x[idx] -= 0.5*( ez[tk] - E.z[eidx-Nzpit] - ey[tk+1] + ey[tk] ); H.y[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + E.z[eidx-Nyzpit] ); H.z[idx] -= 0.5*( ey[tk+1] - E.y[eidx-Nyzpit] - ex[tk+1] + E.x[eidx-Nzpit] ); } __global__ void updateSrc(N3 N, int Nzpit, P1F3 E, int tstep) { int idx = threadIdx.x; // int ijk = 
idx*(N.y)*(Nzpit) + (N.y/2)*(Nzpit) + (N.z/2); int ijk = (N.x/2+1)*(N.y)*(Nzpit) + (N.y/2)*(Nzpit) + idx; //E.x[ijk] += sin(0.1*tstep); E.z[ijk] += sin(0.1*tstep); } __global__ void updateCPMLxE( int Nx, int Nzpit, int Nyzpit, int TPB, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*TPB + threadIdx.x; int pi = pidx/Nyzpit + backward*Npml; int idx = pidx + backward*(Nx-Npml-1)*Nyzpit; int eidx = idx + Nyzpit; psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( H.z[idx+Nyzpit] - H.z[idx] ); E.y[eidx] -= CE.y[idx]*psi1[pidx]; psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( H.y[idx+Nyzpit] - H.y[idx] ); E.z[eidx] += CE.z[idx]*psi2[pidx]; } __global__ void updateCPMLxH( int Nx, int Nzpit, int Nyzpit, int TPB, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*TPB + threadIdx.x; int pi = pidx/Nyzpit + backward*Npml; int idx = pidx + backward*(Nx-Npml)*Nyzpit; int eidx = idx + Nyzpit; psi1[pidx] = rcmbH[pi]*psi1[pidx] + rcmaH[pi]*( E.z[eidx] - E.z[eidx-Nyzpit] ); H.y[idx] += 0.5*psi1[pidx]; psi2[pidx] = rcmbH[pi]*psi2[pidx] + rcmaH[pi]*( E.y[eidx] - E.y[eidx-Nyzpit] ); H.z[idx] -= 0.5*psi2[pidx]; } __global__ void updateCPMLyE( int Ny, int Nzpit, int Npmlzpit, int TPB, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*TPB + threadIdx.x; int i = pidx/Npmlzpit; int pj = ( pidx - i*Npmlzpit )/Nzpit + backward*Npml; int idx = pidx + (i+backward)*(Ny-Npml)*Nzpit - backward*Nzpit; int eidx = idx + Ny*Nzpit; psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( H.x[idx+Nzpit] - H.x[idx] ); E.z[eidx] -= CE.z[idx]*psi1[pidx]; psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( H.z[idx+Nzpit] - H.z[idx] ); E.x[eidx] += CE.x[idx]*psi2[pidx]; } __global__ void updateCPMLyH( int Ny, int Nzpit, int Npmlzpit, int TPB, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*TPB + threadIdx.x; int i = pidx/Npmlzpit; int pj = ( pidx - i*Npmlzpit )/Nzpit + backward*Npml; int idx = pidx + (i+backward)*(Ny-Npml)*Nzpit; int eidx = idx + Ny*Nzpit; psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( E.x[eidx] - E.x[eidx-Nzpit] ); H.z[idx] += 0.5*psi1[pidx]; psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( E.z[eidx] - E.z[eidx-Nzpit] ); H.x[idx] -= 0.5*psi2[pidx]; } __global__ void updateCPMLzE( int Ny, int Nzpit, int Npmlpit, int TPB, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*TPB + threadIdx.x; int i = pidx/Ny*Npmlpit; int j = ( pidx - i*Ny*Npmlpit )/Npmlpit; int pk = pidx - i*Ny*Npmlpit - j*Npmlpit; int idx = pidx + (j+i*Ny)*(Nzpit-Npmlpit);// + backward*(N.z-Npml-1); int eidx = idx + Ny*Nzpit; extern __shared__ float hs[]; float* hx = (float*) hs; float* hy = (float*) &hx[TPB+1]; hx[pk] = H.x[idx]; hy[pk] = H.y[idx]; if ( pk==TPB-1 ) { hx[pk+1] = H.x[idx+1]; hy[pk+1] = H.y[idx+1]; } __syncthreads(); if ( pk<Npml ) { int pi = pk + backward*Npml; psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( hy[pk+1] - hy[pk] ); E.x[eidx] -= CE.x[idx]*psi1[pidx]; psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( hx[pk+1] - hx[pk] ); E.y[eidx] += CE.y[idx]*psi2[pidx]; } } __global__ void init_boundary_xE(N3 N, int Nyzpit, P1F3 E) { int idx = N.x*Nyzpit + blockIdx.x*blockDim.x + threadIdx.x; if ( idx/Nyzpit == N.x ) { E.y[idx] = 0; E.z[idx] = 0; } } __global__ void init_boundary_yE(N3 N, int Nzpit, int Nyzpit, P1F3 E) { int ti = blockIdx.x*blockDim.x + threadIdx.x; int i = ti/Nzpit; int idx = (i+2)*Nyzpit - (i+1)*Nzpit + ti; if ( i<N.x ) { E.z[idx] = 0; E.x[idx] = 0; } } 
__global__ void init_boundary_zE(N3 N, int Nzpit, int Nyzpit, P1F3 E) { int ti = blockIdx.x*blockDim.x + threadIdx.x; int i = ti/N.y; int idx = (N.z-1) + (i+1)*Nyzpit + (ti-i*N.y)*Nzpit; if ( i<N.x ) { E.x[idx] = 0; E.y[idx] = 0; } } __global__ void init_boundary_xH(N3 N, int Nyzpit, P1F3 H) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if ( idx/Nyzpit == 0 ) { H.x[idx] = 0; H.y[idx] = 0; H.z[idx] = 0; } } __global__ void init_boundary_yH(N3 N, int Nzpit, int Nyzpit, P1F3 H) { int ti = blockIdx.x*blockDim.x + threadIdx.x; int i = ti/Nzpit; int idx = i*Nyzpit - i*Nzpit + ti; if ( i<N.x ) { H.x[idx] = 0; H.y[idx] = 0; H.z[idx] = 0; } } __global__ void init_boundary_zH(N3 N, int Nzpit, int Nyzpit, P1F3 H) { int ti = blockIdx.x*blockDim.x + threadIdx.x; int i = ti/N.y; int idx = i*Nyzpit + (ti-i*N.y)*Nzpit; //int i0 = idx/Nyzpit; //int j0 = (idx - i0*Nyzpit)/Nzpit; //int k0 = idx - i0*Nyzpit - j0*Nzpit; //printf("[%d,%d,%d] %d\n", i0,j0,k0,idx); if ( i<N.x ) { //printf("\t\t\tIn: [%d,%d,%d] %d\n", i0,j0,k0,idx); H.x[idx] = 0; H.y[idx] = 0; H.z[idx] = 0; } } __host__ void update_boundary_E(N3 N, int Nzpit, int Nyzpit, P1F3 devE) { int Ntot, BPG; int TPB = 128; /* Ntot = Nyzpit; BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; init_boundary_xE <<<dim3(BPG),dim3(TPB)>>> ( N, Nyzpit, devE ); Ntot = N.x*Nzpit; BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; init_boundary_yE <<<dim3(BPG),dim3(TPB)>>> ( N, Nzpit, Nyzpit, devE ); */ Ntot = N.x*N.y; BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; hipLaunchKernelGGL(( init_boundary_zE) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, N, Nzpit, Nyzpit, devE ); } __host__ void update_boundary_H(N3 N, int Nzpit, int Nyzpit, P1F3 devH) { int Ntot, BPG; int TPB = 128; /* Ntot = Nyzpit; BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; init_boundary_xH <<<dim3(BPG),dim3(TPB)>>> ( N, Nyzpit, devH ); Ntot = N.x*Nzpit; BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; init_boundary_yH <<<dim3(BPG),dim3(TPB)>>> ( N, Nzpit, Nyzpit, devH ); */ Ntot = N.x*N.y; BPG = Ntot%TPB == 0 ? 
Ntot/TPB : Ntot/TPB + 1; hipLaunchKernelGGL(( init_boundary_zH) , dim3(dim3(BPG)),dim3(dim3(TPB)), 0, 0, N, Nzpit, Nyzpit, devH ); } int main() { int tstep; char time_str[32]; time_t t0; int i; // Set the parameters N3 N; N.x = 200; N.y = 200; N.z = 200; //N.y = 16; //N.z = 20; int TMAX = 10000; float S = 0.5; float dx = 10e-9; float dt = S*dx/light_velocity; printf("NPML=%d\n", Npml); printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX); // Allocate host memory P3F3 CE; CE.x = makeArray(N); CE.y = makeArray(N); CE.z = makeArray(N); float ***Ex, ***Ez; N3 Nxp; Nxp.x = N.x+1; Nxp.y = N.y; Nxp.z = N.z; Ex = makeArray(Nxp); Ez = makeArray(Nxp); // Geometry set_geometry(N, CE); // CPML int m = 4; // grade_order float sigma_max = (m+1.)/(15*pi*Npml*dx); float alpha = 0.05; float *sigmaE, *bE, *aE; float *sigmaH, *bH, *aH; sigmaE = (float *) calloc (2*Npml, sizeof(float)); sigmaH = (float *) calloc (2*Npml, sizeof(float)); bE = (float *) calloc (2*Npml, sizeof(float)); bH = (float *) calloc (2*Npml, sizeof(float)); aE = (float *) calloc (2*Npml, sizeof(float)); aH = (float *) calloc (2*Npml, sizeof(float)); for (i=0; i<Npml; i++) { sigmaE[i] = pow( (Npml-0.5-i)/Npml, m )*sigma_max; sigmaE[i+Npml] = pow( (0.5+i)/Npml, m )*sigma_max; sigmaH[i] = pow( (float)(Npml-i)/Npml, m )*sigma_max; sigmaH[i+Npml] = pow( (1.+i)/Npml, m )*sigma_max; } for (i=0; i<2*Npml; i++) { bE[i] = exp( -(sigmaE[i] + alpha)*dt/ep0 ); bH[i] = exp( -(sigmaH[i] + alpha)*dt/ep0 ); aE[i] = sigmaE[i]/(sigmaE[i]+alpha)*(bE[i]-1); aH[i] = sigmaH[i]/(sigmaH[i]+alpha)*(bH[i]-1); //printf("[%d]\tsigmaE=%g,\tbE=%g,aE=%g\n", i, sigmaE[i], bE[i], aE[i]); //printf("[%d]\tsigmaH=%g,\tbH=%g,aH=%g\n", i, sigmaH[i], bH[i], aH[i]); } free(sigmaE); free(sigmaH); // Copy arrays from host to constant memory hipMemcpyToSymbol(rcmbE, bE, 2*Npml*sizeof(float)); hipMemcpyToSymbol(rcmaE, aE, 2*Npml*sizeof(float)); hipMemcpyToSymbol(rcmbH, bH, 2*Npml*sizeof(float)); hipMemcpyToSymbol(rcmaH, aH, 2*Npml*sizeof(float)); // Allocate device memory P1F3 devE, devH; P1F3 devCE; int z_size = N.z*sizeof(float); size_t pitch; hipMallocPitch ( (void**) &devE.x, &pitch, z_size, (N.x+1)*N.y ); hipMallocPitch ( (void**) &devE.y, &pitch, z_size, (N.x+1)*N.y ); hipMallocPitch ( (void**) &devE.z, &pitch, z_size, (N.x+1)*N.y ); hipMallocPitch ( (void**) &devH.x, &pitch, z_size, (N.x+1)*N.y ); hipMallocPitch ( (void**) &devH.y, &pitch, z_size, (N.x+1)*N.y ); hipMallocPitch ( (void**) &devH.z, &pitch, z_size, (N.x+1)*N.y ); hipMallocPitch ( (void**) &devCE.x, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devCE.y, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devCE.z, &pitch, z_size, N.x*N.y ); // Allocate device memory for CPML P1F6 psixE, psiyE, psizE; P1F6 psixH, psiyH, psizH; hipMallocPitch ( (void**) &psixE.y.f, &pitch, z_size, Npml*N.y ); hipMallocPitch ( (void**) &psixE.y.b, &pitch, z_size, Npml*N.y ); hipMallocPitch ( (void**) &psixE.z.f, &pitch, z_size, Npml*N.y ); hipMallocPitch ( (void**) &psixE.z.b, &pitch, z_size, Npml*N.y ); hipMallocPitch ( (void**) &psiyE.z.f, &pitch, z_size, N.x*Npml ); hipMallocPitch ( (void**) &psiyE.z.b, &pitch, z_size, N.x*Npml ); hipMallocPitch ( (void**) &psiyE.x.f, &pitch, z_size, N.x*Npml ); hipMallocPitch ( (void**) &psiyE.x.b, &pitch, z_size, N.x*Npml ); hipMallocPitch ( (void**) &psixH.y.f, &pitch, z_size, Npml*N.y ); hipMallocPitch ( (void**) &psixH.y.b, &pitch, z_size, Npml*N.y ); hipMallocPitch ( (void**) &psixH.z.f, &pitch, z_size, Npml*N.y ); hipMallocPitch ( (void**) &psixH.z.b, &pitch, z_size, Npml*N.y ); 
hipMallocPitch ( (void**) &psiyH.z.f, &pitch, z_size, N.x*Npml ); hipMallocPitch ( (void**) &psiyH.z.b, &pitch, z_size, N.x*Npml ); hipMallocPitch ( (void**) &psiyH.x.f, &pitch, z_size, N.x*Npml ); hipMallocPitch ( (void**) &psiyH.x.b, &pitch, z_size, N.x*Npml ); int z_size_pml = Npml*sizeof(float); size_t pitch_pmlz; hipMallocPitch ( (void**) &psizE.x.f, &pitch_pmlz, z_size_pml, N.x*N.y ); hipMallocPitch ( (void**) &psizE.x.b, &pitch_pmlz, z_size_pml, N.x*N.y ); hipMallocPitch ( (void**) &psizE.y.f, &pitch_pmlz, z_size_pml, N.x*N.y ); hipMallocPitch ( (void**) &psizE.y.b, &pitch_pmlz, z_size_pml, N.x*N.y ); hipMallocPitch ( (void**) &psizH.x.f, &pitch_pmlz, z_size_pml, N.x*N.y ); hipMallocPitch ( (void**) &psizH.x.b, &pitch_pmlz, z_size_pml, N.x*N.y ); hipMallocPitch ( (void**) &psizH.y.f, &pitch_pmlz, z_size_pml, N.x*N.y ); hipMallocPitch ( (void**) &psizH.y.b, &pitch_pmlz, z_size_pml, N.x*N.y ); // Copy arrays from host to device hipMemcpy2D ( devCE.x, pitch, CE.x[0][0], z_size, z_size, N.x*N.y, hipMemcpyHostToDevice ); hipMemcpy2D ( devCE.y, pitch, CE.y[0][0], z_size, z_size, N.x*N.y, hipMemcpyHostToDevice ); hipMemcpy2D ( devCE.z, pitch, CE.z[0][0], z_size, z_size, N.x*N.y, hipMemcpyHostToDevice ); int Nz_pitch = pitch/4; int Nzpml_pitch = pitch_pmlz/4; printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch); printf("pitch_pmlz= %u, Nzpml_pitch= %d\n", pitch_pmlz, Nzpml_pitch); // Set the GPU parameters // TPB: Number of threads per block // BPG: Number of thread blocks per grid int Ntot = N.x*N.y*Nz_pitch; int TPBmain = selectTPB(N.x, N.y); int BPGmain = Ntot%TPBmain == 0 ? Ntot/TPBmain : Ntot/TPBmain + 1; dim3 Dg = dim3(BPGmain); dim3 Db = dim3(TPBmain); size_t Ns = sizeof(float)*( 2*(TPBmain+1)+(TPBmain) ); printf("Threads per block: %d\n", TPBmain); printf("Blocks per grid: %d\n", BPGmain); verify_over_TPB( TPBmain ); verify_over_BPG( BPGmain ); printf("Number of bytes in shared memory: %d\n", Ns); //int TPBsrc = N.x; int TPBsrc = N.z; int BPGsrc = 1; dim3 Dgsrc(BPGsrc); dim3 Dbsrc(TPBsrc); int TPBpmlx = selectTPB(Npml, N.y); int Ntotpmlx = Npml*N.y*Nz_pitch; int BPGpmlx = Ntotpmlx%TPBpmlx == 0 ? Ntotpmlx/TPBpmlx : Ntotpmlx/TPBpmlx + 1; dim3 Dgpmlx(BPGpmlx); dim3 Dbpmlx(TPBpmlx); printf("CPMLx: Threads per block: %d\n", TPBpmlx); printf("CPMLx: Blocks per grid: %d\n", BPGpmlx); verify_over_BPG( BPGpmlx ); int TPBpmly = selectTPB(N.x, Npml); int Ntotpmly = N.x*Npml*Nz_pitch; int BPGpmly = Ntotpmly%TPBpmly == 0 ? Ntotpmly/TPBpmly : Ntotpmly/TPBpmly + 1; dim3 Dgpmly(BPGpmly); dim3 Dbpmly(TPBpmly); printf("CPMLy: Threads per block: %d\n", TPBpmly); printf("CPMLy: Blocks per grid: %d\n", BPGpmly); verify_over_BPG( BPGpmly ); int TPBpmlz = selectTPB(N.x, N.y); int Ntotpmlz = N.x*N.y*Nzpml_pitch; int BPGpmlz = Ntotpmlz%TPBpmlz == 0 ? 
Ntotpmlz/TPBpmlz : Ntotpmlz/TPBpmlz + 1; dim3 Dgpmlz(BPGpmlz); dim3 Dbpmlz(TPBpmlz); printf("CPMLz: Threads per block: %d\n", TPBpmlz); printf("CPMLz: Blocks per grid: %d\n", BPGpmlz); verify_over_BPG( BPGpmlz ); size_t Nspmlz = sizeof(float)*( 2*(TPBpmlz+1) ); // Initialize the device arrays initMainArrays ( N, Nz_pitch, devE ); initMainArrays ( N, Nz_pitch, devH ); initPsiArrays ( N, Nz_pitch, Nzpml_pitch, psixE, psiyE, psizE ); initPsiArrays ( N, Nz_pitch, Nzpml_pitch, psixH, psiyH, psizH ); // Main time loop t0 = time(0); //for ( tstep=1; tstep<=TMAX; tstep++) { for ( tstep=1; tstep<=500; tstep++) { // Update on the GPU hipLaunchKernelGGL(( updateE) , dim3(Dg),dim3(Db),Ns, 0, Nz_pitch, N.y*Nz_pitch, TPBmain, devE, devH, devCE ); update_boundary_E(N, Nz_pitch, N.y*Nz_pitch, devE); hipLaunchKernelGGL(( updateCPMLxE) , dim3(Dgpmlx),dim3(Dbpmlx), 0, 0, N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, devCE, psixE.y.f, psixE.z.f, 0); hipLaunchKernelGGL(( updateCPMLxE) , dim3(Dgpmlx),dim3(Dbpmlx), 0, 0, N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, devCE, psixE.y.b, psixE.z.b, 1); hipLaunchKernelGGL(( updateCPMLyE) , dim3(Dgpmly),dim3(Dbpmly), 0, 0, N.y, Nz_pitch, Npml*Nz_pitch, TPBpmly, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0); hipLaunchKernelGGL(( updateCPMLyE) , dim3(Dgpmly),dim3(Dbpmly), 0, 0, N.y, Nz_pitch, Npml*Nz_pitch, TPBpmly, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1); //updateCPMLzE <<<Dgpmlz,Dbpmlz,Nspmlz>>> ( N.y, Nz_pitch, Nzpml_pitch, TPBpmlz, devE, devH, devCE, psizE.x.f, psizE.y.f, 0); hipLaunchKernelGGL(( updateSrc) , dim3(Dgsrc),dim3(Dbsrc), 0, 0, N, Nz_pitch, devE, tstep ); hipLaunchKernelGGL(( updateH) , dim3(Dg),dim3(Db),Ns, 0, Nz_pitch, N.y*Nz_pitch, TPBmain, devE, devH ); update_boundary_H(N, Nz_pitch, N.y*Nz_pitch, devH); hipLaunchKernelGGL(( updateCPMLxH) , dim3(Dgpmlx),dim3(Dbpmlx), 0, 0, N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, psixH.y.f, psixH.z.f, 0); hipLaunchKernelGGL(( updateCPMLxH) , dim3(Dgpmlx),dim3(Dbpmlx), 0, 0, N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, psixH.y.b, psixH.z.b, 1); hipLaunchKernelGGL(( updateCPMLyH) , dim3(Dgpmly),dim3(Dbpmly), 0, 0, N.y, Nz_pitch, Npml*Nz_pitch, TPBpmly, devE, devH, psiyH.z.f, psiyH.x.f, 0); hipLaunchKernelGGL(( updateCPMLyH) , dim3(Dgpmly),dim3(Dbpmly), 0, 0, N.y, Nz_pitch, Npml*Nz_pitch, TPBpmly, devE, devH, psiyH.z.b, psiyH.x.b, 1); if ( tstep/10*10 == tstep ) { // Copy arrays from device to host //hipMemcpy2D( Ex[0][0], z_size, devE.x, pitch, z_size, (N.x+1)*N.y, hipMemcpyDeviceToHost ); hipMemcpy2D( Ez[0][0], z_size, devE.z, pitch, z_size, (N.x+1)*N.y, hipMemcpyDeviceToHost ); //print_array(N, Ex); //dumpToH5(N.x+1, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep); //exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); dumpToH5(N.x+1, N.y, N.z, 0, 0, N.z/2, N.x, N.y-1, N.z/2, Ez, "gpu_png/Ez-%05d.h5", tstep); //dumpToH5(N.x+1, N.y, N.z, 0, 0, 0, N.x, N.y-1, 0, Ez, "gpu_png/Ez-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep); updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); } } updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); //update_boundary_E(N, Nz_pitch, N.y*Nz_pitch, devE); //update_boundary_H(N, Nz_pitch, N.y*Nz_pitch, devH); //for ( tstep=1; tstep<=10; tstep++ ) updateE <<<Dg,Db,Ns>>> ( Nz_pitch, N.y*Nz_pitch, TPBmain, devE, devH, devCE ); //for ( tstep=1; tstep<=10; tstep++ ) updateH 
<<<Dg,Db,Ns>>> ( N, Nz_pitch, TPBmain, devE, devH ); //for ( tstep=1; tstep<=10; tstep++ )hipLaunchKernelGGL(( updateCPMLxE) , dim3(Dgpmlx),dim3(Dbpmlx), 0, 0, N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, devCE, psixE.y.f, psixE.z.f, 0); //for ( tstep=1; tstep<=10; tstep++ )hipLaunchKernelGGL(( updateCPMLxE) , dim3(Dgpmlx),dim3(Dbpmlx), 0, 0, N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, devCE, psixE.y.b, psixE.z.b, 1); }
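The FDTD file above lays every field out as N.x*N.y pitched rows of N.z floats via hipMallocPitch / cudaMallocPitch plus cudaMemcpy2D, and then uses Nz_pitch = pitch/4 as the device row stride. A self-contained sketch of that allocate-and-copy pattern follows; the tiny grid sizes are placeholders, not the 200x200x200 grid the file configures.

// Minimal sketch of the pitched-allocation pattern the FDTD file relies on:
// each (x,y) grid point becomes one row of Nz floats, and the returned pitch
// (in bytes) replaces Nz as the row stride on the device.  Sizes are assumed.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

int main()
{
    const int Nx = 4, Ny = 4, Nz = 30;        // illustrative, not the file's grid
    size_t pitch = 0;
    float *devF = 0;

    // one row per (x,y) point, Nz floats wide, padded by the driver to "pitch"
    cudaMallocPitch((void **)&devF, &pitch, Nz * sizeof(float), Nx * Ny);
    printf("requested row: %zu bytes, pitched row: %zu bytes (%zu floats)\n",
           Nz * sizeof(float), pitch, pitch / sizeof(float));

    // the host buffer stays tightly packed; cudaMemcpy2D bridges the two strides
    float *host = (float *)calloc(Nx * Ny * Nz, sizeof(float));
    cudaMemcpy2D(devF, pitch, host, Nz * sizeof(float),
                 Nz * sizeof(float), Nx * Ny, cudaMemcpyHostToDevice);

    free(host);
    cudaFree(devF);
    return 0;
}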
cd1c20423a84a57e766e93b356cdf5bece692327.cu
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <hdf5.h>

#define Npml 10

const float light_velocity = 2.99792458e8;	// m s-
const float ep0 = 8.85418781762038920e-12;	// F m-1 (permittivity at vacuum)
const float mu0 = 1.25663706143591730e-6;	// N A-2 (permeability at vacuum)
const float imp0 = sqrt( mu0/ep0 );	// (impedance at vacuum)
const float pi = 3.14159265358979323846;

// Allocate constant memory for CPML
__constant__ float rcmbE[2*Npml];
__constant__ float rcmaE[2*Npml];
__constant__ float rcmbH[2*Npml];
__constant__ float rcmaH[2*Npml];

typedef struct N3 {
    int x, y, z;
} N3;

typedef struct P3F3 {
    float ***x, ***y, ***z;
} P3F3;

typedef struct P1F3 {
    float *x, *y, *z;
} P1F3;

typedef struct P1F2 {
    float *f, *b;
} P1F2;

typedef struct P1F6 {
    P1F2 x, y, z;
} P1F6;

__host__ int selectTPB(int Nx, int Ny) {
    int Ntot = Nx*Ny;
    int TPB=1;
    if ( Ntot%32 == 0 ) TPB = 512;
    else if ( Ntot%16 == 0 ) TPB = 256;
    else if ( Ntot%8 == 0 ) TPB = 128;
    else printf("(%d,%d) mismatched TPB!\n", Nx, Ny);
    return TPB;
}

__host__ void verify_over_TPB(int TPB) {
    if ( TPB > 512 ) {
        printf("Error: An excessive number of threads per block.\n");
        exit(0);
    }
}

__host__ void verify_over_BPG(int BPG) {
    if ( BPG > 65535 ) {
        printf("Error: An excessive number of blocks per grid.\n");
        exit(0);
    }
}

__host__ void updateTimer(time_t t0, int tstep, char str[]) {
    int elapsedTime=(int)(time(0)-t0);
    sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60);
}

__host__ void exec(char *format, ...) {
    char str[1024];
    va_list ap;
    va_start(ap, format);
    vsprintf(str, format, ap);
    system(str);
}

__host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) {
    char filename[1024];
    va_list ap;
    va_start(ap, format);
    vsprintf(filename, format, ap);

    hid_t file, dataset, filespace, memspace;
    hsize_t dimsm[3] = { Ni, Nj, Nk };
    hsize_t start[3] = { is, js, ks };
    hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke };

    memspace = H5Screate_simple(3, dimsm, 0);
    filespace = H5Screate_simple(3, count, 0);
    file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
    H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]);

    H5Dclose(dataset);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Fclose(file);
}

__host__ void print_array(N3 N, float ***a) {
    int j,k;
    for (j=0; j<N.y; j++) {
        for (k=0; k<N.z; k++) {
            printf("%1.4f\t", a[N.x/2][j][k]);
        }
        printf("\n");
    }
    printf("\n");
}

__host__ float ***makeArray(N3 N) {
    float ***f;
    f = (float ***) calloc (N.x, sizeof(float **));
    f[0] = (float **) calloc (N.y*N.x, sizeof(float *));
    f[0][0] = (float *) calloc (N.z*N.y*N.x, sizeof(float));
    for (int i=0; i<N.x; i++) f[i] = f[0] + i*N.y;
    for (int i=0; i<N.y*N.x; i++) f[0][i] = f[0][0] + i*N.z;
    return f;
}

__host__ void set_geometry(N3 N, P3F3 CE) {
    int i,j,k;
    for (i=0; i<N.x; i++) {
        for (j=0; j<N.y; j++) {
            for (k=0; k<N.z; k++) {
                CE.x[i][j][k] = 0.5;
                CE.y[i][j][k] = 0.5;
                CE.z[i][j][k] = 0.5;
            }
        }
    }
}

__global__ void initArray(int Ntot, float *a) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if ( idx < Ntot ) a[idx] = 0;
}

__host__ void initMainArrays(N3 N, int Nzpit, P1F3 F) {
    int TPB=512;
    int Ntot = (N.x+1)*N.y*Nzpit;
    int BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    verify_over_BPG( BPG );
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, F.x);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, F.y);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, F.z);
}

__host__ void initPsiArrays(N3 N, int Nzpit, int Npmlpit, P1F6 psix, P1F6 psiy, P1F6 psiz) {
    int TPB=512;
    int Ntot, BPG;

    Ntot = Npml*N.y*Nzpit;
    BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    verify_over_BPG( BPG );
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psix.y.f);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psix.y.b);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psix.z.f);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psix.z.b);

    Ntot = N.x*Npml*Nzpit;
    BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    verify_over_BPG( BPG );
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psiy.z.f);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psiy.z.b);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psiy.x.f);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psiy.x.b);

    Ntot = N.x*N.y*Npmlpit;
    BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    verify_over_BPG( BPG );
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psiz.x.f);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psiz.x.b);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psiz.y.f);
    initArray <<<dim3(BPG),dim3(TPB)>>> (Ntot, psiz.y.b);
}

__host__ void freeMainArrays(P1F3 F) {
    cudaFree(F.x);
    cudaFree(F.y);
    cudaFree(F.z);
}

__host__ void freePsiArrays(P1F6 psix, P1F6 psiy, P1F6 psiz) {
    cudaFree(psix.y.f);
    cudaFree(psix.y.b);
    cudaFree(psix.z.f);
    cudaFree(psix.z.b);
    cudaFree(psiy.z.f);
    cudaFree(psiy.z.b);
    cudaFree(psiy.x.f);
    cudaFree(psiy.x.b);
    cudaFree(psiz.x.f);
    cudaFree(psiz.x.b);
    cudaFree(psiz.y.f);
    cudaFree(psiz.y.b);
}

__global__ void updateE(int Nzpit, int Nyzpit, int TPB, P1F3 E, P1F3 H, P1F3 CE) {
    int tk = threadIdx.x;
    int idx = blockIdx.x*TPB + tk;
    int eidx = idx + Nyzpit;

    extern __shared__ float hs[];
    float* hx = (float*) hs;
    float* hy = (float*) &hx[TPB+1];
    float* hz = (float*) &hy[TPB+1];

    hx[tk] = H.x[idx];
    hy[tk] = H.y[idx];
    hz[tk] = H.z[idx];
    if ( tk==TPB-1 ) {
        hx[tk+1] = H.x[idx+1];
        hy[tk+1] = H.y[idx+1];
    }
    __syncthreads();

    E.x[eidx] += CE.x[idx]*( H.z[idx+Nzpit] - hz[tk] - hy[tk+1] + hy[tk] );
    E.y[eidx] += CE.y[idx]*( hx[tk+1] - hx[tk] - H.z[idx+Nyzpit] + hz[tk] );
    E.z[eidx] += CE.z[idx]*( H.y[idx+Nyzpit] - hy[tk] - H.x[idx+Nzpit] + hx[tk] );
}

__global__ void updateH(int Nzpit, int Nyzpit, int TPB, P1F3 E, P1F3 H) {
    int tk = threadIdx.x;
    int idx = blockIdx.x*TPB + tk;
    int eidx = idx + Nyzpit;

    extern __shared__ float es[];
    float* ex = (float*) es;
    float* ey = (float*) &ex[TPB+1];
    float* ez = (float*) &ey[TPB+1];

    ex[tk+1] = E.x[eidx];
    ey[tk+1] = E.y[eidx];
    ez[tk] = E.z[eidx];
    if ( tk==0 ) {
        ex[0] = E.x[eidx-1];
        ey[0] = E.y[eidx-1];
    }
    __syncthreads();

    H.x[idx] -= 0.5*( ez[tk] - E.z[eidx-Nzpit] - ey[tk+1] + ey[tk] );
    H.y[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + E.z[eidx-Nyzpit] );
    H.z[idx] -= 0.5*( ey[tk+1] - E.y[eidx-Nyzpit] - ex[tk+1] + E.x[eidx-Nzpit] );
}

__global__ void updateSrc(N3 N, int Nzpit, P1F3 E, int tstep) {
    int idx = threadIdx.x;
    // int ijk = idx*(N.y)*(Nzpit) + (N.y/2)*(Nzpit) + (N.z/2);
    int ijk = (N.x/2+1)*(N.y)*(Nzpit) + (N.y/2)*(Nzpit) + idx;

    //E.x[ijk] += sin(0.1*tstep);
    E.z[ijk] += sin(0.1*tstep);
}

__global__ void updateCPMLxE( int Nx, int Nzpit, int Nyzpit, int TPB, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) {
    int pidx = blockIdx.x*TPB + threadIdx.x;
    int pi = pidx/Nyzpit + backward*Npml;
    int idx = pidx + backward*(Nx-Npml-1)*Nyzpit;
    int eidx = idx + Nyzpit;

    psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( H.z[idx+Nyzpit] - H.z[idx] );
    E.y[eidx] -= CE.y[idx]*psi1[pidx];
    psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( H.y[idx+Nyzpit] - H.y[idx] );
    E.z[eidx] += CE.z[idx]*psi2[pidx];
}

__global__ void updateCPMLxH( int Nx, int Nzpit, int Nyzpit, int TPB, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) {
    int pidx = blockIdx.x*TPB + threadIdx.x;
    int pi = pidx/Nyzpit + backward*Npml;
    int idx = pidx + backward*(Nx-Npml)*Nyzpit;
    int eidx = idx + Nyzpit;

    psi1[pidx] = rcmbH[pi]*psi1[pidx] + rcmaH[pi]*( E.z[eidx] - E.z[eidx-Nyzpit] );
    H.y[idx] += 0.5*psi1[pidx];
    psi2[pidx] = rcmbH[pi]*psi2[pidx] + rcmaH[pi]*( E.y[eidx] - E.y[eidx-Nyzpit] );
    H.z[idx] -= 0.5*psi2[pidx];
}

__global__ void updateCPMLyE( int Ny, int Nzpit, int Npmlzpit, int TPB, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) {
    int pidx = blockIdx.x*TPB + threadIdx.x;
    int i = pidx/Npmlzpit;
    int pj = ( pidx - i*Npmlzpit )/Nzpit + backward*Npml;
    int idx = pidx + (i+backward)*(Ny-Npml)*Nzpit - backward*Nzpit;
    int eidx = idx + Ny*Nzpit;

    psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( H.x[idx+Nzpit] - H.x[idx] );
    E.z[eidx] -= CE.z[idx]*psi1[pidx];
    psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( H.z[idx+Nzpit] - H.z[idx] );
    E.x[eidx] += CE.x[idx]*psi2[pidx];
}

__global__ void updateCPMLyH( int Ny, int Nzpit, int Npmlzpit, int TPB, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) {
    int pidx = blockIdx.x*TPB + threadIdx.x;
    int i = pidx/Npmlzpit;
    int pj = ( pidx - i*Npmlzpit )/Nzpit + backward*Npml;
    int idx = pidx + (i+backward)*(Ny-Npml)*Nzpit;
    int eidx = idx + Ny*Nzpit;

    psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( E.x[eidx] - E.x[eidx-Nzpit] );
    H.z[idx] += 0.5*psi1[pidx];
    psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( E.z[eidx] - E.z[eidx-Nzpit] );
    H.x[idx] -= 0.5*psi2[pidx];
}

__global__ void updateCPMLzE( int Ny, int Nzpit, int Npmlpit, int TPB, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) {
    int pidx = blockIdx.x*TPB + threadIdx.x;
    int i = pidx/Ny*Npmlpit;
    int j = ( pidx - i*Ny*Npmlpit )/Npmlpit;
    int pk = pidx - i*Ny*Npmlpit - j*Npmlpit;
    int idx = pidx + (j+i*Ny)*(Nzpit-Npmlpit);// + backward*(N.z-Npml-1);
    int eidx = idx + Ny*Nzpit;

    extern __shared__ float hs[];
    float* hx = (float*) hs;
    float* hy = (float*) &hx[TPB+1];

    hx[pk] = H.x[idx];
    hy[pk] = H.y[idx];
    if ( pk==TPB-1 ) {
        hx[pk+1] = H.x[idx+1];
        hy[pk+1] = H.y[idx+1];
    }
    __syncthreads();

    if ( pk<Npml ) {
        int pi = pk + backward*Npml;
        psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( hy[pk+1] - hy[pk] );
        E.x[eidx] -= CE.x[idx]*psi1[pidx];
        psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( hx[pk+1] - hx[pk] );
        E.y[eidx] += CE.y[idx]*psi2[pidx];
    }
}

__global__ void init_boundary_xE(N3 N, int Nyzpit, P1F3 E) {
    int idx = N.x*Nyzpit + blockIdx.x*blockDim.x + threadIdx.x;
    if ( idx/Nyzpit == N.x ) {
        E.y[idx] = 0;
        E.z[idx] = 0;
    }
}

__global__ void init_boundary_yE(N3 N, int Nzpit, int Nyzpit, P1F3 E) {
    int ti = blockIdx.x*blockDim.x + threadIdx.x;
    int i = ti/Nzpit;
    int idx = (i+2)*Nyzpit - (i+1)*Nzpit + ti;
    if ( i<N.x ) {
        E.z[idx] = 0;
        E.x[idx] = 0;
    }
}

__global__ void init_boundary_zE(N3 N, int Nzpit, int Nyzpit, P1F3 E) {
    int ti = blockIdx.x*blockDim.x + threadIdx.x;
    int i = ti/N.y;
    int idx = (N.z-1) + (i+1)*Nyzpit + (ti-i*N.y)*Nzpit;
    if ( i<N.x ) {
        E.x[idx] = 0;
        E.y[idx] = 0;
    }
}

__global__ void init_boundary_xH(N3 N, int Nyzpit, P1F3 H) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if ( idx/Nyzpit == 0 ) {
        H.x[idx] = 0;
        H.y[idx] = 0;
        H.z[idx] = 0;
    }
}

__global__ void init_boundary_yH(N3 N, int Nzpit, int Nyzpit, P1F3 H) {
    int ti = blockIdx.x*blockDim.x + threadIdx.x;
    int i = ti/Nzpit;
    int idx = i*Nyzpit - i*Nzpit + ti;
    if ( i<N.x ) {
        H.x[idx] = 0;
        H.y[idx] = 0;
        H.z[idx] = 0;
    }
}

__global__ void init_boundary_zH(N3 N, int Nzpit, int Nyzpit, P1F3 H) {
    int ti = blockIdx.x*blockDim.x + threadIdx.x;
    int i = ti/N.y;
    int idx = i*Nyzpit + (ti-i*N.y)*Nzpit;
    //int i0 = idx/Nyzpit;
    //int j0 = (idx - i0*Nyzpit)/Nzpit;
    //int k0 = idx - i0*Nyzpit - j0*Nzpit;
    //printf("[%d,%d,%d] %d\n", i0,j0,k0,idx);
    if ( i<N.x ) {
        //printf("\t\t\tIn: [%d,%d,%d] %d\n", i0,j0,k0,idx);
        H.x[idx] = 0;
        H.y[idx] = 0;
        H.z[idx] = 0;
    }
}

__host__ void update_boundary_E(N3 N, int Nzpit, int Nyzpit, P1F3 devE) {
    int Ntot, BPG;
    int TPB = 128;
    /*
    Ntot = Nyzpit;
    BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    init_boundary_xE <<<dim3(BPG),dim3(TPB)>>> ( N, Nyzpit, devE );

    Ntot = N.x*Nzpit;
    BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    init_boundary_yE <<<dim3(BPG),dim3(TPB)>>> ( N, Nzpit, Nyzpit, devE );
    */
    Ntot = N.x*N.y;
    BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    init_boundary_zE <<<dim3(BPG),dim3(TPB)>>> ( N, Nzpit, Nyzpit, devE );
}

__host__ void update_boundary_H(N3 N, int Nzpit, int Nyzpit, P1F3 devH) {
    int Ntot, BPG;
    int TPB = 128;
    /*
    Ntot = Nyzpit;
    BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    init_boundary_xH <<<dim3(BPG),dim3(TPB)>>> ( N, Nyzpit, devH );

    Ntot = N.x*Nzpit;
    BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    init_boundary_yH <<<dim3(BPG),dim3(TPB)>>> ( N, Nzpit, Nyzpit, devH );
    */
    Ntot = N.x*N.y;
    BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1;
    init_boundary_zH <<<dim3(BPG),dim3(TPB)>>> ( N, Nzpit, Nyzpit, devH );
}

int main() {
    int tstep;
    char time_str[32];
    time_t t0;
    int i;

    // Set the parameters
    N3 N;
    N.x = 200;
    N.y = 200;
    N.z = 200;
    //N.y = 16;
    //N.z = 20;
    int TMAX = 10000;
    float S = 0.5;
    float dx = 10e-9;
    float dt = S*dx/light_velocity;
    printf("NPML=%d\n", Npml);
    printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX);

    // Allocate host memory
    P3F3 CE;
    CE.x = makeArray(N);
    CE.y = makeArray(N);
    CE.z = makeArray(N);
    float ***Ex, ***Ez;
    N3 Nxp;
    Nxp.x = N.x+1;
    Nxp.y = N.y;
    Nxp.z = N.z;
    Ex = makeArray(Nxp);
    Ez = makeArray(Nxp);

    // Geometry
    set_geometry(N, CE);

    // CPML
    int m = 4;	// grade_order
    float sigma_max = (m+1.)/(15*pi*Npml*dx);
    float alpha = 0.05;
    float *sigmaE, *bE, *aE;
    float *sigmaH, *bH, *aH;

    sigmaE = (float *) calloc (2*Npml, sizeof(float));
    sigmaH = (float *) calloc (2*Npml, sizeof(float));
    bE = (float *) calloc (2*Npml, sizeof(float));
    bH = (float *) calloc (2*Npml, sizeof(float));
    aE = (float *) calloc (2*Npml, sizeof(float));
    aH = (float *) calloc (2*Npml, sizeof(float));

    for (i=0; i<Npml; i++) {
        sigmaE[i] = pow( (Npml-0.5-i)/Npml, m )*sigma_max;
        sigmaE[i+Npml] = pow( (0.5+i)/Npml, m )*sigma_max;
        sigmaH[i] = pow( (float)(Npml-i)/Npml, m )*sigma_max;
        sigmaH[i+Npml] = pow( (1.+i)/Npml, m )*sigma_max;
    }
    for (i=0; i<2*Npml; i++) {
        bE[i] = exp( -(sigmaE[i] + alpha)*dt/ep0 );
        bH[i] = exp( -(sigmaH[i] + alpha)*dt/ep0 );
        aE[i] = sigmaE[i]/(sigmaE[i]+alpha)*(bE[i]-1);
        aH[i] = sigmaH[i]/(sigmaH[i]+alpha)*(bH[i]-1);
        //printf("[%d]\tsigmaE=%g,\tbE=%g,aE=%g\n", i, sigmaE[i], bE[i], aE[i]);
        //printf("[%d]\tsigmaH=%g,\tbH=%g,aH=%g\n", i, sigmaH[i], bH[i], aH[i]);
    }
    free(sigmaE);
    free(sigmaH);

    // Copy arrays from host to constant memory
    cudaMemcpyToSymbol(rcmbE, bE, 2*Npml*sizeof(float));
    cudaMemcpyToSymbol(rcmaE, aE, 2*Npml*sizeof(float));
    cudaMemcpyToSymbol(rcmbH, bH, 2*Npml*sizeof(float));
    cudaMemcpyToSymbol(rcmaH, aH, 2*Npml*sizeof(float));

    // Allocate device memory
    P1F3 devE, devH;
    P1F3 devCE;
    int z_size = N.z*sizeof(float);
    size_t pitch;
    cudaMallocPitch ( (void**) &devE.x, &pitch, z_size, (N.x+1)*N.y );
    cudaMallocPitch ( (void**) &devE.y, &pitch, z_size, (N.x+1)*N.y );
    cudaMallocPitch ( (void**) &devE.z, &pitch, z_size, (N.x+1)*N.y );
    cudaMallocPitch ( (void**) &devH.x, &pitch, z_size, (N.x+1)*N.y );
    cudaMallocPitch ( (void**) &devH.y, &pitch, z_size, (N.x+1)*N.y );
    cudaMallocPitch ( (void**) &devH.z, &pitch, z_size, (N.x+1)*N.y );
    cudaMallocPitch ( (void**) &devCE.x, &pitch, z_size, N.x*N.y );
    cudaMallocPitch ( (void**) &devCE.y, &pitch, z_size, N.x*N.y );
    cudaMallocPitch ( (void**) &devCE.z, &pitch, z_size, N.x*N.y );

    // Allocate device memory for CPML
    P1F6 psixE, psiyE, psizE;
    P1F6 psixH, psiyH, psizH;
    cudaMallocPitch ( (void**) &psixE.y.f, &pitch, z_size, Npml*N.y );
    cudaMallocPitch ( (void**) &psixE.y.b, &pitch, z_size, Npml*N.y );
    cudaMallocPitch ( (void**) &psixE.z.f, &pitch, z_size, Npml*N.y );
    cudaMallocPitch ( (void**) &psixE.z.b, &pitch, z_size, Npml*N.y );
    cudaMallocPitch ( (void**) &psiyE.z.f, &pitch, z_size, N.x*Npml );
    cudaMallocPitch ( (void**) &psiyE.z.b, &pitch, z_size, N.x*Npml );
    cudaMallocPitch ( (void**) &psiyE.x.f, &pitch, z_size, N.x*Npml );
    cudaMallocPitch ( (void**) &psiyE.x.b, &pitch, z_size, N.x*Npml );
    cudaMallocPitch ( (void**) &psixH.y.f, &pitch, z_size, Npml*N.y );
    cudaMallocPitch ( (void**) &psixH.y.b, &pitch, z_size, Npml*N.y );
    cudaMallocPitch ( (void**) &psixH.z.f, &pitch, z_size, Npml*N.y );
    cudaMallocPitch ( (void**) &psixH.z.b, &pitch, z_size, Npml*N.y );
    cudaMallocPitch ( (void**) &psiyH.z.f, &pitch, z_size, N.x*Npml );
    cudaMallocPitch ( (void**) &psiyH.z.b, &pitch, z_size, N.x*Npml );
    cudaMallocPitch ( (void**) &psiyH.x.f, &pitch, z_size, N.x*Npml );
    cudaMallocPitch ( (void**) &psiyH.x.b, &pitch, z_size, N.x*Npml );

    int z_size_pml = Npml*sizeof(float);
    size_t pitch_pmlz;
    cudaMallocPitch ( (void**) &psizE.x.f, &pitch_pmlz, z_size_pml, N.x*N.y );
    cudaMallocPitch ( (void**) &psizE.x.b, &pitch_pmlz, z_size_pml, N.x*N.y );
    cudaMallocPitch ( (void**) &psizE.y.f, &pitch_pmlz, z_size_pml, N.x*N.y );
    cudaMallocPitch ( (void**) &psizE.y.b, &pitch_pmlz, z_size_pml, N.x*N.y );
    cudaMallocPitch ( (void**) &psizH.x.f, &pitch_pmlz, z_size_pml, N.x*N.y );
    cudaMallocPitch ( (void**) &psizH.x.b, &pitch_pmlz, z_size_pml, N.x*N.y );
    cudaMallocPitch ( (void**) &psizH.y.f, &pitch_pmlz, z_size_pml, N.x*N.y );
    cudaMallocPitch ( (void**) &psizH.y.b, &pitch_pmlz, z_size_pml, N.x*N.y );

    // Copy arrays from host to device
    cudaMemcpy2D ( devCE.x, pitch, CE.x[0][0], z_size, z_size, N.x*N.y, cudaMemcpyHostToDevice );
    cudaMemcpy2D ( devCE.y, pitch, CE.y[0][0], z_size, z_size, N.x*N.y, cudaMemcpyHostToDevice );
    cudaMemcpy2D ( devCE.z, pitch, CE.z[0][0], z_size, z_size, N.x*N.y, cudaMemcpyHostToDevice );

    int Nz_pitch = pitch/4;
    int Nzpml_pitch = pitch_pmlz/4;
    printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch);
    printf("pitch_pmlz= %u, Nzpml_pitch= %d\n", pitch_pmlz, Nzpml_pitch);

    // Set the GPU parameters
    // TPB: Number of threads per block
    // BPG: Number of thread blocks per grid
    int Ntot = N.x*N.y*Nz_pitch;
    int TPBmain = selectTPB(N.x, N.y);
    int BPGmain = Ntot%TPBmain == 0 ? Ntot/TPBmain : Ntot/TPBmain + 1;
    dim3 Dg = dim3(BPGmain);
    dim3 Db = dim3(TPBmain);
    size_t Ns = sizeof(float)*( 2*(TPBmain+1)+(TPBmain) );
    printf("Threads per block: %d\n", TPBmain);
    printf("Blocks per grid: %d\n", BPGmain);
    verify_over_TPB( TPBmain );
    verify_over_BPG( BPGmain );
    printf("Number of bytes in shared memory: %d\n", Ns);

    //int TPBsrc = N.x;
    int TPBsrc = N.z;
    int BPGsrc = 1;
    dim3 Dgsrc(BPGsrc);
    dim3 Dbsrc(TPBsrc);

    int TPBpmlx = selectTPB(Npml, N.y);
    int Ntotpmlx = Npml*N.y*Nz_pitch;
    int BPGpmlx = Ntotpmlx%TPBpmlx == 0 ? Ntotpmlx/TPBpmlx : Ntotpmlx/TPBpmlx + 1;
    dim3 Dgpmlx(BPGpmlx);
    dim3 Dbpmlx(TPBpmlx);
    printf("CPMLx: Threads per block: %d\n", TPBpmlx);
    printf("CPMLx: Blocks per grid: %d\n", BPGpmlx);
    verify_over_BPG( BPGpmlx );

    int TPBpmly = selectTPB(N.x, Npml);
    int Ntotpmly = N.x*Npml*Nz_pitch;
    int BPGpmly = Ntotpmly%TPBpmly == 0 ? Ntotpmly/TPBpmly : Ntotpmly/TPBpmly + 1;
    dim3 Dgpmly(BPGpmly);
    dim3 Dbpmly(TPBpmly);
    printf("CPMLy: Threads per block: %d\n", TPBpmly);
    printf("CPMLy: Blocks per grid: %d\n", BPGpmly);
    verify_over_BPG( BPGpmly );

    int TPBpmlz = selectTPB(N.x, N.y);
    int Ntotpmlz = N.x*N.y*Nzpml_pitch;
    int BPGpmlz = Ntotpmlz%TPBpmlz == 0 ? Ntotpmlz/TPBpmlz : Ntotpmlz/TPBpmlz + 1;
    dim3 Dgpmlz(BPGpmlz);
    dim3 Dbpmlz(TPBpmlz);
    printf("CPMLz: Threads per block: %d\n", TPBpmlz);
    printf("CPMLz: Blocks per grid: %d\n", BPGpmlz);
    verify_over_BPG( BPGpmlz );
    size_t Nspmlz = sizeof(float)*( 2*(TPBpmlz+1) );

    // Initialize the device arrays
    initMainArrays ( N, Nz_pitch, devE );
    initMainArrays ( N, Nz_pitch, devH );
    initPsiArrays ( N, Nz_pitch, Nzpml_pitch, psixE, psiyE, psizE );
    initPsiArrays ( N, Nz_pitch, Nzpml_pitch, psixH, psiyH, psizH );

    // Main time loop
    t0 = time(0);
    //for ( tstep=1; tstep<=TMAX; tstep++) {
    for ( tstep=1; tstep<=500; tstep++) {
        // Update on the GPU
        updateE <<<Dg,Db,Ns>>> ( Nz_pitch, N.y*Nz_pitch, TPBmain, devE, devH, devCE );
        update_boundary_E(N, Nz_pitch, N.y*Nz_pitch, devE);

        updateCPMLxE <<<Dgpmlx,Dbpmlx>>> ( N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, devCE, psixE.y.f, psixE.z.f, 0);
        updateCPMLxE <<<Dgpmlx,Dbpmlx>>> ( N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, devCE, psixE.y.b, psixE.z.b, 1);
        updateCPMLyE <<<Dgpmly,Dbpmly>>> ( N.y, Nz_pitch, Npml*Nz_pitch, TPBpmly, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0);
        updateCPMLyE <<<Dgpmly,Dbpmly>>> ( N.y, Nz_pitch, Npml*Nz_pitch, TPBpmly, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1);
        //updateCPMLzE <<<Dgpmlz,Dbpmlz,Nspmlz>>> ( N.y, Nz_pitch, Nzpml_pitch, TPBpmlz, devE, devH, devCE, psizE.x.f, psizE.y.f, 0);

        updateSrc <<<Dgsrc,Dbsrc>>> ( N, Nz_pitch, devE, tstep );

        updateH <<<Dg,Db,Ns>>> ( Nz_pitch, N.y*Nz_pitch, TPBmain, devE, devH );
        update_boundary_H(N, Nz_pitch, N.y*Nz_pitch, devH);

        updateCPMLxH <<<Dgpmlx,Dbpmlx>>> ( N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, psixH.y.f, psixH.z.f, 0);
        updateCPMLxH <<<Dgpmlx,Dbpmlx>>> ( N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, psixH.y.b, psixH.z.b, 1);
        updateCPMLyH <<<Dgpmly,Dbpmly>>> ( N.y, Nz_pitch, Npml*Nz_pitch, TPBpmly, devE, devH, psiyH.z.f, psiyH.x.f, 0);
        updateCPMLyH <<<Dgpmly,Dbpmly>>> ( N.y, Nz_pitch, Npml*Nz_pitch, TPBpmly, devE, devH, psiyH.z.b, psiyH.x.b, 1);

        if ( tstep/10*10 == tstep ) {
            // Copy arrays from device to host
            //cudaMemcpy2D( Ex[0][0], z_size, devE.x, pitch, z_size, (N.x+1)*N.y, cudaMemcpyDeviceToHost );
            cudaMemcpy2D( Ez[0][0], z_size, devE.z, pitch, z_size, (N.x+1)*N.y, cudaMemcpyDeviceToHost );

            //print_array(N, Ex);
            //dumpToH5(N.x+1, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep);
            //exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep);
            dumpToH5(N.x+1, N.y, N.z, 0, 0, N.z/2, N.x, N.y-1, N.z/2, Ez, "gpu_png/Ez-%05d.h5", tstep);
            //dumpToH5(N.x+1, N.y, N.z, 0, 0, 0, N.x, N.y-1, 0, Ez, "gpu_png/Ez-%05d.h5", tstep);
            exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep);

            updateTimer(t0, tstep, time_str);
            printf("tstep=%d\t%s\n", tstep, time_str);
        }
    }
    updateTimer(t0, tstep, time_str);
    printf("tstep=%d\t%s\n", tstep, time_str);

    //update_boundary_E(N, Nz_pitch, N.y*Nz_pitch, devE);
    //update_boundary_H(N, Nz_pitch, N.y*Nz_pitch, devH);
    //for ( tstep=1; tstep<=10; tstep++ ) updateE <<<Dg,Db,Ns>>> ( Nz_pitch, N.y*Nz_pitch, TPBmain, devE, devH, devCE );
    //for ( tstep=1; tstep<=10; tstep++ ) updateH <<<Dg,Db,Ns>>> ( N, Nz_pitch, TPBmain, devE, devH );
    //for ( tstep=1; tstep<=10; tstep++ ) updateCPMLxE <<<Dgpmlx,Dbpmlx>>> ( N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, devCE, psixE.y.f, psixE.z.f, 0);
    //for ( tstep=1; tstep<=10; tstep++ ) updateCPMLxE <<<Dgpmlx,Dbpmlx>>> ( N.x, Nz_pitch, N.y*Nz_pitch, TPBpmlx, devE, devH, devCE, psixE.y.b, psixE.z.b, 1);
}
c8208bab81cc34cab7265829c1c6d65c1adc35d2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "CudaLib.h"
#include <stdio.h>

namespace Procon2018 {
    namespace CudaLib {
        __global__ void d_CalcAreaPoint(int ***gc) {
        }
    }
}

hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);

    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    hipError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }

    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

Error:
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);

    return cudaStatus;
}
c8208bab81cc34cab7265829c1c6d65c1adc35d2.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "CudaLib.h" #include <stdio.h> namespace Procon2018 { namespace CudaLib { __global__ void d_CalcAreaPoint(int ***gc) { } } } cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }