hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
54d91fbbfd82f2bb1c37654c010740bc1a695ad1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ******************************************************************************/ /****************************************************************************** * Simple demonstration of hipcub::BlockReduce with dynamic shared memory * * To compile using the command line: * nvcc -arch=sm_XX example_block_reduce_dyn_smem.cu -I../.. -lcudart -O3 -std=c++14 * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console (define before including cub.h) #define CUB_STDERR #include <stdio.h> #include <algorithm> #include <iostream> #include <hipcub/hipcub.hpp> #include <cub/block/block_store.cuh> #include <hipcub/hipcub.hpp> #include "../../test/test_util.h" // Some implementation details rely on c++14 #if CUB_CPP_DIALECT >= 2014 using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- /// Verbose output bool g_verbose = false; /// Default grid size int g_grid_size = 1; //--------------------------------------------------------------------- // Kernels //--------------------------------------------------------------------- /** * Simple kernel for performing a block-wide reduction. 
*/ template <int BLOCK_THREADS> __global__ void BlockReduceKernel( int *d_in, // Tile of input int *d_out // Tile aggregate ) { // Specialize BlockReduce type for our thread block using BlockReduceT = hipcub::BlockReduce<int, BLOCK_THREADS>; using TempStorageT = typename BlockReduceT::TempStorage; union ShmemLayout { TempStorageT reduce; int aggregate; }; // shared memory byte-array extern __shared__ __align__(alignof(ShmemLayout)) char smem[]; // cast to lvalue reference of expected type auto& temp_storage = reinterpret_cast<TempStorageT&>(smem); int data = d_in[threadIdx.x]; // Compute sum int aggregate = BlockReduceT(temp_storage).Sum(data); // block-wide sync barrier necessary to re-use shared mem safely __syncthreads(); int* smem_integers = reinterpret_cast<int*>(smem); if (threadIdx.x == 0) smem_integers[0] = aggregate; // sync to make new shared value available to all threads __syncthreads(); aggregate = smem_integers[0]; // all threads write the aggregate to output d_out[threadIdx.x] = aggregate; } //--------------------------------------------------------------------- // Host utilities //--------------------------------------------------------------------- /** * Initialize reduction problem (and solution). 
* Returns the aggregate */ int Initialize(int *h_in, int num_items) { int inclusive = 0; for (int i = 0; i < num_items; ++i) { h_in[i] = i % 17; inclusive += h_in[i]; } return inclusive; } /** * Test thread block reduction */ template <int BLOCK_THREADS> void Test() { // Allocate host arrays int *h_in = new int[BLOCK_THREADS]; // Initialize problem and reference output on host int h_aggregate = Initialize(h_in, BLOCK_THREADS); // Initialize device arrays int *d_in = NULL; int *d_out = NULL; hipMalloc((void**)&d_in, sizeof(int) * BLOCK_THREADS); hipMalloc((void**)&d_out, sizeof(int) * BLOCK_THREADS); // Display input problem data if (g_verbose) { printf("Input data: "); for (int i = 0; i < BLOCK_THREADS; i++) printf("%d, ", h_in[i]); printf("\n\n"); } // Copy problem to device hipMemcpy(d_in, h_in, sizeof(int) * BLOCK_THREADS, hipMemcpyHostToDevice); // determine necessary storage size: auto block_reduce_temp_bytes = sizeof(typename hipcub::BlockReduce<int, BLOCK_THREADS>::TempStorage); // finally, we need to make sure that we can hold at least one integer // needed in the kernel to exchange data after reduction auto smem_size = (std::max)(1 * sizeof(int), block_reduce_temp_bytes); // use default stream hipStream_t stream = NULL; // Run reduction kernel hipLaunchKernelGGL(( BlockReduceKernel<BLOCK_THREADS>) , dim3(g_grid_size), dim3(BLOCK_THREADS), smem_size, stream, d_in, d_out); // Check total aggregate printf("\tAggregate: "); int compare = 0; for (int i = 0; i < BLOCK_THREADS; i++) { compare = compare || CompareDeviceResults( &h_aggregate, d_out + i, 1, g_verbose, g_verbose); } printf("%s\n", compare ? 
"FAIL" : "PASS"); AssertEquals(0, compare); // Check for kernel errors and STDIO from the kernel, if any CubDebugExit(hipPeekAtLastError()); CubDebugExit(hipDeviceSynchronize()); // Cleanup if (h_in) delete[] h_in; if (d_in) hipFree(d_in); if (d_out) hipFree(d_out); } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("grid-size", g_grid_size); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--grid-size=<grid size>] " "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Run tests Test<1024>(); Test<512>(); Test<256>(); Test<128>(); Test<64>(); Test<32>(); Test<16>(); return 0; } #else // < C++14 int main() {} #endif
54d91fbbfd82f2bb1c37654c010740bc1a695ad1.cu
/****************************************************************************** * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Simple demonstration of cub::BlockReduce with dynamic shared memory * * To compile using the command line: * nvcc -arch=sm_XX example_block_reduce_dyn_smem.cu -I../.. 
-lcudart -O3 -std=c++14 * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console (define before including cub.h) #define CUB_STDERR #include <stdio.h> #include <algorithm> #include <iostream> #include <cub/block/block_load.cuh> #include <cub/block/block_store.cuh> #include <cub/block/block_reduce.cuh> #include "../../test/test_util.h" // Some implementation details rely on c++14 #if CUB_CPP_DIALECT >= 2014 using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- /// Verbose output bool g_verbose = false; /// Default grid size int g_grid_size = 1; //--------------------------------------------------------------------- // Kernels //--------------------------------------------------------------------- /** * Simple kernel for performing a block-wide reduction. */ template <int BLOCK_THREADS> __global__ void BlockReduceKernel( int *d_in, // Tile of input int *d_out // Tile aggregate ) { // Specialize BlockReduce type for our thread block using BlockReduceT = cub::BlockReduce<int, BLOCK_THREADS>; using TempStorageT = typename BlockReduceT::TempStorage; union ShmemLayout { TempStorageT reduce; int aggregate; }; // shared memory byte-array extern __shared__ __align__(alignof(ShmemLayout)) char smem[]; // cast to lvalue reference of expected type auto& temp_storage = reinterpret_cast<TempStorageT&>(smem); int data = d_in[threadIdx.x]; // Compute sum int aggregate = BlockReduceT(temp_storage).Sum(data); // block-wide sync barrier necessary to re-use shared mem safely __syncthreads(); int* smem_integers = reinterpret_cast<int*>(smem); if (threadIdx.x == 0) smem_integers[0] = aggregate; // sync to make new shared value available to all threads __syncthreads(); aggregate = smem_integers[0]; // all threads write the aggregate to output d_out[threadIdx.x] = 
aggregate; } //--------------------------------------------------------------------- // Host utilities //--------------------------------------------------------------------- /** * Initialize reduction problem (and solution). * Returns the aggregate */ int Initialize(int *h_in, int num_items) { int inclusive = 0; for (int i = 0; i < num_items; ++i) { h_in[i] = i % 17; inclusive += h_in[i]; } return inclusive; } /** * Test thread block reduction */ template <int BLOCK_THREADS> void Test() { // Allocate host arrays int *h_in = new int[BLOCK_THREADS]; // Initialize problem and reference output on host int h_aggregate = Initialize(h_in, BLOCK_THREADS); // Initialize device arrays int *d_in = NULL; int *d_out = NULL; cudaMalloc((void**)&d_in, sizeof(int) * BLOCK_THREADS); cudaMalloc((void**)&d_out, sizeof(int) * BLOCK_THREADS); // Display input problem data if (g_verbose) { printf("Input data: "); for (int i = 0; i < BLOCK_THREADS; i++) printf("%d, ", h_in[i]); printf("\n\n"); } // Copy problem to device cudaMemcpy(d_in, h_in, sizeof(int) * BLOCK_THREADS, cudaMemcpyHostToDevice); // determine necessary storage size: auto block_reduce_temp_bytes = sizeof(typename cub::BlockReduce<int, BLOCK_THREADS>::TempStorage); // finally, we need to make sure that we can hold at least one integer // needed in the kernel to exchange data after reduction auto smem_size = (std::max)(1 * sizeof(int), block_reduce_temp_bytes); // use default stream cudaStream_t stream = NULL; // Run reduction kernel BlockReduceKernel<BLOCK_THREADS> <<<g_grid_size, BLOCK_THREADS, smem_size, stream>>>( d_in, d_out); // Check total aggregate printf("\tAggregate: "); int compare = 0; for (int i = 0; i < BLOCK_THREADS; i++) { compare = compare || CompareDeviceResults( &h_aggregate, d_out + i, 1, g_verbose, g_verbose); } printf("%s\n", compare ? 
"FAIL" : "PASS"); AssertEquals(0, compare); // Check for kernel errors and STDIO from the kernel, if any CubDebugExit(cudaPeekAtLastError()); CubDebugExit(cudaDeviceSynchronize()); // Cleanup if (h_in) delete[] h_in; if (d_in) cudaFree(d_in); if (d_out) cudaFree(d_out); } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("grid-size", g_grid_size); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--grid-size=<grid size>] " "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Run tests Test<1024>(); Test<512>(); Test<256>(); Test<128>(); Test<64>(); Test<32>(); Test<16>(); return 0; } #else // < C++14 int main() {} #endif
f175c7487346c82ef004cd37a2023e3dc4bc9080.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************ Sample CUDA MEX code written by Fang Liu (leoliuf@gmail.com). ************************************************************************/ /* system header */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <vector> /* MEX header */ #include <mex.h> #include "matrix.h" /* nVIDIA CUDA header */ #include <hip/hip_runtime.h> /* fixing error : identifier "IUnknown" is undefined" */ #ifdef _WIN32 #define WIN32_LEAN_AND_MEAN #endif #if defined(_WIN32) || defined(_WIN64) #include <windows.h> #endif /* includes CUDA kernel */ #include "gpuaddkernel.cuh" /* MEX entry function */ void mexFunction(int nlhs, mxArray *plhs[],int nrhs, const mxArray *prhs[]) { double *A, *B, *C; mwSignedIndex Am, An, Bm, Bn; /* argument check */ if ( nrhs != 2) { mexErrMsgIdAndTxt("MATLAB:cudaAdd:inputmismatch", "Input arguments must be 2!"); } if ( nlhs != 1) { mexErrMsgIdAndTxt("MATLAB:cudaAdd:outputmismatch", "Output arguments must be 1!"); } A = mxGetPr(prhs[0]); B = mxGetPr(prhs[1]); /* matrix size */ Am = (mwSignedIndex)mxGetM(prhs[0]); An = (mwSignedIndex)mxGetN(prhs[0]); Bm = (mwSignedIndex)mxGetM(prhs[1]); Bn = (mwSignedIndex)mxGetN(prhs[1]); if ( Am != Bm || An != Bn) { mexErrMsgIdAndTxt("MATLAB:cudaAdd:sizemismatch", "Input matrices must have the same size!"); } /* allocate output */ plhs[0] = mxCreateDoubleMatrix(Am, An, mxREAL); C = mxGetPr(plhs[0]); /* set GPU grid & block configuration */ hipDeviceProp_t deviceProp; memset( &deviceProp, 0, sizeof(deviceProp)); if( hipSuccess != hipGetDeviceProperties(&deviceProp,0)){ mexPrintf( "\n%s", hipGetErrorString(hipGetLastError())); return; } dim3 dimGridImg(8,1,1); dim3 dimBlockImg(1,64,1); /* allocate device memory for matrices */ double *d_A = NULL; hipMalloc( (void**) &d_A, Am * An * sizeof(double)) ; hipMemcpy( d_A, A, Am * An * sizeof(double), hipMemcpyHostToDevice) ; 
double *d_B = NULL; hipMalloc( (void**) &d_B, Bm * Bn * sizeof(double)) ; hipMemcpy( d_B, B, Bm * Bn * sizeof(double), hipMemcpyHostToDevice) ; double *d_C = NULL; hipMalloc( (void**) &d_C, Am * An * sizeof(double)) ; /* call GPU kernel for addition */ hipLaunchKernelGGL(( gpuaddkernel), dim3(dimGridImg), dim3(dimBlockImg) , 0, 0, d_A, d_B, d_C, Am, An); hipDeviceSynchronize(); /* copy result from device */ hipMemcpy( C, d_C, Am * An * sizeof(double), hipMemcpyDeviceToHost) ; /* free GPU memory */ hipFree(d_A); hipFree(d_B); hipFree(d_C); }
f175c7487346c82ef004cd37a2023e3dc4bc9080.cu
/************************************************************************ Sample CUDA MEX code written by Fang Liu (leoliuf@gmail.com). ************************************************************************/ /* system header */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <vector> /* MEX header */ #include <mex.h> #include "matrix.h" /* nVIDIA CUDA header */ #include <cuda.h> /* fixing error : identifier "IUnknown" is undefined" */ #ifdef _WIN32 #define WIN32_LEAN_AND_MEAN #endif #if defined(_WIN32) || defined(_WIN64) #include <windows.h> #endif /* includes CUDA kernel */ #include "gpuaddkernel.cuh" /* MEX entry function */ void mexFunction(int nlhs, mxArray *plhs[],int nrhs, const mxArray *prhs[]) { double *A, *B, *C; mwSignedIndex Am, An, Bm, Bn; /* argument check */ if ( nrhs != 2) { mexErrMsgIdAndTxt("MATLAB:cudaAdd:inputmismatch", "Input arguments must be 2!"); } if ( nlhs != 1) { mexErrMsgIdAndTxt("MATLAB:cudaAdd:outputmismatch", "Output arguments must be 1!"); } A = mxGetPr(prhs[0]); B = mxGetPr(prhs[1]); /* matrix size */ Am = (mwSignedIndex)mxGetM(prhs[0]); An = (mwSignedIndex)mxGetN(prhs[0]); Bm = (mwSignedIndex)mxGetM(prhs[1]); Bn = (mwSignedIndex)mxGetN(prhs[1]); if ( Am != Bm || An != Bn) { mexErrMsgIdAndTxt("MATLAB:cudaAdd:sizemismatch", "Input matrices must have the same size!"); } /* allocate output */ plhs[0] = mxCreateDoubleMatrix(Am, An, mxREAL); C = mxGetPr(plhs[0]); /* set GPU grid & block configuration */ cudaDeviceProp deviceProp; memset( &deviceProp, 0, sizeof(deviceProp)); if( cudaSuccess != cudaGetDeviceProperties(&deviceProp,0)){ mexPrintf( "\n%s", cudaGetErrorString(cudaGetLastError())); return; } dim3 dimGridImg(8,1,1); dim3 dimBlockImg(1,64,1); /* allocate device memory for matrices */ double *d_A = NULL; cudaMalloc( (void**) &d_A, Am * An * sizeof(double)) ; cudaMemcpy( d_A, A, Am * An * sizeof(double), cudaMemcpyHostToDevice) ; double *d_B = NULL; cudaMalloc( (void**) &d_B, Bm * Bn * 
sizeof(double)) ; cudaMemcpy( d_B, B, Bm * Bn * sizeof(double), cudaMemcpyHostToDevice) ; double *d_C = NULL; cudaMalloc( (void**) &d_C, Am * An * sizeof(double)) ; /* call GPU kernel for addition */ gpuaddkernel<<< dimGridImg, dimBlockImg >>>(d_A, d_B, d_C, Am, An); cudaThreadSynchronize(); /* copy result from device */ cudaMemcpy( C, d_C, Am * An * sizeof(double), cudaMemcpyDeviceToHost) ; /* free GPU memory */ cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); }
b462b64569bc86c5534972dd77b0882dcbf5816f.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/Exceptions.h> #include "multi_tensor_apply.cuh" #include "compat.h" #include <assert.h> #include <hip/hip_runtime.h> #define BLOCK_SIZE 512 #define ILP 4 /** * Perform fused SGD on multiple buffers * N: number of tensors * tl[0] : gradients * tl[1] : weights * tl[2] : momentum buffers * tl[3] : fp16 weights (if appropriate) * wd : weight_decay (scalar) * momentum : momentum (scalar) * dampening : momentum dampening (scalar) * lr : learning rate (scalar) * nesterov : enable nesterov (bool) * first run : necessary for proper momentum handling & init * wd_after_momentum : apply weight decay _after_ momentum instead of before **/ template<int N, typename T_grad, typename T_weight> struct SGDFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<N>& tl, float wd, float momentum, float dampening, float lr, bool nesterov, bool first_run, bool wd_after_momentum, float scale) { // Early exit if we don't need to do anything if (*noop_gmem) return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T_grad* grad_in = (T_grad*)tl.addresses[0][tensor_loc]; grad_in += chunk_idx*chunk_size; T_weight* weight_in = (T_weight*)tl.addresses[1][tensor_loc]; weight_in += chunk_idx*chunk_size; T_weight* mom_in = (T_weight*)tl.addresses[2][tensor_loc]; mom_in += chunk_idx*chunk_size; at::Half *model_weights_out = nullptr; if(N == 4) { model_weights_out = (at::Half*)tl.addresses[3][tensor_loc]; model_weights_out += chunk_idx*chunk_size; } n -= chunk_idx*chunk_size; // Non-divergent exit condition for the __syncthreads float incoming_grads[ILP]; float incoming_weights[ILP]; float incoming_moms[ILP]; for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { #pragma 
unroll for(int ii = 0; ii < ILP; ii++) { incoming_grads[ii] = 0; incoming_weights[ii] = 0; incoming_moms[ii] = 0; int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { incoming_grads[ii] = static_cast<float>(grad_in[i])*scale; incoming_weights[ii] = static_cast<float>(weight_in[i]); incoming_moms[ii] = static_cast<float>(mom_in[i]); } } // note for clarification to future michael: // From a pure memory dependency perspective, there's likely no point unrolling // the write loop, since writes just fire off once their LDGs arrive. // Put another way, the STGs are dependent on the LDGs, but not on each other. // There is still compute ILP benefit from unrolling the loop though. #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { // apply weight decay before momentum if necessary if(wd != 0.f && !wd_after_momentum) incoming_grads[ii] += wd * incoming_weights[ii]; if(momentum != 0.f) { if(!first_run) incoming_moms[ii] = incoming_moms[ii] * momentum + (1.f - dampening) * incoming_grads[ii]; else // initialize momentums to current incoming grads incoming_moms[ii] = incoming_grads[ii]; if(nesterov) incoming_grads[ii] += momentum * incoming_moms[ii]; else incoming_grads[ii] = incoming_moms[ii]; } // Apply WD after momentum if desired if(wd != 0.f && wd_after_momentum) incoming_grads[ii] += wd * incoming_weights[ii]; // adjust the weight and write out weight_in[i] += (-lr * incoming_grads[ii]); // if necessary, write out an fp16 copy of the weights if(N == 4) model_weights_out[i] = static_cast<at::Half>(weight_in[i]); // also write out the new momentum if(momentum != 0.f) mom_in[i] = incoming_moms[ii]; } } } } }; void multi_tensor_sgd_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, float wd, float momentum, float dampening, float lr, bool nesterov, bool first_run, bool wd_after_momentum, float scale) { auto num_tensors = 
tensor_lists.size(); auto grad_type = tensor_lists[0][0].scalar_type(); auto weight_type = tensor_lists[1][0].scalar_type(); if(num_tensors == 4) for(int i = 0; i < tensor_lists[3].size(); i++) TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half, "Additional output tensors should always be fp16."); TORCH_CHECK(noop_flag.device() == tensor_lists[0][0].device(), "expected noop flag to be on the same device as tensors"); // We have 3 possibilities to handle here, in terms of // grad_type, param_type, momentum_type, requires_fp16_copy // 1. fp16, fp16, fp16, No // 2. fp32, fp32, fp32, No // 3. fp16, fp32, fp32, Yes // 4. fp32, fp32, fp32, Yes // this is the materialize_master_grads=True case // It's easier to hardcode these possibilities than to use // switches etc. to handle the cross-product of cases where // we don't want the majority of them. // Case 1. fp16, fp16, fp16, No if(grad_type == at::ScalarType::Half && weight_type == at::ScalarType::Half && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, at::Half, at::Half>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 2. fp16, fp32, fp32, No // else if (grad_type == at::ScalarType::Half && // weight_type == at::ScalarType::Float && // num_tensors == 3) { // multi_tensor_apply<3>( // BLOCK_SIZE, // chunk_size, // noop_flag, // tensor_lists, // SGDFunctor<3, at::Half, float>(), // wd, // momentum, // dampening, // lr, // nesterov, // first_run, // wd_after_momentum); // } // Case 2. fp32, fp32, fp32, No else if(grad_type == at::ScalarType::Float && weight_type == at::ScalarType::Float && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, float, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 3. 
fp16, fp32, fp32, Yes else if(grad_type == at::ScalarType::Half && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, at::Half, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 4. fp32, fp32, fp32, Yes else if(grad_type == at::ScalarType::Float && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, float, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } else { AT_ERROR("multi_tensor_sgd only supports some combinations of gradient & weight types. Given: ", "gradient: ", grad_type, ", weight: ", weight_type, ", num_lists: ", num_tensors); } AT_CUDA_CHECK(hipGetLastError()); }
b462b64569bc86c5534972dd77b0882dcbf5816f.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include "multi_tensor_apply.cuh" #include "compat.h" #include <assert.h> #include <cuda_runtime.h> #define BLOCK_SIZE 512 #define ILP 4 /** * Perform fused SGD on multiple buffers * N: number of tensors * tl[0] : gradients * tl[1] : weights * tl[2] : momentum buffers * tl[3] : fp16 weights (if appropriate) * wd : weight_decay (scalar) * momentum : momentum (scalar) * dampening : momentum dampening (scalar) * lr : learning rate (scalar) * nesterov : enable nesterov (bool) * first run : necessary for proper momentum handling & init * wd_after_momentum : apply weight decay _after_ momentum instead of before **/ template<int N, typename T_grad, typename T_weight> struct SGDFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<N>& tl, float wd, float momentum, float dampening, float lr, bool nesterov, bool first_run, bool wd_after_momentum, float scale) { // Early exit if we don't need to do anything if (*noop_gmem) return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T_grad* grad_in = (T_grad*)tl.addresses[0][tensor_loc]; grad_in += chunk_idx*chunk_size; T_weight* weight_in = (T_weight*)tl.addresses[1][tensor_loc]; weight_in += chunk_idx*chunk_size; T_weight* mom_in = (T_weight*)tl.addresses[2][tensor_loc]; mom_in += chunk_idx*chunk_size; at::Half *model_weights_out = nullptr; if(N == 4) { model_weights_out = (at::Half*)tl.addresses[3][tensor_loc]; model_weights_out += chunk_idx*chunk_size; } n -= chunk_idx*chunk_size; // Non-divergent exit condition for the __syncthreads float incoming_grads[ILP]; float incoming_weights[ILP]; float incoming_moms[ILP]; for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { incoming_grads[ii] = 
0; incoming_weights[ii] = 0; incoming_moms[ii] = 0; int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { incoming_grads[ii] = static_cast<float>(grad_in[i])*scale; incoming_weights[ii] = static_cast<float>(weight_in[i]); incoming_moms[ii] = static_cast<float>(mom_in[i]); } } // note for clarification to future michael: // From a pure memory dependency perspective, there's likely no point unrolling // the write loop, since writes just fire off once their LDGs arrive. // Put another way, the STGs are dependent on the LDGs, but not on each other. // There is still compute ILP benefit from unrolling the loop though. #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { // apply weight decay before momentum if necessary if(wd != 0.f && !wd_after_momentum) incoming_grads[ii] += wd * incoming_weights[ii]; if(momentum != 0.f) { if(!first_run) incoming_moms[ii] = incoming_moms[ii] * momentum + (1.f - dampening) * incoming_grads[ii]; else // initialize momentums to current incoming grads incoming_moms[ii] = incoming_grads[ii]; if(nesterov) incoming_grads[ii] += momentum * incoming_moms[ii]; else incoming_grads[ii] = incoming_moms[ii]; } // Apply WD after momentum if desired if(wd != 0.f && wd_after_momentum) incoming_grads[ii] += wd * incoming_weights[ii]; // adjust the weight and write out weight_in[i] += (-lr * incoming_grads[ii]); // if necessary, write out an fp16 copy of the weights if(N == 4) model_weights_out[i] = static_cast<at::Half>(weight_in[i]); // also write out the new momentum if(momentum != 0.f) mom_in[i] = incoming_moms[ii]; } } } } }; void multi_tensor_sgd_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, float wd, float momentum, float dampening, float lr, bool nesterov, bool first_run, bool wd_after_momentum, float scale) { auto num_tensors = tensor_lists.size(); auto grad_type = tensor_lists[0][0].scalar_type(); 
auto weight_type = tensor_lists[1][0].scalar_type(); if(num_tensors == 4) for(int i = 0; i < tensor_lists[3].size(); i++) TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half, "Additional output tensors should always be fp16."); TORCH_CHECK(noop_flag.device() == tensor_lists[0][0].device(), "expected noop flag to be on the same device as tensors"); // We have 3 possibilities to handle here, in terms of // grad_type, param_type, momentum_type, requires_fp16_copy // 1. fp16, fp16, fp16, No // 2. fp32, fp32, fp32, No // 3. fp16, fp32, fp32, Yes // 4. fp32, fp32, fp32, Yes // this is the materialize_master_grads=True case // It's easier to hardcode these possibilities than to use // switches etc. to handle the cross-product of cases where // we don't want the majority of them. // Case 1. fp16, fp16, fp16, No if(grad_type == at::ScalarType::Half && weight_type == at::ScalarType::Half && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, at::Half, at::Half>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 2. fp16, fp32, fp32, No // else if (grad_type == at::ScalarType::Half && // weight_type == at::ScalarType::Float && // num_tensors == 3) { // multi_tensor_apply<3>( // BLOCK_SIZE, // chunk_size, // noop_flag, // tensor_lists, // SGDFunctor<3, at::Half, float>(), // wd, // momentum, // dampening, // lr, // nesterov, // first_run, // wd_after_momentum); // } // Case 2. fp32, fp32, fp32, No else if(grad_type == at::ScalarType::Float && weight_type == at::ScalarType::Float && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, float, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 3. 
fp16, fp32, fp32, Yes else if(grad_type == at::ScalarType::Half && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, at::Half, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 4. fp32, fp32, fp32, Yes else if(grad_type == at::ScalarType::Float && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, float, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } else { AT_ERROR("multi_tensor_sgd only supports some combinations of gradient & weight types. Given: ", "gradient: ", grad_type, ", weight: ", weight_type, ", num_lists: ", num_tensors); } AT_CUDA_CHECK(cudaGetLastError()); }
2eafba46f1d1b940b38cf428347e8e1b415904e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <system/Environment.h> #include <loops/transform_float.h> #include <types/types.h> #include <system/op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformFloatSimple(const void *x, const Nd4jLong *xShapeInfo, int xRank, void *params, void *z, const Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { functions::transform::TransformFloat<X,Z>::template transformCuda<OpType>( x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformFloat<X,Y>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, int opNum, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const 
Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformFloat<X,Z>::transformCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vparams, void *vz, const Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto params = reinterpret_cast<Z*>(vparams); auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') { for (Nd4jLong i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; 
template<typename X, typename Y> __device__ void TransformFloat<X,Y>::transformCudaLegacy( const int opNum, const void *x, const Nd4jLong *xShapeInfo, void *params, void *z, const Nd4jLong *zShapeInfo, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transformCuda, PARAMS(x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS); } template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformFloat<X,Z>::intermediateShaped( dim3 launchDims, hipStream_t *stream, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { hipLaunchKernelGGL(( transformFloatSimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "transformFloat(...) failed"); } BUILD_DOUBLE_TEMPLATE(template class ND4J_LOCAL TransformFloat, , LIBND4J_TYPES, FLOAT_TYPES); } }
2eafba46f1d1b940b38cf428347e8e1b415904e4.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <system/Environment.h> #include <loops/transform_float.h> #include <types/types.h> #include <system/op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformFloatSimple(const void *x, const Nd4jLong *xShapeInfo, int xRank, void *params, void *z, const Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { functions::transform::TransformFloat<X,Z>::template transformCuda<OpType>( x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformFloat<X,Y>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { 
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformFloat<X,Z>::transformCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vparams, void *vz, const Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto params = reinterpret_cast<Z*>(vparams); auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') { for (Nd4jLong i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X, typename Y> __device__ void 
TransformFloat<X,Y>::transformCudaLegacy( const int opNum, const void *x, const Nd4jLong *xShapeInfo, void *params, void *z, const Nd4jLong *zShapeInfo, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transformCuda, PARAMS(x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS); } template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformFloat<X,Z>::intermediateShaped( dim3 launchDims, cudaStream_t *stream, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { transformFloatSimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "transformFloat(...) failed"); } BUILD_DOUBLE_TEMPLATE(template class ND4J_LOCAL TransformFloat, , LIBND4J_TYPES, FLOAT_TYPES); } }
dbe9853783223283aea52227c95e0a775e58f12a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <memory> #include <vector> #include "dali/core/util.h" #include "dali/test/test_tensors.h" #include "dali/kernels/signal/window/extract_windows_gpu.cuh" #include "dali/kernels/scratch.h" #include "dali/kernels/signal/window/window_functions.h" namespace dali { namespace kernels { namespace signal { TEST(ExtractWindowsGPU, NonBatchedKernel) { float *in_gpu, *out_gpu; int winlen = 60; int outwinlen = 63; int windows = 80; int stride = windows; int step = 10; int length = windows * step - 100;; int center = 5; bool reflect = false; hipMalloc(&in_gpu, sizeof(float)*length); hipMalloc(&out_gpu, sizeof(float)*windows*outwinlen); std::vector<float> in(length), out(windows*outwinlen); for (int i = 0; i < length; i++) { in[i] = i + 1000; } hipMemcpy(in_gpu, in.data(), sizeof(float)*length, hipMemcpyHostToDevice); hipMemset(out_gpu, 0xff, sizeof(float)*windows*outwinlen); int xblocks = div_ceil(length, 32); int yblocks = div_ceil(winlen, 32); hipLaunchKernelGGL(( window::ExtractVerticalWindowsKernel), dim3(dim3(xblocks, yblocks)), dim3(dim3(32, 32)), 0, 0, out_gpu, windows, stride, in_gpu, length, nullptr, winlen, outwinlen, center, step, reflect); hipMemcpy(out.data(), out_gpu, sizeof(float)*winlen*windows, hipMemcpyDeviceToHost); 
hipDeviceSynchronize(); for (int w = 0; w < windows; w++) { for (int i = 0; i < winlen; i++) { int idx = w * step + i - center; if (reflect) idx = boundary::idx_reflect_101(idx, 0, length); float ref = idx >= 0 && idx < length ? in[idx] : 0; EXPECT_EQ(out[w + i*stride], ref) << "@ window = " << w << ", index = " << i; } for (int i = winlen; i < outwinlen; i++) { EXPECT_EQ(out[w + i*stride], 0) << "padding @ window = " << w << ", index = " << i; } } if (HasFailure()) { std::cout << "Debug: Extract window actual output:\n"; for (int i = 0; i < outwinlen; i++) { for (int j = 0; j < windows; j++) { std::cout << out[i*stride+j] << " "; } std::cout << "\n"; } std::cout << std::flush; } hipFree(in_gpu); hipFree(out_gpu); } void TestBatchedExtract( ExtractWindowsImplGPU<float, float> *extract, const TensorListShape<1> &lengths, bool concatenate, Padding padding, span<const float> window, int out_win_len = -1) { bool vertical = extract->IsVertical(); ScratchpadAllocator sa; int N = lengths.num_samples(); ptrdiff_t total_length = 0; for (int i = 0; i < N; i++) { total_length += lengths[i][0]; } TestTensorList<float, 1> in_list; in_list.reshape(lengths); auto in_cpu = in_list.cpu(); for (int i = 0; i < N; i++) { for (int j = 0; j < lengths[i][0]; j++) in_cpu[i].data[j] = 1000*(i+1)+j; } ExtractWindowsArgs args; args.window_length = window.empty() ? 55 : window.size(); args.window_center = window.empty() ? 21 : window.size()/2; args.window_step = 2; args.padding = padding; int out_win_len_actual = out_win_len < 0 ? args.window_length : out_win_len; KernelContext ctx; auto in_gpu = in_list.gpu(0); auto req = extract->Setup(ctx, make_span(lengths.shapes), args, concatenate, out_win_len); ASSERT_EQ(req.output_shapes.size(), 1u); ASSERT_EQ(req.output_shapes[0].num_samples(), concatenate ? 
1 : N); sa.Reserve(req.scratch_sizes); auto scratchpad = sa.GetScratchpad(); ctx.scratchpad = &scratchpad; TestTensorList<float, 2> out; memory::KernelUniquePtr<float> gpu_win; if (!window.empty()) { gpu_win = memory::alloc_unique<float>(AllocType::GPU, window.size()); hipMemcpy(gpu_win.get(), window.data(), sizeof(float)*window.size(), hipMemcpyHostToDevice); } auto window_gpu = make_tensor_gpu<1>(gpu_win.get(), { window.size() }); out.reshape(req.output_shapes[0].to_static<2>()); auto out_gpu = out.gpu(0); hipMemset(out_gpu.data[0], 0xff, sizeof(float)*out_gpu.shape.num_elements()); extract->Run(ctx, out_gpu, in_gpu, window_gpu); auto out_cpu = out.cpu(); ptrdiff_t ofs = 0; for (int sample = 0; sample < N; sample++) { ptrdiff_t length = lengths[sample][0]; int nwnd = args.num_windows(length); int out_sample = 0; if (!concatenate) { ofs = 0; out_sample = sample; } ptrdiff_t sample_stride = vertical ? out_cpu.shape[out_sample][1] : 1; ptrdiff_t window_stride = vertical ? 1 : out_cpu.shape[out_sample][1]; for (int w = 0; w < nwnd; w++, ofs += window_stride) { int i = 0; for (; i < args.window_length; i++) { ptrdiff_t idx = w * args.window_step + i - args.window_center; if (args.padding == Padding::Reflect) { idx = boundary::idx_reflect_101(idx, length); } float ref = idx >= 0 && idx < length ? 
in_cpu.data[sample][idx] : 0; if (!window.empty()) ref *= window[i]; ASSERT_EQ(out_cpu.data[out_sample][ofs + i*sample_stride], ref) << "@ sample = " << sample << ", window = " << w << ", index = " << i; } for (; i < out_win_len_actual; i++) { ASSERT_EQ(out_cpu.data[out_sample][ofs + i*sample_stride], 0) << "padding @ sample = " << sample << ", window = " << w << ", index = " << i; } } } } void TestBatchedExtract( const TensorListShape<1> &lengths, bool concatenate, Padding padding, bool vertical, span<const float> window, int out_win_len = -1) { std::unique_ptr<ExtractWindowsImplGPU<float, float>> extract; if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); } void TestBatchedExtract( bool concatenate, Padding padding, bool vertical, span<const float> window, int out_win_len = -1) { std::unique_ptr<ExtractWindowsImplGPU<float, float>> extract; if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); TensorListShape<1> lengths = {{ 5, 305, 157 }}; TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); lengths = {{ 137, 203, 150, 12 }}; TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); } TEST(ExtractVerticalWindowsGPU, BatchedConcat) { TestBatchedExtract(true, Padding::Reflect, true, {}); } TEST(ExtractVerticalWindowsGPU, BatchedSeparate) { TestBatchedExtract(false, Padding::Zero, true, {}); } TEST(ExtractVerticalWindowsGPU, BatchedConcatWindowFunc) { vector<float> window(60); HannWindow(make_span(window)); 
TestBatchedExtract(true, Padding::Zero, true, make_cspan(window)); } TEST(ExtractVerticalWindowsGPU, BatchedSeparateWindowFunc) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, true, make_cspan(window)); } TEST(ExtractVerticalWindowsGPU, BatchedSeparateWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(true, Padding::Reflect, true, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, BatchedConcat) { TestBatchedExtract(true, Padding::Reflect, false, {}); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparate) { TestBatchedExtract(false, Padding::Zero, false, {}); } TEST(ExtractHorizontalWindowsGPU, BatchedConcatWindowFunc) { vector<float> window(60); HannWindow(make_span(window)); TestBatchedExtract(true, Padding::Zero, false, make_cspan(window)); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparateWindowFunc) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, false, make_cspan(window)); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparateWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, false, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, BatchedConcatWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, true, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, SizeSweep) { int max_size = 2048; std::vector<TensorShape<1>> lengths; int step = 1; for (int s = 1; s <= max_size; s+=step) { if ((s&255) == 0) { if (step > 1) // add 2^n-1 lengths.push_back({s-1}); step += step; } lengths.push_back({s}); } TensorListShape<1> shape(lengths); vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(shape, false, Padding::Reflect, false, make_cspan(window)); } } // namespace signal } // namespace kernels } // namespace dali
dbe9853783223283aea52227c95e0a775e58f12a.cu
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <memory> #include <vector> #include "dali/core/util.h" #include "dali/test/test_tensors.h" #include "dali/kernels/signal/window/extract_windows_gpu.cuh" #include "dali/kernels/scratch.h" #include "dali/kernels/signal/window/window_functions.h" namespace dali { namespace kernels { namespace signal { TEST(ExtractWindowsGPU, NonBatchedKernel) { float *in_gpu, *out_gpu; int winlen = 60; int outwinlen = 63; int windows = 80; int stride = windows; int step = 10; int length = windows * step - 100;; int center = 5; bool reflect = false; cudaMalloc(&in_gpu, sizeof(float)*length); cudaMalloc(&out_gpu, sizeof(float)*windows*outwinlen); std::vector<float> in(length), out(windows*outwinlen); for (int i = 0; i < length; i++) { in[i] = i + 1000; } cudaMemcpy(in_gpu, in.data(), sizeof(float)*length, cudaMemcpyHostToDevice); cudaMemset(out_gpu, 0xff, sizeof(float)*windows*outwinlen); int xblocks = div_ceil(length, 32); int yblocks = div_ceil(winlen, 32); window::ExtractVerticalWindowsKernel<<<dim3(xblocks, yblocks), dim3(32, 32)>>>( out_gpu, windows, stride, in_gpu, length, nullptr, winlen, outwinlen, center, step, reflect); cudaMemcpy(out.data(), out_gpu, sizeof(float)*winlen*windows, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for (int w = 0; w < windows; w++) { for (int i = 0; i < winlen; i++) { int idx = w * step + i - center; if 
(reflect) idx = boundary::idx_reflect_101(idx, 0, length); float ref = idx >= 0 && idx < length ? in[idx] : 0; EXPECT_EQ(out[w + i*stride], ref) << "@ window = " << w << ", index = " << i; } for (int i = winlen; i < outwinlen; i++) { EXPECT_EQ(out[w + i*stride], 0) << "padding @ window = " << w << ", index = " << i; } } if (HasFailure()) { std::cout << "Debug: Extract window actual output:\n"; for (int i = 0; i < outwinlen; i++) { for (int j = 0; j < windows; j++) { std::cout << out[i*stride+j] << " "; } std::cout << "\n"; } std::cout << std::flush; } cudaFree(in_gpu); cudaFree(out_gpu); } void TestBatchedExtract( ExtractWindowsImplGPU<float, float> *extract, const TensorListShape<1> &lengths, bool concatenate, Padding padding, span<const float> window, int out_win_len = -1) { bool vertical = extract->IsVertical(); ScratchpadAllocator sa; int N = lengths.num_samples(); ptrdiff_t total_length = 0; for (int i = 0; i < N; i++) { total_length += lengths[i][0]; } TestTensorList<float, 1> in_list; in_list.reshape(lengths); auto in_cpu = in_list.cpu(); for (int i = 0; i < N; i++) { for (int j = 0; j < lengths[i][0]; j++) in_cpu[i].data[j] = 1000*(i+1)+j; } ExtractWindowsArgs args; args.window_length = window.empty() ? 55 : window.size(); args.window_center = window.empty() ? 21 : window.size()/2; args.window_step = 2; args.padding = padding; int out_win_len_actual = out_win_len < 0 ? args.window_length : out_win_len; KernelContext ctx; auto in_gpu = in_list.gpu(0); auto req = extract->Setup(ctx, make_span(lengths.shapes), args, concatenate, out_win_len); ASSERT_EQ(req.output_shapes.size(), 1u); ASSERT_EQ(req.output_shapes[0].num_samples(), concatenate ? 
1 : N); sa.Reserve(req.scratch_sizes); auto scratchpad = sa.GetScratchpad(); ctx.scratchpad = &scratchpad; TestTensorList<float, 2> out; memory::KernelUniquePtr<float> gpu_win; if (!window.empty()) { gpu_win = memory::alloc_unique<float>(AllocType::GPU, window.size()); cudaMemcpy(gpu_win.get(), window.data(), sizeof(float)*window.size(), cudaMemcpyHostToDevice); } auto window_gpu = make_tensor_gpu<1>(gpu_win.get(), { window.size() }); out.reshape(req.output_shapes[0].to_static<2>()); auto out_gpu = out.gpu(0); cudaMemset(out_gpu.data[0], 0xff, sizeof(float)*out_gpu.shape.num_elements()); extract->Run(ctx, out_gpu, in_gpu, window_gpu); auto out_cpu = out.cpu(); ptrdiff_t ofs = 0; for (int sample = 0; sample < N; sample++) { ptrdiff_t length = lengths[sample][0]; int nwnd = args.num_windows(length); int out_sample = 0; if (!concatenate) { ofs = 0; out_sample = sample; } ptrdiff_t sample_stride = vertical ? out_cpu.shape[out_sample][1] : 1; ptrdiff_t window_stride = vertical ? 1 : out_cpu.shape[out_sample][1]; for (int w = 0; w < nwnd; w++, ofs += window_stride) { int i = 0; for (; i < args.window_length; i++) { ptrdiff_t idx = w * args.window_step + i - args.window_center; if (args.padding == Padding::Reflect) { idx = boundary::idx_reflect_101(idx, length); } float ref = idx >= 0 && idx < length ? 
in_cpu.data[sample][idx] : 0; if (!window.empty()) ref *= window[i]; ASSERT_EQ(out_cpu.data[out_sample][ofs + i*sample_stride], ref) << "@ sample = " << sample << ", window = " << w << ", index = " << i; } for (; i < out_win_len_actual; i++) { ASSERT_EQ(out_cpu.data[out_sample][ofs + i*sample_stride], 0) << "padding @ sample = " << sample << ", window = " << w << ", index = " << i; } } } } void TestBatchedExtract( const TensorListShape<1> &lengths, bool concatenate, Padding padding, bool vertical, span<const float> window, int out_win_len = -1) { std::unique_ptr<ExtractWindowsImplGPU<float, float>> extract; if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); } void TestBatchedExtract( bool concatenate, Padding padding, bool vertical, span<const float> window, int out_win_len = -1) { std::unique_ptr<ExtractWindowsImplGPU<float, float>> extract; if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); TensorListShape<1> lengths = {{ 5, 305, 157 }}; TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); if (vertical) extract = std::make_unique<ExtractVerticalWindowsImplGPU<float, float>>(); else extract = std::make_unique<ExtractHorizontalWindowsImplGPU<float, float>>(); lengths = {{ 137, 203, 150, 12 }}; TestBatchedExtract(extract.get(), lengths, concatenate, padding, window, out_win_len); } TEST(ExtractVerticalWindowsGPU, BatchedConcat) { TestBatchedExtract(true, Padding::Reflect, true, {}); } TEST(ExtractVerticalWindowsGPU, BatchedSeparate) { TestBatchedExtract(false, Padding::Zero, true, {}); } TEST(ExtractVerticalWindowsGPU, BatchedConcatWindowFunc) { vector<float> window(60); HannWindow(make_span(window)); 
TestBatchedExtract(true, Padding::Zero, true, make_cspan(window)); } TEST(ExtractVerticalWindowsGPU, BatchedSeparateWindowFunc) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, true, make_cspan(window)); } TEST(ExtractVerticalWindowsGPU, BatchedSeparateWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(true, Padding::Reflect, true, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, BatchedConcat) { TestBatchedExtract(true, Padding::Reflect, false, {}); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparate) { TestBatchedExtract(false, Padding::Zero, false, {}); } TEST(ExtractHorizontalWindowsGPU, BatchedConcatWindowFunc) { vector<float> window(60); HannWindow(make_span(window)); TestBatchedExtract(true, Padding::Zero, false, make_cspan(window)); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparateWindowFunc) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, false, make_cspan(window)); } TEST(ExtractHorizontalWindowsGPU, BatchedSeparateWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, false, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, BatchedConcatWindowFuncPad) { vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(false, Padding::Reflect, true, make_cspan(window), 72); } TEST(ExtractHorizontalWindowsGPU, SizeSweep) { int max_size = 2048; std::vector<TensorShape<1>> lengths; int step = 1; for (int s = 1; s <= max_size; s+=step) { if ((s&255) == 0) { if (step > 1) // add 2^n-1 lengths.push_back({s-1}); step += step; } lengths.push_back({s}); } TensorListShape<1> shape(lengths); vector<float> window(60); HammingWindow(make_span(window)); TestBatchedExtract(shape, false, Padding::Reflect, false, make_cspan(window)); } } // namespace signal } // namespace kernels } // namespace dali
32bd3399a87ff04fb0c2653f93a74ce8fc089b52.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "Utilities.cuh" #include "transform_reduce.cuh" #define BLOCKSIZE 512 #define warpSize 32 // --- Host-side function pointer pointFunction_t h_pfunc; // --- Uncomment if you want to apply CUDA error checking to the kernel launches //#define DEBUG //#define EXTERNAL /*******************************************************/ /* CALCULATING THE NEXT POWER OF 2 OF A CERTAIN NUMBER */ /*******************************************************/ unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /*************************************/ /* CHECK IF A NUMBER IS A POWER OF 2 */ /*************************************/ // --- Note: although x = 1 is a power of 2 (1 = 2^0), this routine returns 0 for x == 1. bool isPow2(unsigned int x) { if (x == 1) return 0; else return ((x&(x-1))==0); } /***************************/ /* TRANSFORMATION FUNCTION */ /***************************/ //extern "C" __host__ __device__ float transformation(const float * __restrict__, const int); //__device__ float trans(const float * __restrict__ x, const int i) { return transformation(x, i); }; // __host__ __device__ float trans(const float * __restrict__ x, const int i) { transformation(x, i); }; template <class T> //__host__ __device__ T transformation(const T * __restrict__ x, const int i) { return ((T)100 * (x[i+1] - x[i] * x[i]) * (x[i+1] - x[i] * x[i]) + (x[i] - (T)1) * (x[i] - (T)1)) ; } __host__ __device__ __forceinline__ T transformation(const T * __restrict__ x, const int i) { return ((T)100 * (x[i+1] - x[i] * x[i]) * (x[i+1] - x[i] * x[i]) + (x[i] - (T)1) * (x[i] - (T)1)) ; } //__host__ __device__ __forceinline__ T transformation(const T * __restrict__ x, const int i) { return x[i]; } //__host__ __device__ __forceinline__ T transformation(const T * __restrict__ x, const int i) { return log(x[i]) * x[i]; } 
//__host__ __device__ __forceinline__ float transformation(const float * __restrict__ x, const int i) { return (100.f * (x[i+1] - x[i] * x[i]) * (x[i+1] - x[i] * x[i]) + (x[i] - 1.f) * (x[i] - 1.f)) ; } /********************/ /* REDUCTION KERNEL */ /********************/ /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reductionKernel(T *g_idata, T *g_odata, unsigned int N, pointFunction_t pPointTransformation) { extern __shared__ T sdata[]; unsigned int tid = threadIdx.x; // Local thread index unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; // Global thread index - Fictitiously double the block dimension unsigned int gridSize = blockSize*2*gridDim.x; // --- Performs the first level of reduction in registers when reading from global memory on multiple elements per thread. // More blocks will result in a larger gridSize and therefore fewer elements per thread T mySum = 0; while (i < N) { #ifdef EXTERNAL mySum += (*pPointTransformation)(g_idata, i); #else mySum += transformation(g_idata, i); #endif // --- Ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < N) #ifdef EXTERNAL mySum += (*pPointTransformation)(g_idata, i+blockSize); #else mySum += transformation(g_idata, i+blockSize); #endif i += gridSize; } // --- Each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // --- Reduction in shared memory. Fully unrolled loop. 
if ((blockSize >= 512) && (tid < 256)) sdata[tid] = mySum = mySum + sdata[tid + 256]; __syncthreads(); if ((blockSize >= 256) && (tid < 128)) sdata[tid] = mySum = mySum + sdata[tid + 128]; __syncthreads(); if ((blockSize >= 128) && (tid < 64)) sdata[tid] = mySum = mySum + sdata[tid + 64]; __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) // --- Single warp reduction by shuffle operations if ( tid < 32 ) { // --- Last iteration removed from the for loop, but needed for shuffle reduction mySum += sdata[tid + 32]; // --- Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) mySum += __shfl_down(mySum, offset); //for (int offset=1; offset < warpSize; offset *= 2) mySum += __shfl_xor(mySum, i); } #else // --- Reduction within a single warp. Fully unrolled loop. if ((blockSize >= 64) && (tid < 32)) sdata[tid] = mySum = mySum + sdata[tid + 32]; __syncthreads(); if ((blockSize >= 32) && (tid < 16)) sdata[tid] = mySum = mySum + sdata[tid + 16]; __syncthreads(); if ((blockSize >= 16) && (tid < 8)) sdata[tid] = mySum = mySum + sdata[tid + 8]; __syncthreads(); if ((blockSize >= 8) && (tid < 4)) sdata[tid] = mySum = mySum + sdata[tid + 4]; __syncthreads(); if ((blockSize >= 4) && (tid < 2)) sdata[tid] = mySum = mySum + sdata[tid + 2]; __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) sdata[tid] = mySum = mySum + sdata[tid + 1]; __syncthreads(); #endif // --- Write result for this block to global memory. At the end of the kernel, global memory will contain the results for the summations of // individual blocks if (tid == 0) g_odata[blockIdx.x] = mySum; } /******************************/ /* REDUCTION WRAPPER FUNCTION */ /******************************/ template <class T> T transform_reduce_inner(T *g_idata, unsigned int N, pointFunction_t h_pfunc) { // --- Reduction parameters const int NumThreads = (N < BLOCKSIZE) ? nextPow2(N) : BLOCKSIZE; const int NumBlocks = (N + NumThreads - 1) / NumThreads; const int smemSize = (NumThreads <= 32) ? 
2 * NumThreads * sizeof(T) : NumThreads * sizeof(T); // --- Device memory space where storing the partial reduction results T *g_odata; gpuErrchk(hipMalloc((void**)&g_odata, NumBlocks * sizeof(T))); if (isPow2(N)) { switch (NumThreads) { case 512:hipLaunchKernelGGL(( reductionKernel<T, 512, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 256:hipLaunchKernelGGL(( reductionKernel<T, 256, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 128:hipLaunchKernelGGL(( reductionKernel<T, 128, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 64: hipLaunchKernelGGL(( reductionKernel<T, 64, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 32: hipLaunchKernelGGL(( reductionKernel<T, 32, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 16: hipLaunchKernelGGL(( reductionKernel<T, 16, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 8: hipLaunchKernelGGL(( reductionKernel<T, 8, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 4: hipLaunchKernelGGL(( reductionKernel<T, 4, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 2: hipLaunchKernelGGL(( reductionKernel<T, 2, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 1: hipLaunchKernelGGL(( reductionKernel<T, 1, true>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; } #ifdef DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif } else { switch (NumThreads) { case 512:hipLaunchKernelGGL(( reductionKernel<T, 512, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 
256:hipLaunchKernelGGL(( reductionKernel<T, 256, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 128:hipLaunchKernelGGL(( reductionKernel<T, 128, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 64: hipLaunchKernelGGL(( reductionKernel<T, 64, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 32: hipLaunchKernelGGL(( reductionKernel<T, 32, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 16: hipLaunchKernelGGL(( reductionKernel<T, 16, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 8: hipLaunchKernelGGL(( reductionKernel<T, 8, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 4: hipLaunchKernelGGL(( reductionKernel<T, 4, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 2: hipLaunchKernelGGL(( reductionKernel<T, 2, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; case 1: hipLaunchKernelGGL(( reductionKernel<T, 1, false>), dim3(NumBlocks), dim3(NumThreads), smemSize, 0, g_idata, g_odata, N, h_pfunc); break; } #ifdef DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif } // --- The last part of the reduction, which would be expensive to perform on the device, is executed on the host T *host_vector = (T *)malloc(NumBlocks * sizeof(T)); gpuErrchk(hipMemcpy(host_vector, g_odata, NumBlocks * sizeof(T), hipMemcpyDeviceToHost)); T sum_transformReduce = (T)0; for (int i=0; i<NumBlocks; i++) sum_transformReduce = sum_transformReduce + host_vector[i]; return sum_transformReduce; } template <class T> T transform_reduce(T *g_idata, unsigned int N, pointFunction_t *dev_pfunc) { #ifdef EXTERNAL gpuErrchk(hipMemcpyFromSymbol(&h_pfunc, *dev_pfunc, 
sizeof(pointFunction_t))); #endif T customizedDeviceResult = transform_reduce_inner(g_idata, N, h_pfunc); return customizedDeviceResult; } // --- Complete with your own favourite instantiations template float transform_reduce(float *, unsigned int, pointFunction_t *);
32bd3399a87ff04fb0c2653f93a74ce8fc089b52.cu
#include <stdio.h> #include "Utilities.cuh" #include "transform_reduce.cuh" #define BLOCKSIZE 512 #define warpSize 32 // --- Host-side function pointer pointFunction_t h_pfunc; // --- Uncomment if you want to apply CUDA error checking to the kernel launches //#define DEBUG //#define EXTERNAL /*******************************************************/ /* CALCULATING THE NEXT POWER OF 2 OF A CERTAIN NUMBER */ /*******************************************************/ unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /*************************************/ /* CHECK IF A NUMBER IS A POWER OF 2 */ /*************************************/ // --- Note: although x = 1 is a power of 2 (1 = 2^0), this routine returns 0 for x == 1. bool isPow2(unsigned int x) { if (x == 1) return 0; else return ((x&(x-1))==0); } /***************************/ /* TRANSFORMATION FUNCTION */ /***************************/ //extern "C" __host__ __device__ float transformation(const float * __restrict__, const int); //__device__ float trans(const float * __restrict__ x, const int i) { return transformation(x, i); }; // __host__ __device__ float trans(const float * __restrict__ x, const int i) { transformation(x, i); }; template <class T> //__host__ __device__ T transformation(const T * __restrict__ x, const int i) { return ((T)100 * (x[i+1] - x[i] * x[i]) * (x[i+1] - x[i] * x[i]) + (x[i] - (T)1) * (x[i] - (T)1)) ; } __host__ __device__ __forceinline__ T transformation(const T * __restrict__ x, const int i) { return ((T)100 * (x[i+1] - x[i] * x[i]) * (x[i+1] - x[i] * x[i]) + (x[i] - (T)1) * (x[i] - (T)1)) ; } //__host__ __device__ __forceinline__ T transformation(const T * __restrict__ x, const int i) { return x[i]; } //__host__ __device__ __forceinline__ T transformation(const T * __restrict__ x, const int i) { return log(x[i]) * x[i]; } //__host__ __device__ __forceinline__ float transformation(const float * __restrict__ x, 
const int i) { return (100.f * (x[i+1] - x[i] * x[i]) * (x[i+1] - x[i] * x[i]) + (x[i] - 1.f) * (x[i] - 1.f)) ; } /********************/ /* REDUCTION KERNEL */ /********************/ /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reductionKernel(T *g_idata, T *g_odata, unsigned int N, pointFunction_t pPointTransformation) { extern __shared__ T sdata[]; unsigned int tid = threadIdx.x; // Local thread index unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; // Global thread index - Fictitiously double the block dimension unsigned int gridSize = blockSize*2*gridDim.x; // --- Performs the first level of reduction in registers when reading from global memory on multiple elements per thread. // More blocks will result in a larger gridSize and therefore fewer elements per thread T mySum = 0; while (i < N) { #ifdef EXTERNAL mySum += (*pPointTransformation)(g_idata, i); #else mySum += transformation(g_idata, i); #endif // --- Ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < N) #ifdef EXTERNAL mySum += (*pPointTransformation)(g_idata, i+blockSize); #else mySum += transformation(g_idata, i+blockSize); #endif i += gridSize; } // --- Each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // --- Reduction in shared memory. Fully unrolled loop. 
if ((blockSize >= 512) && (tid < 256)) sdata[tid] = mySum = mySum + sdata[tid + 256]; __syncthreads(); if ((blockSize >= 256) && (tid < 128)) sdata[tid] = mySum = mySum + sdata[tid + 128]; __syncthreads(); if ((blockSize >= 128) && (tid < 64)) sdata[tid] = mySum = mySum + sdata[tid + 64]; __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) // --- Single warp reduction by shuffle operations if ( tid < 32 ) { // --- Last iteration removed from the for loop, but needed for shuffle reduction mySum += sdata[tid + 32]; // --- Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) mySum += __shfl_down(mySum, offset); //for (int offset=1; offset < warpSize; offset *= 2) mySum += __shfl_xor(mySum, i); } #else // --- Reduction within a single warp. Fully unrolled loop. if ((blockSize >= 64) && (tid < 32)) sdata[tid] = mySum = mySum + sdata[tid + 32]; __syncthreads(); if ((blockSize >= 32) && (tid < 16)) sdata[tid] = mySum = mySum + sdata[tid + 16]; __syncthreads(); if ((blockSize >= 16) && (tid < 8)) sdata[tid] = mySum = mySum + sdata[tid + 8]; __syncthreads(); if ((blockSize >= 8) && (tid < 4)) sdata[tid] = mySum = mySum + sdata[tid + 4]; __syncthreads(); if ((blockSize >= 4) && (tid < 2)) sdata[tid] = mySum = mySum + sdata[tid + 2]; __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) sdata[tid] = mySum = mySum + sdata[tid + 1]; __syncthreads(); #endif // --- Write result for this block to global memory. At the end of the kernel, global memory will contain the results for the summations of // individual blocks if (tid == 0) g_odata[blockIdx.x] = mySum; } /******************************/ /* REDUCTION WRAPPER FUNCTION */ /******************************/ template <class T> T transform_reduce_inner(T *g_idata, unsigned int N, pointFunction_t h_pfunc) { // --- Reduction parameters const int NumThreads = (N < BLOCKSIZE) ? nextPow2(N) : BLOCKSIZE; const int NumBlocks = (N + NumThreads - 1) / NumThreads; const int smemSize = (NumThreads <= 32) ? 
2 * NumThreads * sizeof(T) : NumThreads * sizeof(T); // --- Device memory space where storing the partial reduction results T *g_odata; gpuErrchk(cudaMalloc((void**)&g_odata, NumBlocks * sizeof(T))); if (isPow2(N)) { switch (NumThreads) { case 512: reductionKernel<T, 512, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 256: reductionKernel<T, 256, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 128: reductionKernel<T, 128, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 64: reductionKernel<T, 64, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 32: reductionKernel<T, 32, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 16: reductionKernel<T, 16, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 8: reductionKernel<T, 8, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 4: reductionKernel<T, 4, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 2: reductionKernel<T, 2, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 1: reductionKernel<T, 1, true><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; } #ifdef DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif } else { switch (NumThreads) { case 512: reductionKernel<T, 512, false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 256: reductionKernel<T, 256, false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 128: reductionKernel<T, 128, false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 64: reductionKernel<T, 64, false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 32: reductionKernel<T, 32, 
false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 16: reductionKernel<T, 16, false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 8: reductionKernel<T, 8, false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 4: reductionKernel<T, 4, false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 2: reductionKernel<T, 2, false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; case 1: reductionKernel<T, 1, false><<< NumBlocks, NumThreads, smemSize>>>(g_idata, g_odata, N, h_pfunc); break; } #ifdef DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif } // --- The last part of the reduction, which would be expensive to perform on the device, is executed on the host T *host_vector = (T *)malloc(NumBlocks * sizeof(T)); gpuErrchk(cudaMemcpy(host_vector, g_odata, NumBlocks * sizeof(T), cudaMemcpyDeviceToHost)); T sum_transformReduce = (T)0; for (int i=0; i<NumBlocks; i++) sum_transformReduce = sum_transformReduce + host_vector[i]; return sum_transformReduce; } template <class T> T transform_reduce(T *g_idata, unsigned int N, pointFunction_t *dev_pfunc) { #ifdef EXTERNAL gpuErrchk(cudaMemcpyFromSymbol(&h_pfunc, *dev_pfunc, sizeof(pointFunction_t))); #endif T customizedDeviceResult = transform_reduce_inner(g_idata, N, h_pfunc); return customizedDeviceResult; } // --- Complete with your own favourite instantiations template float transform_reduce(float *, unsigned int, pointFunction_t *);
dd1d4a5a21072eac01b174acd885dc19a17ba076.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <ctime> #include <iostream> #include <fstream> #include <math.h> #include <sstream> #include <stdlib.h> #include <stdio.h> #include <string> #include <utility> #include <time.h> #include <vector> #include<cuda.h> #include<cuda_runtime.h> #include<cuda_kernel.h> #define NUM_TESTS 5 //Number of threads can be configured as per computer #define NUM_THREADS 256 #define BLOCKS 1024 // must be number of cities being read in file #define NUM_CITIES 194 #define MAX_COORD 250 // this should match #threads, at least in the beginning #define POPULATION_SIZE NUM_THREADS #define NUM_POPULATIONS BLOCKS #define NUM_EVOLUTIONS 100 #define MUTATION_RATE 0.33 #define ELITISM true #define TOURNAMENT_SIZE 12 #include <city.h> #include <population.h> #include <tour.h> using namespace std; __global__ void initCuRand(hiprandState_t *randState) { int td = blockDimint tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS) return; hiprand_init(1337, tid, 0, &randState[tid]); } __global__ void evaluatePopulations(population_t *populations, const float *costTable) { // Get thread (particle) ID int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS) return; evalTour(populations[blockIdx.x].tours[threadIdx.x], costTable); } __global__ void selection(population_t *populations, hiprandState_t *randState, tour_t *parents) { // Get thread (particle) ID const int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS) return; if (ELITISM && blockIdx.x == 0) parents[tid*2] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE); else parents[tid*2] = tournamentSelection(populations[blockIdx.x], randState, tid); parents[tid*2+1] = tournamentSelection(populations[blockIdx.x], randState, tid); } __global__ void crossover(population_t *populations, tour_t *parents, hiprandState_t *randState, float *costTable, int 
index) { // Get thread (particle) ID int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS) return; populations[blockIdx.x].tours[threadIdx.x].cities[0] = parents[2*tid].cities[0]; city_t c1 = getValidNextCity(parents[tid*2], populations[blockIdx.x].tours[threadIdx.x], populations[blockIdx.x].tours[threadIdx.x].cities[index-1], index); city_t c2 = getValidNextCity(parents[tid*2+1], populations[blockIdx.x].tours[threadIdx.x], populations[blockIdx.x].tours[threadIdx.x].cities[index-1], index); // compare the two cities from parents to the last city that was chosen in the child city_t currentCity = populations[blockIdx.x].tours[threadIdx.x].cities[index-1]; if (costTable[c1.n*NUM_CITIES + currentCity.n] <= costTable[c2.n*NUM_CITIES + currentCity.n]) populations[blockIdx.x].tours[threadIdx.x].cities[index] = c1; else populations[blockIdx.x].tours[threadIdx.x].cities[index] = c2; } __global__ void mutate(population_t *populations, hiprandState_t *d_state) { // Get thread (particle) ID int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS) return; // pick random number between 0 and 1 hiprandState_t localState = d_state[tid]; // if random num is less than mutation_rate, perform mutation (swap two cities in tour) if (hiprand_uniform(&localState) < MUTATION_RATE) { int randNum1 = 1 + hiprand_uniform(&localState) * (NUM_CITIES - 1.0000001); int randNum2 = 1 + hiprand_uniform(&localState) * (NUM_CITIES - 1.0000001); city_t temp = populations[blockIdx.x].tours[threadIdx.x].cities[randNum1]; populations[blockIdx.x].tours[threadIdx.x].cities[randNum1] = populations[blockIdx.x].tours[threadIdx.x].cities[randNum2]; populations[blockIdx.x].tours[threadIdx.x].cities[randNum2] = temp; d_state[tid] = localState; } } __global__ void migrate(population_t *populations) { // Get thread (particle) ID int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS || threadIdx.x != 0) return; int indexOfLeastFitInNeighbor; if 
(blockIdx.x == BLOCKS-1) { indexOfLeastFitInNeighbor = getIndexOfLeastFit(populations[0]); populations[0].tours[indexOfLeastFitInNeighbor] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE); } else { indexOfLeastFitInNeighbor = getIndexOfLeastFit(populations[blockIdx.x+1]); populations[blockIdx.x+1].tours[indexOfLeastFitInNeighbor] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE); } } int main(int argc, char **argv) { printf("THREADS:%d\n", NUM_THREADS); printf("BLOCKS:%d\n", BLOCKS); printf("TOURNAMENT_SIZE:%d\n", TOURNAMENT_SIZE); printf("NUM_EVOLUTIONS:%d\n", NUM_EVOLUTIONS); // Build city distances table tour_t initialTour; float costTable[NUM_CITIES*NUM_CITIES]; population_t populations[BLOCKS]; tour_t parents[NUM_POPULATIONS*POPULATION_SIZE*2]; // READS INITIAL TOUR FROM FILE ifstream file("berlin52.txt"); readTourFromFile(initialTour, file); // Build cost table to save time computing distance between cities // - array lookups are cheaper than squaring, adding, and sqrting buildCostTable(initialTour, costTable); //Memory Allocation population_t *d_populations; hipMalloc((void **) &d_populations, BLOCKS * sizeof(population_t)); // array to store parents selected from tournament selection tour_t *d_parents; hipMalloc((void **) &d_parents, sizeof(tour_t) * BLOCKS * NUM_THREADS * 2); // cost table for crossover function (SCX crossover) float *d_costTable; hipMalloc((void **) &d_costTable, sizeof(float) * NUM_CITIES * NUM_CITIES); hipMemcpy(d_costTable, &costTable, sizeof(float) * NUM_CITIES * NUM_CITIES, hipMemcpyHostToDevice); hiprandState_t *d_state; hipMalloc((void**)&d_state, BLOCKS * NUM_THREADS * sizeof(hiprandState_t)); // collects run results tour_t tours[NUM_TESTS]; //MAIN LOOP for (int k = 0; k < NUM_TESTS; ++k) { // Initializes all populations to NUMTHREADS number of individuals, randomized // Done on CPU (host) for (int i = 0; i < BLOCKS; ++i) initializePop(populations[i], initialTour); // copies data from host to device 
for evolution hipMemcpy(d_populations, &populations, NUM_POPULATIONS * sizeof(population_t), hipMemcpyHostToDevice); // ---------------------------------------------- // Times execution of evolve population on gpu // ----------------------------------------------- float milliseconds = 0; hipEvent_t start, stop; hipEventCreate (&start); hipEventCreate (&stop); hipEventRecord (start); // ----------- // MAIN LOOP // ----------- // initialize random numbers array for tournament selection hipLaunchKernelGGL(( initCuRand) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_state); // figure out distance and fitness for each individual in population hipLaunchKernelGGL(( evaluatePopulations) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_costTable); for (int i = 0; i < NUM_EVOLUTIONS; ++i) { hipLaunchKernelGGL(( selection) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_state, d_parents); // breed the population with tournament selection and SCX crossover // perform computation parallelized, build children iteratively for (int j = 1; j < NUM_CITIES; ++j) hipLaunchKernelGGL(( crossover) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_parents, d_state, d_costTable, j); hipLaunchKernelGGL(( mutate) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_state); hipLaunchKernelGGL(( evaluatePopulations) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations, d_costTable); // migrate every 5 evolutions if (i % 3 == 0) hipLaunchKernelGGL(( migrate) , dim3(BLOCKS), dim3(NUM_THREADS) , 0, 0, d_populations); } // ----------------------------------- // END MAIN LOOP // ----------------------------------- hipEventRecord (stop); hipEventSynchronize (stop); hipEventElapsedTime (&milliseconds, start, stop); // copy memory back to device! 
hipMemcpy(&populations, d_populations, NUM_POPULATIONS * sizeof(population_t), hipMemcpyDeviceToHost); hipDeviceSynchronize(); checkForError(); //printPopulation(initialPopulation); tour_t bestIndivs[NUM_POPULATIONS]; for (int i = 0; i < NUM_POPULATIONS; ++i) bestIndivs[i] = getFittestTour(populations[i].tours, NUM_THREADS); tour_t fittest = getFittestTour(bestIndivs, NUM_POPULATIONS); // --------------------- // PRINTS OUTPUT TO FILE // --------------------- printf("%f %f\n", milliseconds/1000, fittest.distance); //printf("Program execution time: %f sec", timeInitGPUPop+timeInitHostPop+(milliseconds/1000)+evalPopTime); tours[k] = fittest; } hipFree(d_populations); hipFree(d_parents); hipFree(d_costTable); hipFree(d_state); // tour_t mostFittest = getFittestTour(tours, NUM_TESTS); // printf("\nThe fittest tour OVERALL has length %f\n\n", mostFittest.distance); // printf("Winning Tour:\n"); // printTour(mostFittest); return 0; }
dd1d4a5a21072eac01b174acd885dc19a17ba076.cu
#include <algorithm> #include <ctime> #include <iostream> #include <fstream> #include <math.h> #include <sstream> #include <stdlib.h> #include <stdio.h> #include <string> #include <utility> #include <time.h> #include <vector> #include<cuda.h> #include<cuda_runtime.h> #include<cuda_kernel.h> #define NUM_TESTS 5 //Number of threads can be configured as per computer #define NUM_THREADS 256 #define BLOCKS 1024 // must be number of cities being read in file #define NUM_CITIES 194 #define MAX_COORD 250 // this should match #threads, at least in the beginning #define POPULATION_SIZE NUM_THREADS #define NUM_POPULATIONS BLOCKS #define NUM_EVOLUTIONS 100 #define MUTATION_RATE 0.33 #define ELITISM true #define TOURNAMENT_SIZE 12 #include <city.h> #include <population.h> #include <tour.h> using namespace std; __global__ void initCuRand(curandState *randState) { int td = blockDimint tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS) return; curand_init(1337, tid, 0, &randState[tid]); } __global__ void evaluatePopulations(population_t *populations, const float *costTable) { // Get thread (particle) ID int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS) return; evalTour(populations[blockIdx.x].tours[threadIdx.x], costTable); } __global__ void selection(population_t *populations, curandState *randState, tour_t *parents) { // Get thread (particle) ID const int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS) return; if (ELITISM && blockIdx.x == 0) parents[tid*2] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE); else parents[tid*2] = tournamentSelection(populations[blockIdx.x], randState, tid); parents[tid*2+1] = tournamentSelection(populations[blockIdx.x], randState, tid); } __global__ void crossover(population_t *populations, tour_t *parents, curandState *randState, float *costTable, int index) { // Get thread (particle) ID int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= 
NUM_THREADS*BLOCKS) return; populations[blockIdx.x].tours[threadIdx.x].cities[0] = parents[2*tid].cities[0]; city_t c1 = getValidNextCity(parents[tid*2], populations[blockIdx.x].tours[threadIdx.x], populations[blockIdx.x].tours[threadIdx.x].cities[index-1], index); city_t c2 = getValidNextCity(parents[tid*2+1], populations[blockIdx.x].tours[threadIdx.x], populations[blockIdx.x].tours[threadIdx.x].cities[index-1], index); // compare the two cities from parents to the last city that was chosen in the child city_t currentCity = populations[blockIdx.x].tours[threadIdx.x].cities[index-1]; if (costTable[c1.n*NUM_CITIES + currentCity.n] <= costTable[c2.n*NUM_CITIES + currentCity.n]) populations[blockIdx.x].tours[threadIdx.x].cities[index] = c1; else populations[blockIdx.x].tours[threadIdx.x].cities[index] = c2; } __global__ void mutate(population_t *populations, curandState *d_state) { // Get thread (particle) ID int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS) return; // pick random number between 0 and 1 curandState localState = d_state[tid]; // if random num is less than mutation_rate, perform mutation (swap two cities in tour) if (curand_uniform(&localState) < MUTATION_RATE) { int randNum1 = 1 + curand_uniform(&localState) * (NUM_CITIES - 1.0000001); int randNum2 = 1 + curand_uniform(&localState) * (NUM_CITIES - 1.0000001); city_t temp = populations[blockIdx.x].tours[threadIdx.x].cities[randNum1]; populations[blockIdx.x].tours[threadIdx.x].cities[randNum1] = populations[blockIdx.x].tours[threadIdx.x].cities[randNum2]; populations[blockIdx.x].tours[threadIdx.x].cities[randNum2] = temp; d_state[tid] = localState; } } __global__ void migrate(population_t *populations) { // Get thread (particle) ID int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid >= NUM_THREADS*BLOCKS || threadIdx.x != 0) return; int indexOfLeastFitInNeighbor; if (blockIdx.x == BLOCKS-1) { indexOfLeastFitInNeighbor = getIndexOfLeastFit(populations[0]); 
populations[0].tours[indexOfLeastFitInNeighbor] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE); } else { indexOfLeastFitInNeighbor = getIndexOfLeastFit(populations[blockIdx.x+1]); populations[blockIdx.x+1].tours[indexOfLeastFitInNeighbor] = getFittestTour(populations[blockIdx.x].tours, POPULATION_SIZE); } } int main(int argc, char **argv) { printf("THREADS:%d\n", NUM_THREADS); printf("BLOCKS:%d\n", BLOCKS); printf("TOURNAMENT_SIZE:%d\n", TOURNAMENT_SIZE); printf("NUM_EVOLUTIONS:%d\n", NUM_EVOLUTIONS); // Build city distances table tour_t initialTour; float costTable[NUM_CITIES*NUM_CITIES]; population_t populations[BLOCKS]; tour_t parents[NUM_POPULATIONS*POPULATION_SIZE*2]; // READS INITIAL TOUR FROM FILE ifstream file("berlin52.txt"); readTourFromFile(initialTour, file); // Build cost table to save time computing distance between cities // - array lookups are cheaper than squaring, adding, and sqrting buildCostTable(initialTour, costTable); //Memory Allocation population_t *d_populations; cudaMalloc((void **) &d_populations, BLOCKS * sizeof(population_t)); // array to store parents selected from tournament selection tour_t *d_parents; cudaMalloc((void **) &d_parents, sizeof(tour_t) * BLOCKS * NUM_THREADS * 2); // cost table for crossover function (SCX crossover) float *d_costTable; cudaMalloc((void **) &d_costTable, sizeof(float) * NUM_CITIES * NUM_CITIES); cudaMemcpy(d_costTable, &costTable, sizeof(float) * NUM_CITIES * NUM_CITIES, cudaMemcpyHostToDevice); curandState *d_state; cudaMalloc((void**)&d_state, BLOCKS * NUM_THREADS * sizeof(curandState)); // collects run results tour_t tours[NUM_TESTS]; //MAIN LOOP for (int k = 0; k < NUM_TESTS; ++k) { // Initializes all populations to NUMTHREADS number of individuals, randomized // Done on CPU (host) for (int i = 0; i < BLOCKS; ++i) initializePop(populations[i], initialTour); // copies data from host to device for evolution cudaMemcpy(d_populations, &populations, NUM_POPULATIONS * 
sizeof(population_t), cudaMemcpyHostToDevice); // ---------------------------------------------- // Times execution of evolve population on gpu // ----------------------------------------------- float milliseconds = 0; cudaEvent_t start, stop; cudaEventCreate (&start); cudaEventCreate (&stop); cudaEventRecord (start); // ----------- // MAIN LOOP // ----------- // initialize random numbers array for tournament selection initCuRand <<< BLOCKS, NUM_THREADS >>> (d_state); // figure out distance and fitness for each individual in population evaluatePopulations <<< BLOCKS, NUM_THREADS >>> (d_populations, d_costTable); for (int i = 0; i < NUM_EVOLUTIONS; ++i) { selection <<< BLOCKS, NUM_THREADS >>> (d_populations, d_state, d_parents); // breed the population with tournament selection and SCX crossover // perform computation parallelized, build children iteratively for (int j = 1; j < NUM_CITIES; ++j) crossover <<< BLOCKS, NUM_THREADS >>> (d_populations, d_parents, d_state, d_costTable, j); mutate <<< BLOCKS, NUM_THREADS >>> (d_populations, d_state); evaluatePopulations <<< BLOCKS, NUM_THREADS >>> (d_populations, d_costTable); // migrate every 5 evolutions if (i % 3 == 0) migrate <<< BLOCKS, NUM_THREADS >>> (d_populations); } // ----------------------------------- // END MAIN LOOP // ----------------------------------- cudaEventRecord (stop); cudaEventSynchronize (stop); cudaEventElapsedTime (&milliseconds, start, stop); // copy memory back to device! 
cudaMemcpy(&populations, d_populations, NUM_POPULATIONS * sizeof(population_t), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); checkForError(); //printPopulation(initialPopulation); tour_t bestIndivs[NUM_POPULATIONS]; for (int i = 0; i < NUM_POPULATIONS; ++i) bestIndivs[i] = getFittestTour(populations[i].tours, NUM_THREADS); tour_t fittest = getFittestTour(bestIndivs, NUM_POPULATIONS); // --------------------- // PRINTS OUTPUT TO FILE // --------------------- printf("%f %f\n", milliseconds/1000, fittest.distance); //printf("Program execution time: %f sec", timeInitGPUPop+timeInitHostPop+(milliseconds/1000)+evalPopTime); tours[k] = fittest; } cudaFree(d_populations); cudaFree(d_parents); cudaFree(d_costTable); cudaFree(d_state); // tour_t mostFittest = getFittestTour(tours, NUM_TESTS); // printf("\nThe fittest tour OVERALL has length %f\n\n", mostFittest.distance); // printf("Winning Tour:\n"); // printTour(mostFittest); return 0; }
c6e739e13dfe2f1a003e25a46a330c9acbc38f57.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <Windows.h> #include <sstream> #include "../cuGameEngine/gdiPlusInit.cuh" #include "../cuGameEngine/cuSurface.cuh" #include "../cuGameEngine/renderWindow.cuh" #include "../cuGameEngine/mathUtils.cuh" #include "../cuGameEngine/sdfTextRenderer.cuh" __global__ void renderMandelbrot(cuPixel* buffer, int64_t width, int64_t height, double nLeft, double nRight, double nTop, double nBottom, float maxIterations, float logMaxIterations, float log2) { using dataType = float; int xIdx = blockDim.x * blockIdx.x + threadIdx.x; int yIdx = blockDim.y * blockIdx.y + threadIdx.y; if (xIdx < width && yIdx < height) { float iterations = 0; dataType x = mapf(xIdx, 0, width, nLeft, nRight), x1 = 0, x2 = 0; dataType y = mapf(yIdx, 0, height, nTop, nBottom), y1 = 0, y2 = 0; while (iterations < maxIterations && x2 + y2 < 4.0f) { y1 = (x1 + x1) * y1 + y; x1 = x2 - y2 + x; x2 = x1 * x1; y2 = y1 * y1; ++iterations; } float i = (iterations - (float)(log(log(x1 * x1 + y1 * y1))) / log2) * (iterations != maxIterations); buffer[yIdx * width + xIdx].r = mapf(log(i), 0, logMaxIterations, 0, 255); buffer[yIdx * width + xIdx].g = mapf(log(i), 0, logMaxIterations, 0, 16); buffer[yIdx * width + xIdx].b = mapf(log(i), 0, logMaxIterations, 0, 32); } } class mandelbrotRenderer : public cuEffect { private: renderWindow wnd; sdfTextRenderer renderer{ L"lucidaconsole.fnt", L"lucidaconsole.png" }; double nLeft = -2, nRight = 2, nTop = -2, nBottom = 2; bool isZooming = false; float zoomDirection = 0; std::chrono::steady_clock::time_point lastRenderTimeStamp; public: mandelbrotRenderer() : wnd(1024, 768, true, L"Mandelbrot") { wnd.pipeLine->addEffect(this); wnd.inputMgr->key += createBoundHandler(&mandelbrotRenderer::onKey, this); wnd.inputMgr->keyDown += createBoundHandler(&mandelbrotRenderer::onKeyDown, this); wnd.inputMgr->keyUp += 
createBoundHandler(&mandelbrotRenderer::onKeyUp, this); wnd.inputMgr->mouseWheel += createBoundHandler(&mandelbrotRenderer::onMouseWheel, this); } void run() { lastRenderTimeStamp = std::chrono::high_resolution_clock::now(); bool isRunning = true; wnd.runLoop(true, false, isRunning); } void onKey(keyboardEventArgs* e) { if (e->key == VK_ESCAPE) { ExitProcess(0); } } void onKeyDown(keyboardEventArgs* e) { if (e->c == 'W') { isZooming = true; zoomDirection = -0.05; } else if (e->c == 'S') { isZooming = true; zoomDirection =0.05; } } void onKeyUp(keyboardEventArgs* e) { if (e->c == 'W') { isZooming = false; } else if (e->c == 'S') { isZooming = false; } } void onMouseWheel(mouseEventArgs* e) { auto m = e->delta > 0 ? 0.9f : 1.1f; zoom(m); } void zoom(float m) { auto nx = (((float)wnd.inputMgr->prevCursorPos.x / wnd.width) * 2.0f) - 1.0f; auto ny = (((float)wnd.inputMgr->prevCursorPos.y / wnd.height) * 2.0f) - 1.0f; auto x = map(nx, -1, 1, nLeft, nRight); auto y = map(ny, -1, 1, nTop, nBottom); nLeft = x + (nLeft - x) * m; nRight = x + (nRight - x) * m; nTop = y + (nTop - y) * m; nBottom = y + (nBottom - y) * m; } void apply(cuSurface* in, cuSurface* out) { auto timeStamp = std::chrono::high_resolution_clock::now(); if (isZooming) { auto zoomFactor = (timeStamp - lastRenderTimeStamp).count() / (50.0 * (1000.0 * 1000.0)); zoom(1 + zoomDirection * zoomFactor); } lastRenderTimeStamp = timeStamp; int64_t width, height; dim3 blocks, threads; calcGrid(in, out, width, height, blocks, threads); auto iterations = 10000; hipLaunchKernelGGL(( renderMandelbrot), dim3(blocks), dim3(threads), 0, 0, out->buffer, width, height, nLeft, nRight, nTop * (height / (double)width), nBottom * (height / (double)width), iterations, log(iterations), log(2.0f)); std::wstringstream str; str << "FPS:\t\t" << wnd.lastFps << "\nFrametime:\t" << wnd.lastTotalTime << "us" << "\nIterations:\t" << iterations << "\nLeft:\t\t" << nLeft << "\nTop:\t\t" << nTop << "\nRight:\t\t" << nRight << "\nBottom:\t\t" 
<< nBottom << "\nZoom:\t\t" << (4.0 / (nRight - nLeft)) * 100.0 << "%"; renderer.renderString(out, str.str(), 4, 4, out->width, 0.5, cuPixel(255, 255, 255, 255), true); } }; int main() { auto renderer = new mandelbrotRenderer(); renderer->run(); }
c6e739e13dfe2f1a003e25a46a330c9acbc38f57.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <Windows.h> #include <sstream> #include "../cuGameEngine/gdiPlusInit.cuh" #include "../cuGameEngine/cuSurface.cuh" #include "../cuGameEngine/renderWindow.cuh" #include "../cuGameEngine/mathUtils.cuh" #include "../cuGameEngine/sdfTextRenderer.cuh" __global__ void renderMandelbrot(cuPixel* buffer, int64_t width, int64_t height, double nLeft, double nRight, double nTop, double nBottom, float maxIterations, float logMaxIterations, float log2) { using dataType = float; int xIdx = blockDim.x * blockIdx.x + threadIdx.x; int yIdx = blockDim.y * blockIdx.y + threadIdx.y; if (xIdx < width && yIdx < height) { float iterations = 0; dataType x = mapf(xIdx, 0, width, nLeft, nRight), x1 = 0, x2 = 0; dataType y = mapf(yIdx, 0, height, nTop, nBottom), y1 = 0, y2 = 0; while (iterations < maxIterations && x2 + y2 < 4.0f) { y1 = (x1 + x1) * y1 + y; x1 = x2 - y2 + x; x2 = x1 * x1; y2 = y1 * y1; ++iterations; } float i = (iterations - (float)(log(log(x1 * x1 + y1 * y1))) / log2) * (iterations != maxIterations); buffer[yIdx * width + xIdx].r = mapf(log(i), 0, logMaxIterations, 0, 255); buffer[yIdx * width + xIdx].g = mapf(log(i), 0, logMaxIterations, 0, 16); buffer[yIdx * width + xIdx].b = mapf(log(i), 0, logMaxIterations, 0, 32); } } class mandelbrotRenderer : public cuEffect { private: renderWindow wnd; sdfTextRenderer renderer{ L"lucidaconsole.fnt", L"lucidaconsole.png" }; double nLeft = -2, nRight = 2, nTop = -2, nBottom = 2; bool isZooming = false; float zoomDirection = 0; std::chrono::steady_clock::time_point lastRenderTimeStamp; public: mandelbrotRenderer() : wnd(1024, 768, true, L"Mandelbrot") { wnd.pipeLine->addEffect(this); wnd.inputMgr->key += createBoundHandler(&mandelbrotRenderer::onKey, this); wnd.inputMgr->keyDown += createBoundHandler(&mandelbrotRenderer::onKeyDown, this); wnd.inputMgr->keyUp += createBoundHandler(&mandelbrotRenderer::onKeyUp, this); wnd.inputMgr->mouseWheel += 
createBoundHandler(&mandelbrotRenderer::onMouseWheel, this); } void run() { lastRenderTimeStamp = std::chrono::high_resolution_clock::now(); bool isRunning = true; wnd.runLoop(true, false, isRunning); } void onKey(keyboardEventArgs* e) { if (e->key == VK_ESCAPE) { ExitProcess(0); } } void onKeyDown(keyboardEventArgs* e) { if (e->c == 'W') { isZooming = true; zoomDirection = -0.05; } else if (e->c == 'S') { isZooming = true; zoomDirection =0.05; } } void onKeyUp(keyboardEventArgs* e) { if (e->c == 'W') { isZooming = false; } else if (e->c == 'S') { isZooming = false; } } void onMouseWheel(mouseEventArgs* e) { auto m = e->delta > 0 ? 0.9f : 1.1f; zoom(m); } void zoom(float m) { auto nx = (((float)wnd.inputMgr->prevCursorPos.x / wnd.width) * 2.0f) - 1.0f; auto ny = (((float)wnd.inputMgr->prevCursorPos.y / wnd.height) * 2.0f) - 1.0f; auto x = map(nx, -1, 1, nLeft, nRight); auto y = map(ny, -1, 1, nTop, nBottom); nLeft = x + (nLeft - x) * m; nRight = x + (nRight - x) * m; nTop = y + (nTop - y) * m; nBottom = y + (nBottom - y) * m; } void apply(cuSurface* in, cuSurface* out) { auto timeStamp = std::chrono::high_resolution_clock::now(); if (isZooming) { auto zoomFactor = (timeStamp - lastRenderTimeStamp).count() / (50.0 * (1000.0 * 1000.0)); zoom(1 + zoomDirection * zoomFactor); } lastRenderTimeStamp = timeStamp; int64_t width, height; dim3 blocks, threads; calcGrid(in, out, width, height, blocks, threads); auto iterations = 10000; renderMandelbrot<<<blocks, threads>>>(out->buffer, width, height, nLeft, nRight, nTop * (height / (double)width), nBottom * (height / (double)width), iterations, log(iterations), log(2.0f)); std::wstringstream str; str << "FPS:\t\t" << wnd.lastFps << "\nFrametime:\t" << wnd.lastTotalTime << "us" << "\nIterations:\t" << iterations << "\nLeft:\t\t" << nLeft << "\nTop:\t\t" << nTop << "\nRight:\t\t" << nRight << "\nBottom:\t\t" << nBottom << "\nZoom:\t\t" << (4.0 / (nRight - nLeft)) * 100.0 << "%"; renderer.renderString(out, str.str(), 4, 4, 
out->width, 0.5, cuPixel(255, 255, 255, 255), true); } }; int main() { auto renderer = new mandelbrotRenderer(); renderer->run(); }
7a9e5cf744a7468d02e4320c871719751571a754.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2017, Miroslav Stoyanov * * This file is part of * Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions * and the following disclaimer in the documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED. 
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT, * COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE. * THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF, * IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE. */ #ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU #define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU #include "tsgAcceleratedDataStructures.hpp" #include "tsgCudaLinearAlgebra.hpp" #include "tsgCudaBasisEvaluations.hpp" // several kernels assume a linear distribution of the threads and can be executed with "practically unlimited" number of threads // thus we can set this to the CUDA max number of threads, based on the current cuda version constexpr int _MAX_CUDA_THREADS = 1024; /* * Create a 1-D CUDA thread grid using the total_threads and number of threads per block. * Basically, computes the number of blocks but no more than 65536. */ struct ThreadGrid1d{ // Compute the threads and blocks. ThreadGrid1d(long long total_threads, long long num_per_block) : threads(num_per_block), blocks(::min(total_threads / threads + ((total_threads % threads == 0) ? 0 : 1), 65536ll)) {} // number of threads int const threads; // number of blocks int const blocks; }; namespace TasGrid{ template<typename T> void TasGpu::dtrans2can(bool use01, int dims, int num_x, int pad_size, double const *gpu_trans_a, double const *gpu_trans_b, T const *gpu_x_transformed, T *gpu_x_canonical){ int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 
0 : 1); if (num_blocks >= 65536) num_blocks = 65536; hipLaunchKernelGGL(( tasgpu_transformed_to_canonical<T, double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), (2*pad_size) * sizeof(double), 0, dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical); if (use01)hipLaunchKernelGGL(( tasgpu_m11_to_01<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims * num_x, gpu_x_canonical); } template void TasGpu::dtrans2can<double>(bool, int, int, int, double const*, double const*, double const*, double*); template void TasGpu::dtrans2can<float>(bool, int, int, int, double const*, double const*, float const*, float*); // local polynomial basis functions, DENSE algorithm template<typename T> void TasGpu::devalpwpoly(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const T *gpu_x, const T *gpu_nodes, const T *gpu_support, T *gpu_y){ // each block thread runs 1024 threads and processes 32 points (or basis functions) int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 
0 : 1); // order == 1 is considered "default" so that the compiler doesn't complain about missing default statement // semilocalp cannot have order less than 2, only rule_localp can have order 0 (this gets overwrittein in makeLocalPolynomialGrid()) if (rule == rule_localp){ switch(order){ case 0: hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 0, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); break; case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 2, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); break; default: hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 1, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); } }else if (rule == rule_localp0){ switch(order){ case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 2, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); break; default: hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 1, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); } }else if (rule == rule_localpb){ switch(order){ case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 2, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); break; default: hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 1, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); } }else if (rule == rule_semilocalp){ hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 2, rule_semilocalp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); }else{ // rule == wavelet hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 1, rule_wavelet, 32, 64>), dim3(num_blocks), dim3(1024), 
0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); } } template void TasGpu::devalpwpoly<double>(int, TypeOneDRule, int, int, int, const double*, const double*, const double*, double*); template void TasGpu::devalpwpoly<float>(int, TypeOneDRule, int, int, int, const float*, const float*, const float*, float*); // there is a switch statement that realizes templates for each combination of rule/order // make one function that covers that switch, the rest is passed from devalpwpoly_sparse template<typename T, int THREADS, int TOPLEVEL, bool fill> inline void devalpwpoly_sparse_realize_rule_order(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const T *x, const T *nodes, const T *support, const int *hpntr, const int *hindx, int num_roots, const int *roots, int *spntr, int *sindx, T *svals){ int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 0 : 1); if (num_blocks >= 65536) num_blocks = 65536; if (rule == rule_localp){ switch(order){ case 0: hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0, dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); break; case 2: hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0, dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); break; default: hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0, dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); } }else if (rule == rule_localp0){ switch(order){ case 2: hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0, dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, 
sindx, svals); break; default: hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0, dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); } }else if (rule == rule_localpb){ switch(order){ case 2: hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0, dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); break; default: hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0, dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); } }else{ // rule == rule_semilocalp hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0, dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); } } // local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compue the non-zeros and one pass to evaluate) template<typename T> void TasGpu::devalpwpoly_sparse(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const T *gpu_x, const GpuVector<T> &gpu_nodes, const GpuVector<T> &gpu_support, const GpuVector<int> &gpu_hpntr, const GpuVector<int> &gpu_hindx, const GpuVector<int> &gpu_hroots, GpuVector<int> &gpu_spntr, GpuVector<int> &gpu_sindx, GpuVector<T> &gpu_svals){ gpu_spntr.resize(num_x + 1); // call with fill == false to count the non-zeros per row of the matrix devalpwpoly_sparse_realize_rule_order<T, 64, 46, false> (order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(), gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0); std::vector<int> cpu_spntr; gpu_spntr.unload(cpu_spntr); cpu_spntr[0] = 0; 
int nz = 0; for(auto &i : cpu_spntr){ i += nz; nz = i; } gpu_spntr.load(cpu_spntr); gpu_sindx.resize(nz); gpu_svals.resize(nz); // call with fill == true to load the non-zeros devalpwpoly_sparse_realize_rule_order<T, 64, 46, true> (order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(), gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data()); } template void TasGpu::devalpwpoly_sparse<double>(int, TypeOneDRule, int, int, int, const double*, const GpuVector<double>&, const GpuVector<double>&, const GpuVector<int>&, const GpuVector<int>&, const GpuVector<int>&, GpuVector<int>&, GpuVector<int>&, GpuVector<double>&); template void TasGpu::devalpwpoly_sparse<float>(int, TypeOneDRule, int, int, int, const float*, const GpuVector<float>&, const GpuVector<float>&, const GpuVector<int>&, const GpuVector<int>&, const GpuVector<int>&, GpuVector<int>&, GpuVector<int>&, GpuVector<float>&); // Sequence Grid basis evaluations template<typename T> void TasGpu::devalseq(int dims, int num_x, const std::vector<int> &max_levels, const T *gpu_x, const GpuVector<int> &num_nodes, const GpuVector<int> &points, const GpuVector<T> &nodes, const GpuVector<T> &coeffs, T *gpu_result){ std::vector<int> offsets(dims); offsets[0] = 0; for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1); size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1); int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l; GpuVector<int> gpu_offsets(offsets); GpuVector<T> cache1D(num_total); int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1); hipLaunchKernelGGL(( tasgpu_dseq_build_cache<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data()); num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 
0 : 1); hipLaunchKernelGGL(( tasgpu_dseq_eval_sharedpoints<T, 32>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result); } template void TasGpu::devalseq<double>(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const GpuVector<int> &num_nodes, const GpuVector<int> &points, const GpuVector<double> &nodes, const GpuVector<double> &coeffs, double *gpu_result); template void TasGpu::devalseq<float>(int dims, int num_x, const std::vector<int> &max_levels, const float *gpu_x, const GpuVector<int> &num_nodes, const GpuVector<int> &points, const GpuVector<float> &nodes, const GpuVector<float> &coeffs, float *gpu_result); // Fourier Grid basis evaluations template<typename T> void TasGpu::devalfor(int dims, int num_x, const std::vector<int> &max_levels, const T *gpu_x, const GpuVector<int> &num_nodes, const GpuVector<int> &points, T *gpu_wreal, typename GpuVector<T>::value_type *gpu_wimag){ std::vector<int> max_nodes(dims); for(int j=0; j<dims; j++){ int n = 1; for(int i=0; i<max_levels[j]; i++) n *= 3; max_nodes[j] = n; } std::vector<int> offsets(dims); offsets[0] = 0; for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * (max_nodes[d-1] + 1); size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1); GpuVector<int> gpu_offsets(offsets); GpuVector<T> cache1D(num_total); int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1); hipLaunchKernelGGL(( tasgpu_dfor_build_cache<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data()); num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 
0 : 1); if (gpu_wimag == 0){ hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<T, 32, true>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0); }else{ hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<T, 32, false>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag); } } template void TasGpu::devalfor<double>(int, int, const std::vector<int>&, const double*, const GpuVector<int>&, const GpuVector<int>&, double*, double*); template void TasGpu::devalfor<float>(int, int, const std::vector<int>&, const float*, const GpuVector<int>&, const GpuVector<int>&, float*, float*); template<typename T> void TasGpu::devalglo(bool is_nested, bool is_clenshawcurtis0, int dims, int num_x, int num_p, int num_basis, T const *gpu_x, GpuVector<T> const &nodes, GpuVector<T> const &coeff, GpuVector<T> const &tensor_weights, GpuVector<int> const &nodes_per_level, GpuVector<int> const &offset_per_level, GpuVector<int> const &map_dimension, GpuVector<int> const &map_level, GpuVector<int> const &active_tensors, GpuVector<int> const &active_num_points, GpuVector<int> const &dim_offsets, GpuVector<int> const &map_tensor, GpuVector<int> const &map_index, GpuVector<int> const &map_reference, T *gpu_result){ GpuVector<T> cache(num_x, num_basis); int num_blocks = (int) map_dimension.size(); if (num_blocks >= 65536) num_blocks = 65536; if (is_nested){ if (is_clenshawcurtis0){ hipLaunchKernelGGL(( tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, true, true>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(), nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(), map_dimension.data(), map_level.data(), cache.data()); }else{ hipLaunchKernelGGL(( tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, true, false>), dim3(num_blocks), 
dim3(_MAX_CUDA_THREADS), 0, 0, dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(), nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(), map_dimension.data(), map_level.data(), cache.data()); } }else{ hipLaunchKernelGGL(( tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, false, false>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(), nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(), map_dimension.data(), map_level.data(), cache.data()); } int mat_size = num_x * num_p; num_blocks = num_x / _MAX_CUDA_THREADS + ((mat_size % _MAX_CUDA_THREADS == 0) ? 0 : 1); if (num_blocks >= 65536) num_blocks = 65536; hipLaunchKernelGGL(( tasgpu_dglo_eval_zero<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, mat_size, gpu_result); num_blocks = (int) map_tensor.size(); if (num_blocks >= 65536) num_blocks = 65536; hipLaunchKernelGGL(( tasgpu_dglo_eval_sharedpoints<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims, num_x, (int) map_tensor.size(), num_p, cache.data(), tensor_weights.data(), offset_per_level.data(), dim_offsets.data(), active_tensors.data(), active_num_points.data(), map_tensor.data(), map_index.data(), map_reference.data(), gpu_result); } template void TasGpu::devalglo<double>(bool, bool, int, int, int, int, double const*, GpuVector<double> const&, GpuVector<double> const&, GpuVector<double> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, double*); template void TasGpu::devalglo<float>(bool, bool, int, int, int, int, float const*, GpuVector<float> const&, GpuVector<float> const&, GpuVector<float> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, 
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, float*); void TasGpu::fillDataGPU(double value, long long n, long long stride, double data[]){ if (stride == 1){ ThreadGrid1d tgrid(n, _MAX_CUDA_THREADS); hipLaunchKernelGGL(( tascuda_vfill<double, _MAX_CUDA_THREADS>), dim3(tgrid.blocks), dim3(tgrid.threads), 0, 0, n, data, value); }else{ ThreadGrid1d tgrid(n, 32); hipLaunchKernelGGL(( tascuda_sfill<double, 32>), dim3(tgrid.blocks), dim3(tgrid.threads), 0, 0, n, stride, data, value); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Linear Algebra //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__ void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1); blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1); while(blocks > 65536) blocks = 65536; hipLaunchKernelGGL(( tasgpu_cudaTgemm<double, 32, 96>), dim3(blocks), dim3(1024), 0, 0, M, N, K, gpu_a, gpu_b, gpu_c); } void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){ int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1); hipLaunchKernelGGL(( tasgpu_sparse_matmul<double, 64>), dim3(blocks), dim3(64), 0, 0, M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C); } void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){ int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 
0 : 1); if (num_blocks< 65536){ hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C); }else{ num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1); if (num_blocks< 65536){ hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C); }else{ num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1); if (num_blocks< 65536){ hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C); } } } } void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){ int n = num_rows * num_columns; int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1); if (num_blocks >= 65536) num_blocks = 65536; hipLaunchKernelGGL(( tascuda_fill<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, n, 0.0, destination); num_blocks = num_rows; if (num_blocks >= 65536) num_blocks = 65536; hipLaunchKernelGGL(( tascuda_sparse_to_dense<double, 64>), dim3(num_blocks), dim3(64), 0, 0, num_rows, num_columns, pntr, indx, vals, destination); } #endif } #endif
7a9e5cf744a7468d02e4320c871719751571a754.cu
/* * Copyright (c) 2017, Miroslav Stoyanov * * This file is part of * Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions * and the following disclaimer in the documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED. 
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT, * COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE. * THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF, * IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE. */ #ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU #define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU #include "tsgAcceleratedDataStructures.hpp" #include "tsgCudaLinearAlgebra.hpp" #include "tsgCudaBasisEvaluations.hpp" // several kernels assume a linear distribution of the threads and can be executed with "practically unlimited" number of threads // thus we can set this to the CUDA max number of threads, based on the current cuda version constexpr int _MAX_CUDA_THREADS = 1024; /* * Create a 1-D CUDA thread grid using the total_threads and number of threads per block. * Basically, computes the number of blocks but no more than 65536. */ struct ThreadGrid1d{ // Compute the threads and blocks. ThreadGrid1d(long long total_threads, long long num_per_block) : threads(num_per_block), blocks(std::min(total_threads / threads + ((total_threads % threads == 0) ? 0 : 1), 65536ll)) {} // number of threads int const threads; // number of blocks int const blocks; }; namespace TasGrid{ template<typename T> void TasGpu::dtrans2can(bool use01, int dims, int num_x, int pad_size, double const *gpu_trans_a, double const *gpu_trans_b, T const *gpu_x_transformed, T *gpu_x_canonical){ int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 
0 : 1); if (num_blocks >= 65536) num_blocks = 65536; tasgpu_transformed_to_canonical<T, double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS, (2*pad_size) * sizeof(double)>>>(dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical); if (use01) tasgpu_m11_to_01<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(dims * num_x, gpu_x_canonical); } template void TasGpu::dtrans2can<double>(bool, int, int, int, double const*, double const*, double const*, double*); template void TasGpu::dtrans2can<float>(bool, int, int, int, double const*, double const*, float const*, float*); // local polynomial basis functions, DENSE algorithm template<typename T> void TasGpu::devalpwpoly(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const T *gpu_x, const T *gpu_nodes, const T *gpu_support, T *gpu_y){ // each block thread runs 1024 threads and processes 32 points (or basis functions) int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 0 : 1); // order == 1 is considered "default" so that the compiler doesn't complain about missing default statement // semilocalp cannot have order less than 2, only rule_localp can have order 0 (this gets overwrittein in makeLocalPolynomialGrid()) if (rule == rule_localp){ switch(order){ case 0: tasgpu_devalpwpoly<T, 0, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); break; case 2: tasgpu_devalpwpoly<T, 2, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); break; default: tasgpu_devalpwpoly<T, 1, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); } }else if (rule == rule_localp0){ switch(order){ case 2: tasgpu_devalpwpoly<T, 2, rule_localp0, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); break; default: tasgpu_devalpwpoly<T, 1, rule_localp0, 32, 64><<<num_blocks, 
1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); } }else if (rule == rule_localpb){ switch(order){ case 2: tasgpu_devalpwpoly<T, 2, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); break; default: tasgpu_devalpwpoly<T, 1, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); } }else if (rule == rule_semilocalp){ tasgpu_devalpwpoly<T, 2, rule_semilocalp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); }else{ // rule == wavelet tasgpu_devalpwpoly<T, 1, rule_wavelet, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y); } } template void TasGpu::devalpwpoly<double>(int, TypeOneDRule, int, int, int, const double*, const double*, const double*, double*); template void TasGpu::devalpwpoly<float>(int, TypeOneDRule, int, int, int, const float*, const float*, const float*, float*); // there is a switch statement that realizes templates for each combination of rule/order // make one function that covers that switch, the rest is passed from devalpwpoly_sparse template<typename T, int THREADS, int TOPLEVEL, bool fill> inline void devalpwpoly_sparse_realize_rule_order(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const T *x, const T *nodes, const T *support, const int *hpntr, const int *hindx, int num_roots, const int *roots, int *spntr, int *sindx, T *svals){ int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 
0 : 1); if (num_blocks >= 65536) num_blocks = 65536; if (rule == rule_localp){ switch(order){ case 0: tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill><<<num_blocks, THREADS>>> (dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); break; case 2: tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill><<<num_blocks, THREADS>>> (dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); break; default: tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill><<<num_blocks, THREADS>>> (dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); } }else if (rule == rule_localp0){ switch(order){ case 2: tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill><<<num_blocks, THREADS>>> (dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); break; default: tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill><<<num_blocks, THREADS>>> (dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); } }else if (rule == rule_localpb){ switch(order){ case 2: tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill><<<num_blocks, THREADS>>> (dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); break; default: tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill><<<num_blocks, THREADS>>> (dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); } }else{ // rule == rule_semilocalp tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill><<<num_blocks, THREADS>>> (dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals); } } // local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compue the non-zeros and one pass to 
evaluate) template<typename T> void TasGpu::devalpwpoly_sparse(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const T *gpu_x, const GpuVector<T> &gpu_nodes, const GpuVector<T> &gpu_support, const GpuVector<int> &gpu_hpntr, const GpuVector<int> &gpu_hindx, const GpuVector<int> &gpu_hroots, GpuVector<int> &gpu_spntr, GpuVector<int> &gpu_sindx, GpuVector<T> &gpu_svals){ gpu_spntr.resize(num_x + 1); // call with fill == false to count the non-zeros per row of the matrix devalpwpoly_sparse_realize_rule_order<T, 64, 46, false> (order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(), gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0); std::vector<int> cpu_spntr; gpu_spntr.unload(cpu_spntr); cpu_spntr[0] = 0; int nz = 0; for(auto &i : cpu_spntr){ i += nz; nz = i; } gpu_spntr.load(cpu_spntr); gpu_sindx.resize(nz); gpu_svals.resize(nz); // call with fill == true to load the non-zeros devalpwpoly_sparse_realize_rule_order<T, 64, 46, true> (order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(), gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data()); } template void TasGpu::devalpwpoly_sparse<double>(int, TypeOneDRule, int, int, int, const double*, const GpuVector<double>&, const GpuVector<double>&, const GpuVector<int>&, const GpuVector<int>&, const GpuVector<int>&, GpuVector<int>&, GpuVector<int>&, GpuVector<double>&); template void TasGpu::devalpwpoly_sparse<float>(int, TypeOneDRule, int, int, int, const float*, const GpuVector<float>&, const GpuVector<float>&, const GpuVector<int>&, const GpuVector<int>&, const GpuVector<int>&, GpuVector<int>&, GpuVector<int>&, GpuVector<float>&); // Sequence Grid basis evaluations template<typename T> void TasGpu::devalseq(int dims, int num_x, const std::vector<int> &max_levels, const T *gpu_x, const GpuVector<int> &num_nodes, const 
GpuVector<int> &points, const GpuVector<T> &nodes, const GpuVector<T> &coeffs, T *gpu_result){ std::vector<int> offsets(dims); offsets[0] = 0; for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1); size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1); int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l; GpuVector<int> gpu_offsets(offsets); GpuVector<T> cache1D(num_total); int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1); tasgpu_dseq_build_cache<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>> (dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data()); num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1); tasgpu_dseq_eval_sharedpoints<T, 32><<<num_blocks, 1024>>> (dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result); } template void TasGpu::devalseq<double>(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const GpuVector<int> &num_nodes, const GpuVector<int> &points, const GpuVector<double> &nodes, const GpuVector<double> &coeffs, double *gpu_result); template void TasGpu::devalseq<float>(int dims, int num_x, const std::vector<int> &max_levels, const float *gpu_x, const GpuVector<int> &num_nodes, const GpuVector<int> &points, const GpuVector<float> &nodes, const GpuVector<float> &coeffs, float *gpu_result); // Fourier Grid basis evaluations template<typename T> void TasGpu::devalfor(int dims, int num_x, const std::vector<int> &max_levels, const T *gpu_x, const GpuVector<int> &num_nodes, const GpuVector<int> &points, T *gpu_wreal, typename GpuVector<T>::value_type *gpu_wimag){ std::vector<int> max_nodes(dims); for(int j=0; j<dims; j++){ int n = 1; for(int i=0; i<max_levels[j]; i++) n *= 3; max_nodes[j] = n; } std::vector<int> offsets(dims); offsets[0] = 0; for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * 
(max_nodes[d-1] + 1); size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1); GpuVector<int> gpu_offsets(offsets); GpuVector<T> cache1D(num_total); int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1); tasgpu_dfor_build_cache<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>> (dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data()); num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1); if (gpu_wimag == 0){ tasgpu_dfor_eval_sharedpoints<T, 32, true><<<num_blocks, 1024>>> (dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0); }else{ tasgpu_dfor_eval_sharedpoints<T, 32, false><<<num_blocks, 1024>>> (dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag); } } template void TasGpu::devalfor<double>(int, int, const std::vector<int>&, const double*, const GpuVector<int>&, const GpuVector<int>&, double*, double*); template void TasGpu::devalfor<float>(int, int, const std::vector<int>&, const float*, const GpuVector<int>&, const GpuVector<int>&, float*, float*); template<typename T> void TasGpu::devalglo(bool is_nested, bool is_clenshawcurtis0, int dims, int num_x, int num_p, int num_basis, T const *gpu_x, GpuVector<T> const &nodes, GpuVector<T> const &coeff, GpuVector<T> const &tensor_weights, GpuVector<int> const &nodes_per_level, GpuVector<int> const &offset_per_level, GpuVector<int> const &map_dimension, GpuVector<int> const &map_level, GpuVector<int> const &active_tensors, GpuVector<int> const &active_num_points, GpuVector<int> const &dim_offsets, GpuVector<int> const &map_tensor, GpuVector<int> const &map_index, GpuVector<int> const &map_reference, T *gpu_result){ GpuVector<T> cache(num_x, num_basis); int num_blocks = (int) map_dimension.size(); if (num_blocks >= 65536) num_blocks = 65536; if (is_nested){ if (is_clenshawcurtis0){ tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, true, 
true><<<num_blocks, _MAX_CUDA_THREADS>>> (dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(), nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(), map_dimension.data(), map_level.data(), cache.data()); }else{ tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, true, false><<<num_blocks, _MAX_CUDA_THREADS>>> (dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(), nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(), map_dimension.data(), map_level.data(), cache.data()); } }else{ tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, false, false><<<num_blocks, _MAX_CUDA_THREADS>>> (dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(), nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(), map_dimension.data(), map_level.data(), cache.data()); } int mat_size = num_x * num_p; num_blocks = num_x / _MAX_CUDA_THREADS + ((mat_size % _MAX_CUDA_THREADS == 0) ? 0 : 1); if (num_blocks >= 65536) num_blocks = 65536; tasgpu_dglo_eval_zero<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(mat_size, gpu_result); num_blocks = (int) map_tensor.size(); if (num_blocks >= 65536) num_blocks = 65536; tasgpu_dglo_eval_sharedpoints<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>> (dims, num_x, (int) map_tensor.size(), num_p, cache.data(), tensor_weights.data(), offset_per_level.data(), dim_offsets.data(), active_tensors.data(), active_num_points.data(), map_tensor.data(), map_index.data(), map_reference.data(), gpu_result); } template void TasGpu::devalglo<double>(bool, bool, int, int, int, int, double const*, GpuVector<double> const&, GpuVector<double> const&, GpuVector<double> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, double*); template void TasGpu::devalglo<float>(bool, bool, int, 
int, int, int, float const*, GpuVector<float> const&, GpuVector<float> const&, GpuVector<float> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, float*); void TasGpu::fillDataGPU(double value, long long n, long long stride, double data[]){ if (stride == 1){ ThreadGrid1d tgrid(n, _MAX_CUDA_THREADS); tascuda_vfill<double, _MAX_CUDA_THREADS><<<tgrid.blocks, tgrid.threads>>>(n, data, value); }else{ ThreadGrid1d tgrid(n, 32); tascuda_sfill<double, 32><<<tgrid.blocks, tgrid.threads>>>(n, stride, data, value); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Linear Algebra //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__ void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1); blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1); while(blocks > 65536) blocks = 65536; tasgpu_cudaTgemm<double, 32, 96><<<blocks, 1024>>>(M, N, K, gpu_a, gpu_b, gpu_c); } void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){ int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1); tasgpu_sparse_matmul<double, 64><<<blocks, 64>>>(M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C); } void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){ int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 
0 : 1); if (num_blocks< 65536){ tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C); }else{ num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1); if (num_blocks< 65536){ tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C); }else{ num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1); if (num_blocks< 65536){ tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C); } } } } void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){ int n = num_rows * num_columns; int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1); if (num_blocks >= 65536) num_blocks = 65536; tascuda_fill<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(n, 0.0, destination); num_blocks = num_rows; if (num_blocks >= 65536) num_blocks = 65536; tascuda_sparse_to_dense<double, 64><<<num_blocks, 64>>>(num_rows, num_columns, pntr, indx, vals, destination); } #endif } #endif
ada313030dc137e9c3895420fc1230f39fa86b6f.hip
// !!! This is a file automatically generated by hipify!!! #include "XBeachGPU.h" void CUDA_CHECK(hipError_t CUDerr) { if (hipSuccess != CUDerr) { fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString(CUDerr)); write_text_to_log_file("Cuda error in file " + std::string(__FILE__) + " in line " + std::to_string(__LINE__) + " " + std::string(hipGetErrorString(CUDerr))); exit(EXIT_FAILURE); } } XBGPUParam waveinitGPU(XBGPUParam Param, std::vector<Wavebndparam> wavebnd) { // Initialize wave model int nx, ny; nx = Param.nx; ny = Param.ny; if (Param.dtheta > 0.0) { Param.ntheta = round((Param.thetamax - Param.thetamin) / Param.dtheta); } else { if (Param.ntheta == 0) { Param.ntheta = 1; } Param.dtheta = (Param.thetamax - Param.thetamin) / Param.ntheta; } ntheta = Param.ntheta; dtheta = Param.dtheta; printf("ntheta=%d\tdtheta=%f\n", ntheta, dtheta); write_text_to_log_file("ntheta=" + std::to_string(ntheta) + "\t dtheta=" + std::to_string(dtheta)); //printf("nwavbnd=%d\n", nwavbnd); if (Param.wavebndtype == 1) { nwavbnd = wavebnd.size(); // one Stfile/qfile will be used throughout the simulation } if (Param.wavebndtype >= 2) { nwavbnd = ceil(Param.rtlength / Param.dtbc)+1; // +1 needed here } theta = (DECNUM *)malloc(ntheta*sizeof(DECNUM)); Stfile = (double *)malloc(ntheta*ny*nwavbnd*sizeof(double)); qfile = (double *)malloc(4 * ny*nwavbnd*sizeof(double)); Tpfile = (double *)malloc(nwavbnd*sizeof(double)); //dummy=(double *)malloc(1000*sizeof(double)); qbndnew = (DECNUM *)malloc(4 * ny*sizeof(DECNUM)); qbndold = (DECNUM *)malloc(4 * ny*sizeof(DECNUM)); St = (DECNUM *)malloc(ntheta*ny*sizeof(DECNUM)); Stold = (DECNUM *)malloc(ntheta*ny*sizeof(DECNUM)); Stnew = (DECNUM *)malloc(ntheta*ny*sizeof(DECNUM)); cxsth = (DECNUM *)malloc(ntheta*sizeof(DECNUM)); sxnth = (DECNUM *)malloc(ntheta*sizeof(DECNUM)); for (int i = 0; i < ntheta; i++) { theta[i] = i*(Param.dtheta)+Param.thetamin + 0.5f*Param.dtheta; cxsth[i] = cos(theta[i]); 
sxnth[i] = sin(theta[i]); //printf("theta=%f\tcxsth=%f\tsxnth=%f\n", theta[i], cxsth[i], sxnth[i]); } dang = theta[1] - theta[0]; //dtheta=dang; ee = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); dd = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); wete = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); rr = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); cgx = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); cgy = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); cx = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); cy = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); ctheta = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); thet = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); //drr=(DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); printf("Generating initial wave bnd data\n"); write_text_to_log_file("Generating initial wave bnd data"); if (Param.wavebndtype == 1) { //GenCstWave(Param, wavebnd, Stfile, qfile, Tpfile); GenCstWave(Param, wavebnd, theta, Stfile, qfile, Tpfile); Trep = Tpfile[0]; //readStatbnd(nx, ny, ntheta, Param.rho, Param.g, Param.wavebndfile.c_str(), Tpfile, Stfile); //Trepold = Tpfile[0]; //Trepnew = Tpfile[1]; //rt = dtwavbnd; } if (Param.wavebndtype == 2) { //readXbbndstep(nx, ny, ntheta, Param.wavebndfile.c_str(), 1, Trepold, qfile, Stfile); readXbbndstep(Param, wavebnd, 0, Trep, qfile, Stfile); } if (Param.wavebndtype == 3) { // Reuse XBeach_GPU style wave boundary. 
same as normal XBeach but as a self documented netcdf file read_reuse_bndnc(Param, 0, Trep, qfile, Stfile); } if (Param.wavebndtype == 4) { //JONSWAP //First generate a Highres 2D spec double * HRfreq; double * HRdir; double * HRSpec; int nfHR, ndHR; makjonswap(Param, wavebnd, 0, nfHR, ndHR, HRfreq, HRdir, HRSpec); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); //Then generate wave group timeseries based on that spectra //void GenWGnLBW(XBGPUParam Param, int nf, int ndir, double * HRfreq, double * HRdir, double * HRSpec, float Trep, double * qfile, double * Stfile) GenWGnLBW(Param, nfHR, ndHR, HRfreq, HRdir, HRSpec, Trep, qfile, Stfile); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); ////////////////////////////////////// //Save to Netcdf file ////////////////////////////////////// double * yyfx, *ttfx, *thetafx; double * qxtemp, *qytemp, *eetemp; int tslenbc = nwavbnd; qxtemp = (double *)malloc(ny*tslenbc*sizeof(double)); qytemp = (double *)malloc(ny*tslenbc*sizeof(double)); eetemp = (double *)malloc(ny*Param.ntheta*tslenbc*sizeof(double)); yyfx = (double *)malloc(ny*sizeof(double)); ttfx = (double *)malloc(tslenbc*sizeof(double)); thetafx = (double *)malloc(Param.ntheta*sizeof(double)); for (int j = 0; j < Param.ny; j++) { yyfx[j] = j*Param.dx; } for (int m = 0; m < tslenbc; m++) { ttfx[m] = m*Param.dtbc; } for (int itheta = 0; itheta < Param.ntheta; itheta++) { thetafx[itheta] = itheta*(Param.dtheta) + Param.thetamin + 0.5f*Param.dtheta; } //for Debugging //create2dnc(ny, tslen, Param.dx, dtin, 0.0, yyfx, tin, qx); //create3dnc(ny, Param.ntheta, tslen, Param.dx, Param.dtheta, dtin, 0.0, yyfx, thetafx, tin, zeta); for (int j = 0; j < Param.ny; j++) { for (int m = 0; m < tslenbc; m++) { qxtemp[m + j*tslenbc] = qfile[j + 0 * ny + m*ny * 4]; qytemp[m + j*tslenbc] = qfile[j + 1 * ny + m*ny * 4]; for (int itheta = 0; itheta < Param.ntheta; itheta++) { eetemp[m + 
j*tslenbc + itheta*ny*tslenbc] = Stfile[j + itheta*ny + m*ny*Param.ntheta]; } } } createbndnc(tslenbc, ny, Param.ntheta, Param.dx, Param.dtheta, 0.0, wavebnd[0].Hs, Trep, wavebnd[0].Tp, wavebnd[0].Dp, ttfx, yyfx, thetafx, eetemp, qxtemp, qytemp); //void createbndnc(int tslen, int ny, int ntheta, double dy, double dtheta, double totaltime, double Hs, double Trep, double Tp, double Dp, double * timevec, double *yy, double *theta, double * ee, double * qx, double * qy) //Trep = 15.0;// free(HRSpec); free(HRfreq); free(HRdir); free(qxtemp); free(qytemp); free(eetemp); free(yyfx); free(ttfx); free(thetafx); } if (Param.wavebndtype == 5) { // //SWAN spectrum //First read in the 2D spec double * HRfreq; double * HRdir; double * HRSpec; int nfHR, ndHR; readSWANSPEC(Param, wavebnd, 0, nfHR, ndHR, HRfreq, HRdir, HRSpec); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); //Then generate wave group timeseries based on that spectra //void GenWGnLBW(XBGPUParam Param, int nf, int ndir, double * HRfreq, double * HRdir, double * HRSpec, float Trep, double * qfile, double * Stfile) GenWGnLBW(Param, nfHR, ndHR, HRfreq, HRdir, HRSpec, Trep, qfile, Stfile); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); ////////////////////////////////////// //Save to Netcdf file ////////////////////////////////////// double * yyfx, *ttfx, *thetafx; double * qxtemp, *qytemp, *eetemp; int tslenbc = nwavbnd; qxtemp = (double *)malloc(ny*tslenbc*sizeof(double)); qytemp = (double *)malloc(ny*tslenbc*sizeof(double)); eetemp = (double *)malloc(ny*Param.ntheta*tslenbc*sizeof(double)); yyfx = (double *)malloc(ny*sizeof(double)); ttfx = (double *)malloc(tslenbc*sizeof(double)); thetafx = (double *)malloc(Param.ntheta*sizeof(double)); for (int j = 0; j < Param.ny; j++) { yyfx[j] = j*Param.dx; } for (int m = 0; m < tslenbc; m++) { ttfx[m] = m*Param.dtbc; } for (int itheta = 0; itheta < Param.ntheta; itheta++) { 
thetafx[itheta] = itheta*(Param.dtheta) + Param.thetamin + 0.5f*Param.dtheta; } //for Debugging //create2dnc(ny, tslen, Param.dx, dtin, 0.0, yyfx, tin, qx); //create3dnc(ny, Param.ntheta, tslen, Param.dx, Param.dtheta, dtin, 0.0, yyfx, thetafx, tin, zeta); for (int j = 0; j < Param.ny; j++) { for (int m = 0; m < tslenbc; m++) { qxtemp[m + j*tslenbc] = qfile[j + 0 * ny + m*ny * 4]; qytemp[m + j*tslenbc] = qfile[j + 1 * ny + m*ny * 4]; for (int itheta = 0; itheta < Param.ntheta; itheta++) { eetemp[m + j*tslenbc + itheta*ny*tslenbc] = Stfile[j + itheta*ny + m*ny*Param.ntheta]; } } } createbndnc(tslenbc, ny, Param.ntheta, Param.dx, Param.dtheta, 0.0, wavebnd[0].Hs, Trep, wavebnd[0].Tp, wavebnd[0].Dp, ttfx, yyfx, thetafx, eetemp, qxtemp, qytemp); //void createbndnc(int tslen, int ny, int ntheta, double dy, double dtheta, double totaltime, double Hs, double Trep, double Tp, double Dp, double * timevec, double *yy, double *theta, double * ee, double * qx, double * qy) //Trep = 15.0;// free(HRSpec); free(HRfreq); free(HRdir); free(qxtemp); free(qytemp); free(eetemp); free(yyfx); free(ttfx); free(thetafx); } nwbndstep = 0; for (int ni = 0; ni < ny; ni++) { for (int itheta = 0; itheta < ntheta; itheta++) { Stold[ni + itheta*ny] = Stfile[ni + itheta*ny + nwbndstep*ny*ntheta]; Stnew[ni + itheta*ny] = Stfile[ni + itheta*ny + (nwbndstep + 1)*ny*ntheta]; } for (int xi = 0; xi < 4; xi++) { qbndold[ni + xi*ny] = qfile[ni + xi*ny + nwbndstep*ny * 4]; qbndnew[ni + xi*ny] = qfile[ni + xi*ny + (nwbndstep + 1)*ny * 4]; } } //fscanf(fwav,"%f\t%f\t%f\t%f\t%f\t%f",&hm0gew,&fp,&mainang,&scoeff,&gam,&rt); //mainang=(1.5*pi-grdalpha)-mainang*pi/180; //fp=1/fp; //printf("init rt=%f\n",rt); //makjonswap(hm0gew,fp,mainang,rt,scoeff,gam,theta,ntheta,Trepnew, Stnew); //Clac Stat for (int i = 0; i < ntheta; i++) //! 
Fill St { //St[i]=Stold[i]; //printf("St[%d]=%f\n",i,St[i]); for (int ii = 0; ii < ny; ii++) { St[ii + i*ny] = Stold[ii + i*ny]; } } //printf("hh=%f\n",hh[0]); // Apply bnd on CPU side for (int ii = 0; ii < nx; ii++) { for (int jj = 0; jj < ny; jj++) { for (int nt = 0; nt < ntheta; nt++) { if (ii == 0) { ee[0 + jj*nx + nt*nx*ny] = St[jj + nt*ny];// not on gpu since it is a bank conflicting problem } else { ee[ii + jj*nx + nt*nx*ny] = 0.0f; } rr[ii + jj*nx + nt*nx*ny] = 0.0f; } } } //run dispersion relation return Param; } void wavebndOLD(XBGPUParam Param) { int nx, ny; nx = Param.nx; ny = Param.ny; if (totaltime >= dtwavbnd*(nwavbnd*wxstep - 1))//The -1 here is so that we read the next file before the last step of the previous file runs out { if (Param.wavebndtype == 2) { //readXbbndstep(nx, ny, ntheta, Param.wavebndfile.c_str(), wxstep, Trep, qfile, Stfile); } nwbndstep = 0; for (int ni = 0; ni < ny; ni++) { for (int itheta = 0; itheta < ntheta; itheta++) { Stold[ni + itheta*ny] = Stfile[ni + itheta*ny + nwbndstep*ny*ntheta]; Stnew[ni + itheta*ny] = Stfile[ni + itheta*ny + (nwbndstep + 1)*ny*ntheta]; } if (Param.wavebndtype == 2) { for (int xi = 0; xi < 4; xi++) { qbndold[ni + xi*ny] = qfile[ni + xi*ny + nwbndstep*ny * 4]; qbndnew[ni + xi*ny] = qfile[ni + xi*ny + (nwbndstep + 1)*ny * 4]; } } } wxstep = wxstep + 1; } //if ((nstep==1 || nstep==nwstp) && (imodel==1 || imodel>=3)) //{ //update wave bnd if (totaltime >= wavbndtime /*&& wavebndtype==2*/) { for (int i = 0; i < ntheta; i++) //! Fill Stold { for (int ni = 0; ni < ny; ni++) { Stold[ni + i*ny] = Stfile[ni + i*ny + nwbndstep*ntheta*ny]; } } //fscanf(fwav,"%f\t%f\t%f\t%f\t%f\t%f",&hm0gew,&fp,&mainang,&scoeff,&gam,&rt); //mainang=(1.5*pi-grdalpha)-mainang*pi/180; //printf("rt=%f\n",rt); //fp=1/fp; //fscanf(fwav,"%f\t%f",&rt,&Trepnew); nwbndstep = nwbndstep + 1; for (int i = 0; i < ntheta; i++) //! 
Fill St { for (int ni = 0; ni < ny; ni++) { Stnew[ni + i*ny] = Stfile[ni + i*ny + nwbndstep*ntheta*ny]; if (Param.wavebndtype == 1) { Trep = Tpfile[nwbndstep]; } } } if (Param.wavebndtype == 2) { for (int ni = 0; ni < ny; ni++) { for (int xi = 0; xi < 4; xi++) { qbndold[ni + xi*ny] = qbndnew[ni + ny*xi]; qbndnew[ni + xi*ny] = qfile[ni + xi*ny + nwbndstep*ny * 4]; } } if (Param.GPUDEVICE >= 0) { CUDA_CHECK(hipMemcpy(qbndold_g, qbndold, 4 * ny*sizeof(DECNUM), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(qbndnew_g, qbndnew, 4 * ny*sizeof(DECNUM), hipMemcpyHostToDevice)); } //printf("qbndold[300]=%f\n",qbndold[300]); //printf("qbndnew[300]=%f\n",qbndnew[300]); } //printf("Stfile[0]=%f\n",Stfile[0]); //makjonswap(hm0gew,fp,mainang,rt,scoeff,gam,theta,ntheta,Trepnew, Stnew); //wavbndtime=wavbndtime+dtwavbnd; wavbndtime = nwbndstep*dtwavbnd + (wxstep - 1)*nwavbnd*dtwavbnd; //should be better than above as it will not accumulate the rounbd off error } for (int i = 0; i < ntheta; i++) //! Fill St { for (int ni = 0; ni < ny; ni++) { St[ni + i*ny] = Stold[ni + i*ny] + (totaltime - wavbndtime + dtwavbnd)*(Stnew[ni + i*ny] - Stold[ni + i*ny]) / dtwavbnd; } //printf("St[%d]=%f\n",i,St[i*ny]); } //printf("Wave timestep:%f\n",wdt); //Wave model step //wavestep(); //nwstp = nstep + nstpw; //wdt = dt; //} } void wavebnd(XBGPUParam Param, std::vector<Wavebndparam> wavebndvec) { int nx, ny; double timenext, timesincelast; nx = Param.nx; ny = Param.ny; //update sl bnd // find next timestep double difft = wavebndvec[WAVstepinbnd].time - totaltime; if (difft < 0.0) { WAVstepinbnd++; if (Param.wavebndtype == 2) { //Read new STfile and qfile XBeach style readXbbndstep(Param, wavebndvec, WAVstepinbnd - 1, Trep, qfile, Stfile); } if (Param.wavebndtype == 3) { // Reuse XBeach_GPU style wave boundary. 
same as normal XBeach but as a self documented netcdf file read_reuse_bndnc(Param, WAVstepinbnd - 1, Trep, qfile, Stfile); } if (Param.wavebndtype == 4) { //JONSWAP //First generate a Highres 2D spec double * HRfreq; double * HRdir; double * HRSpec; int nfHR, ndHR; makjonswap(Param, wavebndvec, WAVstepinbnd - 1, nfHR, ndHR, HRfreq, HRdir, HRSpec); //Then generate wave group timeseries based on that spectra //void GenWGnLBW(XBGPUParam Param, int nf, int ndir, double * HRfreq, double * HRdir, double * HRSpec, float Trep, double * qfile, double * Stfile) GenWGnLBW(Param, nfHR, ndHR, HRfreq, HRdir, HRSpec, Trep, qfile, Stfile); ////////////////////////////////////// //Save to Netcdf file ////////////////////////////////////// // Stfile is not ordered teh way we want to save it to file so we need a temporary storage to rearange double * yyfx, *ttfx, *thetafx; double * qxtemp, *qytemp, *eetemp; int tslenbc = (int)ceil(Param.rtlength / Param.dtbc)+1; qxtemp = (double *)malloc(ny*tslenbc*sizeof(double)); qytemp = (double *)malloc(ny*tslenbc*sizeof(double)); eetemp = (double *)malloc(ny*Param.ntheta*tslenbc*sizeof(double)); yyfx = (double *)malloc(ny*sizeof(double)); ttfx = (double *)malloc(tslenbc*sizeof(double)); thetafx = (double *)malloc(Param.ntheta*sizeof(double)); for (int j = 0; j < Param.ny; j++) { yyfx[j] = j*Param.dx; } for (int m = 0; m < tslenbc; m++) { ttfx[m] = m*Param.dtbc; } for (int itheta = 0; itheta < Param.ntheta; itheta++) { thetafx[itheta] = itheta*(Param.dtheta) + Param.thetamin + 0.5f*Param.dtheta; } //for Debugging //create2dnc(ny, tslen, Param.dx, dtin, 0.0, yyfx, tin, qx); //create3dnc(ny, Param.ntheta, tslen, Param.dx, Param.dtheta, dtin, 0.0, yyfx, thetafx, tin, zeta); for (int j = 0; j < Param.ny; j++) { for (int m = 0; m < tslenbc; m++) { qxtemp[m + j*tslenbc] = qfile[j + 0 * ny + m*ny * 4]; qytemp[m + j*tslenbc] = qfile[j + 1 * ny + m*ny * 4]; for (int itheta = 0; itheta < Param.ntheta; itheta++) { eetemp[m + j*tslenbc + itheta*ny*tslenbc] = 
Stfile[j + itheta*ny + m*ny*Param.ntheta]; } } } writebndnc(tslenbc, ny, Param.ntheta, Param.dx, Param.dtheta, wavebndvec[WAVstepinbnd - 1].time, wavebndvec[WAVstepinbnd - 1].Hs, Trep, wavebndvec[WAVstepinbnd - 1].Tp, wavebndvec[WAVstepinbnd - 1].Dp, ttfx, yyfx, thetafx, eetemp, qxtemp, qytemp); //void writebndnc(int tslen, int ny, int ntheta, double dy, double dtheta, double totaltime, double Hs, double Trep, double Tp, double Dp, double * timevec, double *yy, double *theta, double * ee, double * qx, double * qy) //Trep = 15.0;// free(HRSpec); free(HRfreq); free(HRdir); } if (Param.wavebndtype == 5) { // //SWAN spectrum //First read in the 2D spec double * HRfreq; double * HRdir; double * HRSpec; int nfHR, ndHR; readSWANSPEC(Param, wavebndvec, WAVstepinbnd - 1, nfHR, ndHR, HRfreq, HRdir, HRSpec); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); //Then generate wave group timeseries based on that spectra //void GenWGnLBW(XBGPUParam Param, int nf, int ndir, double * HRfreq, double * HRdir, double * HRSpec, float Trep, double * qfile, double * Stfile) GenWGnLBW(Param, nfHR, ndHR, HRfreq, HRdir, HRSpec, Trep, qfile, Stfile); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); ////////////////////////////////////// //Save to Netcdf file ////////////////////////////////////// double * yyfx, *ttfx, *thetafx; double * qxtemp, *qytemp, *eetemp; int tslenbc = nwavbnd; qxtemp = (double *)malloc(ny*tslenbc*sizeof(double)); qytemp = (double *)malloc(ny*tslenbc*sizeof(double)); eetemp = (double *)malloc(ny*Param.ntheta*tslenbc*sizeof(double)); yyfx = (double *)malloc(ny*sizeof(double)); ttfx = (double *)malloc(tslenbc*sizeof(double)); thetafx = (double *)malloc(Param.ntheta*sizeof(double)); for (int j = 0; j < Param.ny; j++) { yyfx[j] = j*Param.dx; } for (int m = 0; m < tslenbc; m++) { ttfx[m] = m*Param.dtbc; } for (int itheta = 0; itheta < Param.ntheta; itheta++) { thetafx[itheta] = 
itheta*(Param.dtheta) + Param.thetamin + 0.5f*Param.dtheta; } //for Debugging //create2dnc(ny, tslen, Param.dx, dtin, 0.0, yyfx, tin, qx); //create3dnc(ny, Param.ntheta, tslen, Param.dx, Param.dtheta, dtin, 0.0, yyfx, thetafx, tin, zeta); for (int j = 0; j < Param.ny; j++) { for (int m = 0; m < tslenbc; m++) { qxtemp[m + j*tslenbc] = qfile[j + 0 * ny + m*ny * 4]; qytemp[m + j*tslenbc] = qfile[j + 1 * ny + m*ny * 4]; for (int itheta = 0; itheta < Param.ntheta; itheta++) { eetemp[m + j*tslenbc + itheta*ny*tslenbc] = Stfile[j + itheta*ny + m*ny*Param.ntheta]; } } } writebndnc(tslenbc, ny, Param.ntheta, Param.dx, Param.dtheta, wavebndvec[WAVstepinbnd - 1].time, wavebndvec[WAVstepinbnd - 1].Hs, Trep, wavebndvec[WAVstepinbnd - 1].Tp, wavebndvec[WAVstepinbnd - 1].Dp, ttfx, yyfx, thetafx, eetemp, qxtemp, qytemp); //void createbndnc(int tslen, int ny, int ntheta, double dy, double dtheta, double totaltime, double Hs, double Trep, double Tp, double Dp, double * timevec, double *yy, double *theta, double * ee, double * qx, double * qy) //Trep = 15.0;// free(HRSpec); free(HRfreq); free(HRdir); free(qxtemp); free(qytemp); free(eetemp); free(yyfx); free(ttfx); free(thetafx); } } //spetial treatment when difft == 0.0 if (Param.wavebndtype == 1) { nwbndstep = WAVstepinbnd - 1; timenext = wavebndvec[WAVstepinbnd].time - wavebndvec[WAVstepinbnd - 1].time; timesincelast = (totaltime - wavebndvec[WAVstepinbnd - 1].time); } if (Param.wavebndtype >= 2) { nwbndstep = min(floor((totaltime - wavebndvec[WAVstepinbnd - 1].time) / Param.dtbc),ceil(Param.rtlength/Param.dtbc)-1); //nwbndstep = nwbndstep + 1;// trying to solve discrepency between XB and XBGPU bnd timenext = Param.dtbc; timesincelast = totaltime - (nwbndstep*Param.dtbc + wavebndvec[WAVstepinbnd - 1].time); //nwbndstep = nwbndstep + 1;//trying to solve discrepency between XB and XBGPU bnd } for (int ni = 0; ni < ny; ni++) { for (int itheta = 0; itheta < ntheta; itheta++) { Stold[ni + itheta*ny] = Stfile[ni + itheta*ny + 
nwbndstep*ny*ntheta]; Stnew[ni + itheta*ny] = Stfile[ni + itheta*ny + (nwbndstep + 1)*ny*ntheta]; } for (int xi = 0; xi < 4; xi++) { qbndold[ni + xi*ny] = qfile[ni + xi*ny + nwbndstep*ny * 4]; qbndnew[ni + xi*ny] = qfile[ni + xi*ny + (nwbndstep + 1)*ny * 4]; } } for (int i = 0; i < ntheta; i++) //! Fill St { for (int ni = 0; ni < ny; ni++) { St[ni + i*ny] = interptime(Stnew[ni + i*ny], Stold[ni + i*ny], timenext, timesincelast); } } if (Param.flow == 1) { CUDA_CHECK(hipMemcpy(qbndold_g, qbndold, 4 * ny*sizeof(DECNUM), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(qbndnew_g, qbndnew, 4 * ny*sizeof(DECNUM), hipMemcpyHostToDevice)); } } void wavestep(XBGPUParam Param) { int nx, ny; nx = Param.nx; ny = Param.ny; double dt = Param.dt; ntheta = Param.ntheta; //Subroutine runs the wave model dim3 blockDim(16, 16, 1); dim3 gridDim(ceil((nx*1.0f) / blockDim.x), ceil((ny*1.0f) / blockDim.y), 1); dim3 blockDim4(4, 4, 1); dim3 gridDim4(ceil((nx*1.0f) / blockDim.x), ceil((ny*1.0f) / blockDim.y), 1); CUDA_CHECK(hipMemcpy(St_g, St, ny*ntheta*sizeof(DECNUM), hipMemcpyHostToDevice)); //offshorebndWav(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g) offshorebndWav << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, totaltime, Trep, St_g, sigm_g, ee_g); //CUT_CHECK_ERROR("Offshore Wave bnd execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //Sanity check sanity << <gridDim, blockDim, 0 >> >(nx, ny, Param.eps, hh_g, sigm_g, ntheta, ee_g); //CUT_CHECK_ERROR("sanity execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //CUDA_CHECK( hipMalloc((void **)&cg_g, nx*ny*sizeof(DECNUM )) ); //CUDA_CHECK( hipMalloc((void **)&cx_g, nx*ny*ntheta*sizeof(DECNUM )) ); // CUDA_CHECK( hipMalloc((void **)&c_g, nx*ny*sizeof(DECNUM )) ); //CUDA_CHECK( hipMalloc((void **)&cy_g, nx*ny*ntheta*sizeof(DECNUM )) ); //CUDA_CHECK( hipMalloc((void **)&k_g, nx*ny*sizeof(DECNUM )) ); // not sure this is worth it we'd rather allocate from main and kill when it is all done... 
CUDA_CHECK(hipMalloc((void **)&kh_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&sinh2kh_g, nx*ny*sizeof(DECNUM))); //dispersion dispersion << <gridDim, blockDim, 0 >> >(nx, ny, twopi, Param.g, aphi, bphi, sigm_g, hh_g, k_g, c_g, kh_g, sinh2kh_g, cg_g); //CUT_CHECK_ERROR("dispersion execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //CUDA_CHECK( hipMemcpy(C,kh_g, ny*nx*sizeof(DECNUM ), hipMemcpyDeviceToHost) ); CUDA_CHECK(hipMalloc((void **)&dhdx_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&dhdy_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&dudx_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&dudy_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&dvdx_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&dvdy_g, nx*ny*sizeof(DECNUM))); // Wave current interaction (i.e remove wci in shallow water) calcwci << <gridDim, blockDim, 0 >> >(nx, ny, Param.wci, Param.hwci, hh_g, wci_g); //CUT_CHECK_ERROR("calcwci execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); // // Slopes of water depth and velocities slopes << <gridDim, blockDim, 0 >> >(nx, ny, Param.dx, hh_g, uu_g, vv_g, dhdx_g, dhdy_g, dudx_g, dudy_g, dvdx_g, dvdy_g);// //CUT_CHECK_ERROR("slopes execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //CUDA_CHECK( hipMalloc((void **)&cgx_g, nx*ny*ntheta*sizeof(DECNUM )) ); //CUDA_CHECK( hipMalloc((void **)&cgy_g, nx*ny*ntheta*sizeof(DECNUM )) ); //CUDA_CHECK( hipMalloc((void **)&ctheta_g, nx*ny*ntheta*sizeof(DECNUM )) ); //CUDA_CHECK( hipMemcpy(C,kh_g, ny*nx*sizeof(DECNUM ), hipMemcpyDeviceToHost) ); //Propagation speed in theta space propagtheta << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, wci_g, ctheta_g,/*c_g,cx_g,cy_g,*/cxsth_g, sxnth_g,/*uu_g,vv_g,*/dhdx_g, dhdy_g, dudx_g, dudy_g, dvdx_g, dvdy_g, sigm_g, kh_g);// //CUT_CHECK_ERROR("propagtheta execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); ////////// //CUDA_CHECK( hipMemcpy(ctheta,ctheta_g, ny*nx*ntheta*sizeof(DECNUM ), 
hipMemcpyDeviceToHost) ); ////////// CUDA_CHECK(hipFree(dhdx_g)); CUDA_CHECK(hipFree(dhdy_g)); CUDA_CHECK(hipFree(dudx_g)); CUDA_CHECK(hipFree(dudy_g)); CUDA_CHECK(hipFree(dvdx_g)); CUDA_CHECK(hipFree(dvdy_g)); // //read3Dnc(nx,ny,ntheta,"eeX.nc",ee); //CUDA_CHECK( hipMemcpy(ee_g, ee, nx*ny*ntheta*sizeof(DECNUM ), hipMemcpyHostToDevice) ); // // transform to wave action // action << <gridDim, blockDim, 0 >> >(ntheta, nx, ny, ee_g, sigm_g); //CUT_CHECK_ERROR("action execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); // // Upwind Euler timestep propagation // CUDA_CHECK(hipMalloc((void **)&xadvec_g, nx*ny*ntheta*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&yadvec_g, nx*ny*ntheta*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&thetaadvec_g, nx*ny*ntheta*sizeof(DECNUM))); xadvecupwind2 << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, wci_g, ee_g, cg_g, cxsth_g, uu_g, xadvec_g); //CUT_CHECK_ERROR("eulerupwind xadvec execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); yadvecupwind2 << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, wci_g, ee_g, cg_g, sxnth_g, vv_g, yadvec_g); //CUT_CHECK_ERROR("eulerupwind yadvec execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //CUDA_CHECK( hipMalloc((void **)&eect_g, nx*ny*ntheta*sizeof(DECNUM )) ); //eectheta<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,ee_g,ctheta_g,eect_g); ////CUT_CHECK_ERROR("eulerupwind eectheta execution failed\n"); //CUDA_CHECK( hipDeviceSynchronize() ); //thetaadvecuw<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,dtheta,eect_g,thetaadvec_g); ////CUT_CHECK_ERROR("eulerupwind thetaadvecuw execution failed\n"); //CUDA_CHECK( hipDeviceSynchronize() ); thetaadvecuw2ho << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, Param.wci, ee_g, ctheta_g, thetaadvec_g); //CUT_CHECK_ERROR("eulerupwind thetaadvec execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //CUDA_CHECK( hipMemcpy(ctheta,yadvec_g, ny*nx*ntheta*sizeof(DECNUM ), hipMemcpyDeviceToHost) 
); //CUDA_CHECK( hipMemcpy(ctheta,thetaadvec_g, ny*nx*ntheta*sizeof(DECNUM ), hipMemcpyDeviceToHost) ); //read3Dnc(nx,ny,ntheta,"xadvecX.nc",ee); //CUDA_CHECK( hipMemcpy(xadvec_g, ee, nx*ny*sizeof(DECNUM ), hipMemcpyHostToDevice) ); //read3Dnc(nx,ny,ntheta,"yadvecX.nc",ee); //CUDA_CHECK( hipMemcpy(yadvec_g, ee, nx*ny*sizeof(DECNUM ), hipMemcpyHostToDevice) ); //read3Dnc(nx,ny,ntheta,"thetaadvecX.nc",ee); //CUDA_CHECK( hipMemcpy(thetaadvec_g, ee, nx*ny*sizeof(DECNUM ), hipMemcpyHostToDevice) ); eulerupwind << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, Param.wci, ee_g, xadvec_g, yadvec_g, thetaadvec_g); //CUT_CHECK_ERROR("eulerupwind execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //CUDA_CHECK( hipFree(cgx_g)); //CUDA_CHECK( hipFree(cgy_g)); //Fix lateraL BND rollerlatbnd << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.eps, hh_g, ee_g); //CUT_CHECK_ERROR("energy latbnd execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); // // transform back to wave energy // energy << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, ee_g, sigm_g); //CUT_CHECK_ERROR("energy execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //CUDA_CHECK( hipMemcpy(ctheta,ee_g, ny*nx*ntheta*sizeof(DECNUM ), hipMemcpyDeviceToHost) ); //CUDA_CHECK( hipMalloc((void **)&H_g, nx*ny*sizeof(DECNUM )) ); CUDA_CHECK(hipMalloc((void **)&E_g, nx*ny*sizeof(DECNUM))); //CUDA_CHECK( hipMalloc((void **)&D_g, nx*ny*sizeof(DECNUM )) ); //CUDA_CHECK( hipMemcpy(ctheta,ee_g, ny*nx*ntheta*sizeof(DECNUM ), hipMemcpyDeviceToHost) ); // // Energy integrated over wave directions,Hrms // energint << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.rho, Param.g, Param.gammax, E_g, H_g, hh_g, ee_g); //CUT_CHECK_ERROR("energint execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); // // calculate change in intrinsic frequency // removed because it is super slow and doesn't do much // // tm is thetamean and it is calculated in the mean dir scheme // CUDA_CHECK( hipMalloc((void 
**)&tm_g, nx*ny*sizeof(DECNUM )) ); // calctm<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,tm_g,theta_g,ee_g); // //CUT_CHECK_ERROR("energint execution failed\n"); // CUDA_CHECK( hipDeviceSynchronize() ); /* //Change of intrinsec frequency */ // // Total dissipation from breaking and bottom friction // if (Param.breakmodel == 1) { roelvink << <gridDim, blockDim, 0 >> >(nx, ny, Param.rho, Param.g, Param.gammaa, Param.alpha, Param.n, Trep, fwm_g, cfm_g, hh_g, H_g, E_g, D_g, k_g); //CUT_CHECK_ERROR("roelvink execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); } else { baldock << <gridDim, blockDim, 0 >> > (nx, ny, Param.rho, Param.g, Param.gammaa, Param.alpha, Param.n, Trep, fwm_g, cfm_g, hh_g, H_g, E_g, D_g, k_g);//Baldock more appropriate for pseudo stationary cases //CUT_CHECK_ERROR("baldoc execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); } // // Calculate roller energy balance // //CUDA_CHECK( hipMemcpy(hhmean,E_g, nx*ny*sizeof(DECNUM ), hipMemcpyDeviceToHost) ); if (Param.roller == 1) { xadvecupwind2 << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, wci_g, rr_g, c_g, cxsth_g, uu_g, xadvec_g); //CUT_CHECK_ERROR("eulerupwind xadvec execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); yadvecupwind2 << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, wci_g, rr_g, c_g, sxnth_g, vv_g, yadvec_g); //CUT_CHECK_ERROR("eulerupwind yadvec execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //eectheta<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,rr_g,ctheta_g,eect_g); ////CUT_CHECK_ERROR("eulerupwind eectheta execution failed\n"); //CUDA_CHECK( hipDeviceSynchronize() ); //thetaadvecuw<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,dtheta,eect_g,thetaadvec_g); ////CUT_CHECK_ERROR("eulerupwind thetaadvecuw execution failed\n"); //CUDA_CHECK( hipDeviceSynchronize() ); thetaadvecuw2ho << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, Param.wci, rr_g, ctheta_g, thetaadvec_g); //CUT_CHECK_ERROR("eulerupwind thetaadvec 
execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); eulerupwind << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, Param.wci, rr_g, xadvec_g, yadvec_g, thetaadvec_g); //CUT_CHECK_ERROR("eulerupwind execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); // // Adjust lateral bnds // rollerlatbnd << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.eps, hh_g, rr_g); //CUT_CHECK_ERROR("rollerlatbnd execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); } //CUDA_CHECK( hipFree(eect_g)); CUDA_CHECK(hipFree(xadvec_g)); CUDA_CHECK(hipFree(yadvec_g)); CUDA_CHECK(hipFree(thetaadvec_g)); //read2Dnc(nx,ny,"D.nc",uu); //CUDA_CHECK( hipMemcpy(D_g, uu, nx*ny*sizeof(DECNUM ), hipMemcpyHostToDevice) ); // // Distribution of dissipation over directions and frequencies // dissipation << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.eps, dt, Param.g, Param.beta, wci_g, hh_g, ee_g, D_g, E_g, rr_g, c_g, cxsth_g, sxnth_g, uu_g, vv_g, DR_g, R_g); //CUT_CHECK_ERROR("dissipation execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); // //Fix lateraL BND // rollerlatbnd << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.eps, hh_g, ee_g); //CUT_CHECK_ERROR("energy latbnd execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); // // Compute mean wave direction // meandir << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.rho, Param.g, dtheta, ee_g, theta_g, thetamean_g, E_g, H_g); //CUT_CHECK_ERROR("meandir execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); // // Constant warm start // WARNING ONLY TO BE USED FOR DEBUGGING // // read3Dnc(nx,ny,ntheta,"eeX.nc",ee); // CUDA_CHECK( hipMemcpy(ee_g, ee, nx*ny*ntheta*sizeof(DECNUM ), hipMemcpyHostToDevice) ); // // Radiation stresses and forcing terms // CUDA_CHECK(hipMalloc((void **)&Sxx_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&Sxy_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(hipMalloc((void **)&Syy_g, nx*ny*sizeof(DECNUM))); radstress << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, 
Param.dx, dtheta, ee_g, rr_g, cxsth_g, sxnth_g, cg_g, c_g, Sxx_g, Sxy_g, Syy_g); //CUT_CHECK_ERROR("radstress execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); // // Wave forces // wavforce << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.dx, dtheta, Sxx_g, Sxy_g, Syy_g, Fx_g, Fy_g, hh_g); //CUT_CHECK_ERROR("wavforce execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); twodimbndnoix << <gridDim, blockDim, 0 >> >(nx, ny, Param.eps, hh_g, Fx_g); //CUT_CHECK_ERROR("wave force X bnd execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); twodimbnd << <gridDim, blockDim, 0 >> >(nx, ny, Param.eps, hh_g, Fy_g); //CUT_CHECK_ERROR("wave force Y bnd execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //CUDA_CHECK( hipMemcpy(ctheta,ctheta_g, ny*nx*ntheta*sizeof(DECNUM ), hipMemcpyDeviceToHost) ); // // CAlculate stokes velocity and breaker delay //Breaker delay removed because it is slow and kinda useless // breakerdelay << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.g, Param.rho, Trep, Param.eps, urms_g, ust_g, H_g, E_g, c_g, k_g, hh_g, R_g); //CUT_CHECK_ERROR("breakerdelay execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); //twodimbnd<<<gridDim, blockDim, 0>>>(nx,ny,eps,hh_g,urms_g); //CUT_CHECK_ERROR("wave force Y bnd execution failed\n"); //CUDA_CHECK( hipDeviceSynchronize() ); twodimbnd << <gridDim, blockDim, 0 >> >(nx, ny, Param.eps, hh_g, ust_g); //CUT_CHECK_ERROR("wave force Y bnd execution failed\n"); CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipFree(Sxy_g)); CUDA_CHECK(hipFree(Sxx_g)); CUDA_CHECK(hipFree(Syy_g)); //CUDA_CHECK( hipFree(cg_g)); //CUDA_CHECK( hipFree(c_g)); CUDA_CHECK(hipFree(tm_g)); // // Adjust Offshore Bnd // //CUDA_CHECK( hipMemcpy(St_g, St, ny*ntheta*sizeof(DECNUM ), hipMemcpyHostToDevice) ); //offshorebndWav(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g) //offshorebndWav<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g); ////CUT_CHECK_ERROR("Offshore Wave bnd execution 
failed\n"); //CUDA_CHECK( hipDeviceSynchronize() ); CUDA_CHECK(hipFree(E_g)); //CUDA_CHECK( hipFree(H_g)); //CUDA_CHECK( hipFree(D_g)); //CUDA_CHECK( hipFree(k_g)); CUDA_CHECK(hipFree(kh_g)); CUDA_CHECK(hipFree(sinh2kh_g)); } void wavestepCPU(XBGPUParam Param) { int nx, ny; nx = Param.nx; ny = Param.ny; //Subroutine runs the wave model //printf("%2.2f\t",ee_g[16+8*nx+1*ntheta]); //offshorebndWav(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g) offshorebndWavCPU(nx, ny, ntheta, totaltime, Trep, St, sigm_g, ee_g); //Sanity check sanityCPU(nx, ny, Param.eps, hh_g, sigm_g, ntheta, ee_g); //printf("%2.2f\t", ee_g[16 + 8 * nx + 1 * ntheta]); //dispersion //printf("%2.2f\t", ee_g[16 + 8 * nx + 1 * ntheta]); dispersionCPU(nx, ny, twopi, Param.g, aphi, bphi, sigm_g, hh_g, k_g, c_g, kh_g, sinh2kh_g, cg_g); // Wave current interaction (i.e remove wci in shallow water) calcwciCPU(nx, ny, Param.wci, Param.hwci, hh_g, wci_g); //printf("%f\t",ee_g[0+16*nx+6*nx*ny]); // Slopes of water depth and velocities slopesCPU(nx, ny, Param.dx, hh_g, uu_g, vv_g, dhdx_g, dhdy_g, dudx_g, dudy_g, dvdx_g, dvdy_g);// //printf("%f\t",ee_g[0+16*nx+6*nx*ny]); //Propagation speed in theta space propagthetaCPU(nx, ny, ntheta, wci_g, ctheta_g, cxsth_g, sxnth_g, dhdx_g, dhdy_g, dudx_g, dudy_g, dvdx_g, dvdy_g, sigm_g, kh_g);// //printf("%f\n",ee_g[200+16*nx]); //read3Dnc(nx, ny, ntheta, "eeX.nc", ee_g); //printf("%f\n", ee_g[200 + 16 * nx]); // transform to wave action actionCPU(ntheta, nx, ny, ee_g, sigm_g); // Upwind Euler timestep propagation xadvecupwind2CPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, wci_g, ee_g, cg_g, cxsth_g, uu_g, xadvec_g); yadvecupwind2CPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, wci_g, ee_g, cg_g, sxnth_g, vv_g, yadvec_g); thetaadvecuw2hoCPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, Param.wci, ee_g, ctheta_g, thetaadvec_g); //Apply advection eulerupwindCPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, Param.wci, ee_g, xadvec_g, yadvec_g, thetaadvec_g); //Fix lateraL BND 
rollerlatbndCPU(nx, ny, ntheta, Param.eps, hh_g, ee_g); // transform back to wave energy energyCPU(nx, ny, ntheta, ee_g, sigm_g); // Energy integrated over wave directions,Hrms energintCPU(nx, ny, ntheta, dtheta, Param.rho, Param.g, Param.gammax, E_g, H_g, hh_g, ee_g); // // calculate change in intrinsic frequency // removed because it is super slow and doesn't do much // // tm is thetamean and it is calculated in the mean dir scheme // CUDA_CHECK( hipMalloc((void **)&tm_g, nx*ny*sizeof(DECNUM )) ); // calctm<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,tm_g,theta_g,ee_g); // //CUT_CHECK_ERROR("energint execution failed\n"); // CUDA_CHECK( hipDeviceSynchronize() ); //Change of intrinsec frequency // Total dissipation from breaking and bottom friction if (Param.breakmodel == 1) { roelvinkCPU(nx, ny, Param.rho, Param.g, Param.gammaa, Param.alpha, Param.n, Trep, fwm_g, cfm_g, hh_g, H_g, E_g, D_g, k_g); } else { baldockCPU(nx, ny, Param.rho, Param.g, Param.gammaa, Param.alpha, Param.n, Trep, fwm_g, cfm_g, hh_g, H_g, E_g, D_g, k_g);//Baldock more appropriate for pseudo stationary cases } // Calculate roller energy balance if (roller == 1) { xadvecupwind2CPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, wci_g, rr_g, c_g, cxsth_g, uu_g, xadvec_g); yadvecupwind2CPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, wci_g, rr_g, c_g, sxnth_g, vv_g, yadvec_g); thetaadvecuw2hoCPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, Param.wci, rr_g, ctheta_g, thetaadvec_g); eulerupwindCPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, Param.wci, rr_g, xadvec_g, yadvec_g, thetaadvec_g); rollerlatbndCPU(nx, ny, ntheta, Param.eps, hh_g, rr_g); } // Distribution of dissipation over directions and frequencies dissipationCPU(nx, ny, ntheta, dtheta, Param.eps, Param.dt, Param.g, Param.beta, wci_g, hh_g, ee_g, D_g, E_g, rr_g, c_g, cxsth_g, sxnth_g, uu_g, vv_g, DR_g, R_g); //Fix lateraL BND rollerlatbndCPU(nx, ny, ntheta, Param.eps, hh_g, ee_g); // Compute mean wave direction meandirCPU(nx, ny, ntheta, 
Param.rho, Param.g, dtheta, ee_g, theta_g, thetamean_g, E_g, H_g); // Radiation stresses and forcing terms radstressCPU(nx, ny, ntheta, Param.dx, dtheta, ee_g, rr_g, cxsth_g, sxnth_g, cg_g, c_g, Sxx_g, Sxy_g, Syy_g); // Wave forces wavforceCPU(nx, ny, ntheta, Param.dx, dtheta, Sxx_g, Sxy_g, Syy_g, Fx_g, Fy_g, hh_g); //Lat Bnd twodimbndnoixCPU(nx, ny, Param.eps, hh_g, Fx_g); twodimbndCPU(nx, ny, Param.eps, hh_g, Fy_g); // CAlculate stokes velocity and breaker delay //Breaker delay removed because it is slow and kinda useless breakerdelayCPU(nx, ny, ntheta, dtheta, Param.g, Param.rho, Trep, Param.eps, urms_g, ust_g, H_g, E_g, c_g, k_g, hh_g, R_g); twodimbndCPU(nx, ny, Param.eps, hh_g, ust_g); // Adjust Offshore Bnd //CUDA_CHECK( hipMemcpy(St_g, St, ny*ntheta*sizeof(DECNUM ), hipMemcpyHostToDevice) ); //offshorebndWav(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g) //offshorebndWav<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g); ////CUT_CHECK_ERROR("Offshore Wave bnd execution failed\n"); //CUDA_CHECK( hipDeviceSynchronize() ); }
ada313030dc137e9c3895420fc1230f39fa86b6f.cu
#include "XBeachGPU.h" void CUDA_CHECK(cudaError CUDerr) { if (cudaSuccess != CUDerr) { fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, cudaGetErrorString(CUDerr)); write_text_to_log_file("Cuda error in file " + std::string(__FILE__) + " in line " + std::to_string(__LINE__) + " " + std::string(cudaGetErrorString(CUDerr))); exit(EXIT_FAILURE); } } XBGPUParam waveinitGPU(XBGPUParam Param, std::vector<Wavebndparam> wavebnd) { // Initialize wave model int nx, ny; nx = Param.nx; ny = Param.ny; if (Param.dtheta > 0.0) { Param.ntheta = round((Param.thetamax - Param.thetamin) / Param.dtheta); } else { if (Param.ntheta == 0) { Param.ntheta = 1; } Param.dtheta = (Param.thetamax - Param.thetamin) / Param.ntheta; } ntheta = Param.ntheta; dtheta = Param.dtheta; printf("ntheta=%d\tdtheta=%f\n", ntheta, dtheta); write_text_to_log_file("ntheta=" + std::to_string(ntheta) + "\t dtheta=" + std::to_string(dtheta)); //printf("nwavbnd=%d\n", nwavbnd); if (Param.wavebndtype == 1) { nwavbnd = wavebnd.size(); // one Stfile/qfile will be used throughout the simulation } if (Param.wavebndtype >= 2) { nwavbnd = ceil(Param.rtlength / Param.dtbc)+1; // +1 needed here } theta = (DECNUM *)malloc(ntheta*sizeof(DECNUM)); Stfile = (double *)malloc(ntheta*ny*nwavbnd*sizeof(double)); qfile = (double *)malloc(4 * ny*nwavbnd*sizeof(double)); Tpfile = (double *)malloc(nwavbnd*sizeof(double)); //dummy=(double *)malloc(1000*sizeof(double)); qbndnew = (DECNUM *)malloc(4 * ny*sizeof(DECNUM)); qbndold = (DECNUM *)malloc(4 * ny*sizeof(DECNUM)); St = (DECNUM *)malloc(ntheta*ny*sizeof(DECNUM)); Stold = (DECNUM *)malloc(ntheta*ny*sizeof(DECNUM)); Stnew = (DECNUM *)malloc(ntheta*ny*sizeof(DECNUM)); cxsth = (DECNUM *)malloc(ntheta*sizeof(DECNUM)); sxnth = (DECNUM *)malloc(ntheta*sizeof(DECNUM)); for (int i = 0; i < ntheta; i++) { theta[i] = i*(Param.dtheta)+Param.thetamin + 0.5f*Param.dtheta; cxsth[i] = cos(theta[i]); sxnth[i] = sin(theta[i]); 
//printf("theta=%f\tcxsth=%f\tsxnth=%f\n", theta[i], cxsth[i], sxnth[i]); } dang = theta[1] - theta[0]; //dtheta=dang; ee = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); dd = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); wete = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); rr = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); cgx = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); cgy = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); cx = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); cy = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); ctheta = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); thet = (DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); //drr=(DECNUM *)malloc(nx*ny*ntheta*sizeof(DECNUM)); printf("Generating initial wave bnd data\n"); write_text_to_log_file("Generating initial wave bnd data"); if (Param.wavebndtype == 1) { //GenCstWave(Param, wavebnd, Stfile, qfile, Tpfile); GenCstWave(Param, wavebnd, theta, Stfile, qfile, Tpfile); Trep = Tpfile[0]; //readStatbnd(nx, ny, ntheta, Param.rho, Param.g, Param.wavebndfile.c_str(), Tpfile, Stfile); //Trepold = Tpfile[0]; //Trepnew = Tpfile[1]; //rt = dtwavbnd; } if (Param.wavebndtype == 2) { //readXbbndstep(nx, ny, ntheta, Param.wavebndfile.c_str(), 1, Trepold, qfile, Stfile); readXbbndstep(Param, wavebnd, 0, Trep, qfile, Stfile); } if (Param.wavebndtype == 3) { // Reuse XBeach_GPU style wave boundary. 
same as normal XBeach but as a self documented netcdf file read_reuse_bndnc(Param, 0, Trep, qfile, Stfile); } if (Param.wavebndtype == 4) { //JONSWAP //First generate a Highres 2D spec double * HRfreq; double * HRdir; double * HRSpec; int nfHR, ndHR; makjonswap(Param, wavebnd, 0, nfHR, ndHR, HRfreq, HRdir, HRSpec); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); //Then generate wave group timeseries based on that spectra //void GenWGnLBW(XBGPUParam Param, int nf, int ndir, double * HRfreq, double * HRdir, double * HRSpec, float Trep, double * qfile, double * Stfile) GenWGnLBW(Param, nfHR, ndHR, HRfreq, HRdir, HRSpec, Trep, qfile, Stfile); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); ////////////////////////////////////// //Save to Netcdf file ////////////////////////////////////// double * yyfx, *ttfx, *thetafx; double * qxtemp, *qytemp, *eetemp; int tslenbc = nwavbnd; qxtemp = (double *)malloc(ny*tslenbc*sizeof(double)); qytemp = (double *)malloc(ny*tslenbc*sizeof(double)); eetemp = (double *)malloc(ny*Param.ntheta*tslenbc*sizeof(double)); yyfx = (double *)malloc(ny*sizeof(double)); ttfx = (double *)malloc(tslenbc*sizeof(double)); thetafx = (double *)malloc(Param.ntheta*sizeof(double)); for (int j = 0; j < Param.ny; j++) { yyfx[j] = j*Param.dx; } for (int m = 0; m < tslenbc; m++) { ttfx[m] = m*Param.dtbc; } for (int itheta = 0; itheta < Param.ntheta; itheta++) { thetafx[itheta] = itheta*(Param.dtheta) + Param.thetamin + 0.5f*Param.dtheta; } //for Debugging //create2dnc(ny, tslen, Param.dx, dtin, 0.0, yyfx, tin, qx); //create3dnc(ny, Param.ntheta, tslen, Param.dx, Param.dtheta, dtin, 0.0, yyfx, thetafx, tin, zeta); for (int j = 0; j < Param.ny; j++) { for (int m = 0; m < tslenbc; m++) { qxtemp[m + j*tslenbc] = qfile[j + 0 * ny + m*ny * 4]; qytemp[m + j*tslenbc] = qfile[j + 1 * ny + m*ny * 4]; for (int itheta = 0; itheta < Param.ntheta; itheta++) { eetemp[m + 
j*tslenbc + itheta*ny*tslenbc] = Stfile[j + itheta*ny + m*ny*Param.ntheta]; } } } createbndnc(tslenbc, ny, Param.ntheta, Param.dx, Param.dtheta, 0.0, wavebnd[0].Hs, Trep, wavebnd[0].Tp, wavebnd[0].Dp, ttfx, yyfx, thetafx, eetemp, qxtemp, qytemp); //void createbndnc(int tslen, int ny, int ntheta, double dy, double dtheta, double totaltime, double Hs, double Trep, double Tp, double Dp, double * timevec, double *yy, double *theta, double * ee, double * qx, double * qy) //Trep = 15.0;// free(HRSpec); free(HRfreq); free(HRdir); free(qxtemp); free(qytemp); free(eetemp); free(yyfx); free(ttfx); free(thetafx); } if (Param.wavebndtype == 5) { // //SWAN spectrum //First read in the 2D spec double * HRfreq; double * HRdir; double * HRSpec; int nfHR, ndHR; readSWANSPEC(Param, wavebnd, 0, nfHR, ndHR, HRfreq, HRdir, HRSpec); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); //Then generate wave group timeseries based on that spectra //void GenWGnLBW(XBGPUParam Param, int nf, int ndir, double * HRfreq, double * HRdir, double * HRSpec, float Trep, double * qfile, double * Stfile) GenWGnLBW(Param, nfHR, ndHR, HRfreq, HRdir, HRSpec, Trep, qfile, Stfile); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); ////////////////////////////////////// //Save to Netcdf file ////////////////////////////////////// double * yyfx, *ttfx, *thetafx; double * qxtemp, *qytemp, *eetemp; int tslenbc = nwavbnd; qxtemp = (double *)malloc(ny*tslenbc*sizeof(double)); qytemp = (double *)malloc(ny*tslenbc*sizeof(double)); eetemp = (double *)malloc(ny*Param.ntheta*tslenbc*sizeof(double)); yyfx = (double *)malloc(ny*sizeof(double)); ttfx = (double *)malloc(tslenbc*sizeof(double)); thetafx = (double *)malloc(Param.ntheta*sizeof(double)); for (int j = 0; j < Param.ny; j++) { yyfx[j] = j*Param.dx; } for (int m = 0; m < tslenbc; m++) { ttfx[m] = m*Param.dtbc; } for (int itheta = 0; itheta < Param.ntheta; itheta++) { 
thetafx[itheta] = itheta*(Param.dtheta) + Param.thetamin + 0.5f*Param.dtheta; } //for Debugging //create2dnc(ny, tslen, Param.dx, dtin, 0.0, yyfx, tin, qx); //create3dnc(ny, Param.ntheta, tslen, Param.dx, Param.dtheta, dtin, 0.0, yyfx, thetafx, tin, zeta); for (int j = 0; j < Param.ny; j++) { for (int m = 0; m < tslenbc; m++) { qxtemp[m + j*tslenbc] = qfile[j + 0 * ny + m*ny * 4]; qytemp[m + j*tslenbc] = qfile[j + 1 * ny + m*ny * 4]; for (int itheta = 0; itheta < Param.ntheta; itheta++) { eetemp[m + j*tslenbc + itheta*ny*tslenbc] = Stfile[j + itheta*ny + m*ny*Param.ntheta]; } } } createbndnc(tslenbc, ny, Param.ntheta, Param.dx, Param.dtheta, 0.0, wavebnd[0].Hs, Trep, wavebnd[0].Tp, wavebnd[0].Dp, ttfx, yyfx, thetafx, eetemp, qxtemp, qytemp); //void createbndnc(int tslen, int ny, int ntheta, double dy, double dtheta, double totaltime, double Hs, double Trep, double Tp, double Dp, double * timevec, double *yy, double *theta, double * ee, double * qx, double * qy) //Trep = 15.0;// free(HRSpec); free(HRfreq); free(HRdir); free(qxtemp); free(qytemp); free(eetemp); free(yyfx); free(ttfx); free(thetafx); } nwbndstep = 0; for (int ni = 0; ni < ny; ni++) { for (int itheta = 0; itheta < ntheta; itheta++) { Stold[ni + itheta*ny] = Stfile[ni + itheta*ny + nwbndstep*ny*ntheta]; Stnew[ni + itheta*ny] = Stfile[ni + itheta*ny + (nwbndstep + 1)*ny*ntheta]; } for (int xi = 0; xi < 4; xi++) { qbndold[ni + xi*ny] = qfile[ni + xi*ny + nwbndstep*ny * 4]; qbndnew[ni + xi*ny] = qfile[ni + xi*ny + (nwbndstep + 1)*ny * 4]; } } //fscanf(fwav,"%f\t%f\t%f\t%f\t%f\t%f",&hm0gew,&fp,&mainang,&scoeff,&gam,&rt); //mainang=(1.5*pi-grdalpha)-mainang*pi/180; //fp=1/fp; //printf("init rt=%f\n",rt); //makjonswap(hm0gew,fp,mainang,rt,scoeff,gam,theta,ntheta,Trepnew, Stnew); //Clac Stat for (int i = 0; i < ntheta; i++) //! 
Fill St { //St[i]=Stold[i]; //printf("St[%d]=%f\n",i,St[i]); for (int ii = 0; ii < ny; ii++) { St[ii + i*ny] = Stold[ii + i*ny]; } } //printf("hh=%f\n",hh[0]); // Apply bnd on CPU side for (int ii = 0; ii < nx; ii++) { for (int jj = 0; jj < ny; jj++) { for (int nt = 0; nt < ntheta; nt++) { if (ii == 0) { ee[0 + jj*nx + nt*nx*ny] = St[jj + nt*ny];// not on gpu since it is a bank conflicting problem } else { ee[ii + jj*nx + nt*nx*ny] = 0.0f; } rr[ii + jj*nx + nt*nx*ny] = 0.0f; } } } //run dispersion relation return Param; } void wavebndOLD(XBGPUParam Param) { int nx, ny; nx = Param.nx; ny = Param.ny; if (totaltime >= dtwavbnd*(nwavbnd*wxstep - 1))//The -1 here is so that we read the next file before the last step of the previous file runs out { if (Param.wavebndtype == 2) { //readXbbndstep(nx, ny, ntheta, Param.wavebndfile.c_str(), wxstep, Trep, qfile, Stfile); } nwbndstep = 0; for (int ni = 0; ni < ny; ni++) { for (int itheta = 0; itheta < ntheta; itheta++) { Stold[ni + itheta*ny] = Stfile[ni + itheta*ny + nwbndstep*ny*ntheta]; Stnew[ni + itheta*ny] = Stfile[ni + itheta*ny + (nwbndstep + 1)*ny*ntheta]; } if (Param.wavebndtype == 2) { for (int xi = 0; xi < 4; xi++) { qbndold[ni + xi*ny] = qfile[ni + xi*ny + nwbndstep*ny * 4]; qbndnew[ni + xi*ny] = qfile[ni + xi*ny + (nwbndstep + 1)*ny * 4]; } } } wxstep = wxstep + 1; } //if ((nstep==1 || nstep==nwstp) && (imodel==1 || imodel>=3)) //{ //update wave bnd if (totaltime >= wavbndtime /*&& wavebndtype==2*/) { for (int i = 0; i < ntheta; i++) //! Fill Stold { for (int ni = 0; ni < ny; ni++) { Stold[ni + i*ny] = Stfile[ni + i*ny + nwbndstep*ntheta*ny]; } } //fscanf(fwav,"%f\t%f\t%f\t%f\t%f\t%f",&hm0gew,&fp,&mainang,&scoeff,&gam,&rt); //mainang=(1.5*pi-grdalpha)-mainang*pi/180; //printf("rt=%f\n",rt); //fp=1/fp; //fscanf(fwav,"%f\t%f",&rt,&Trepnew); nwbndstep = nwbndstep + 1; for (int i = 0; i < ntheta; i++) //! 
Fill St { for (int ni = 0; ni < ny; ni++) { Stnew[ni + i*ny] = Stfile[ni + i*ny + nwbndstep*ntheta*ny]; if (Param.wavebndtype == 1) { Trep = Tpfile[nwbndstep]; } } } if (Param.wavebndtype == 2) { for (int ni = 0; ni < ny; ni++) { for (int xi = 0; xi < 4; xi++) { qbndold[ni + xi*ny] = qbndnew[ni + ny*xi]; qbndnew[ni + xi*ny] = qfile[ni + xi*ny + nwbndstep*ny * 4]; } } if (Param.GPUDEVICE >= 0) { CUDA_CHECK(cudaMemcpy(qbndold_g, qbndold, 4 * ny*sizeof(DECNUM), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(qbndnew_g, qbndnew, 4 * ny*sizeof(DECNUM), cudaMemcpyHostToDevice)); } //printf("qbndold[300]=%f\n",qbndold[300]); //printf("qbndnew[300]=%f\n",qbndnew[300]); } //printf("Stfile[0]=%f\n",Stfile[0]); //makjonswap(hm0gew,fp,mainang,rt,scoeff,gam,theta,ntheta,Trepnew, Stnew); //wavbndtime=wavbndtime+dtwavbnd; wavbndtime = nwbndstep*dtwavbnd + (wxstep - 1)*nwavbnd*dtwavbnd; //should be better than above as it will not accumulate the rounbd off error } for (int i = 0; i < ntheta; i++) //! Fill St { for (int ni = 0; ni < ny; ni++) { St[ni + i*ny] = Stold[ni + i*ny] + (totaltime - wavbndtime + dtwavbnd)*(Stnew[ni + i*ny] - Stold[ni + i*ny]) / dtwavbnd; } //printf("St[%d]=%f\n",i,St[i*ny]); } //printf("Wave timestep:%f\n",wdt); //Wave model step //wavestep(); //nwstp = nstep + nstpw; //wdt = dt; //} } void wavebnd(XBGPUParam Param, std::vector<Wavebndparam> wavebndvec) { int nx, ny; double timenext, timesincelast; nx = Param.nx; ny = Param.ny; //update sl bnd // find next timestep double difft = wavebndvec[WAVstepinbnd].time - totaltime; if (difft < 0.0) { WAVstepinbnd++; if (Param.wavebndtype == 2) { //Read new STfile and qfile XBeach style readXbbndstep(Param, wavebndvec, WAVstepinbnd - 1, Trep, qfile, Stfile); } if (Param.wavebndtype == 3) { // Reuse XBeach_GPU style wave boundary. 
same as normal XBeach but as a self documented netcdf file read_reuse_bndnc(Param, WAVstepinbnd - 1, Trep, qfile, Stfile); } if (Param.wavebndtype == 4) { //JONSWAP //First generate a Highres 2D spec double * HRfreq; double * HRdir; double * HRSpec; int nfHR, ndHR; makjonswap(Param, wavebndvec, WAVstepinbnd - 1, nfHR, ndHR, HRfreq, HRdir, HRSpec); //Then generate wave group timeseries based on that spectra //void GenWGnLBW(XBGPUParam Param, int nf, int ndir, double * HRfreq, double * HRdir, double * HRSpec, float Trep, double * qfile, double * Stfile) GenWGnLBW(Param, nfHR, ndHR, HRfreq, HRdir, HRSpec, Trep, qfile, Stfile); ////////////////////////////////////// //Save to Netcdf file ////////////////////////////////////// // Stfile is not ordered teh way we want to save it to file so we need a temporary storage to rearange double * yyfx, *ttfx, *thetafx; double * qxtemp, *qytemp, *eetemp; int tslenbc = (int)ceil(Param.rtlength / Param.dtbc)+1; qxtemp = (double *)malloc(ny*tslenbc*sizeof(double)); qytemp = (double *)malloc(ny*tslenbc*sizeof(double)); eetemp = (double *)malloc(ny*Param.ntheta*tslenbc*sizeof(double)); yyfx = (double *)malloc(ny*sizeof(double)); ttfx = (double *)malloc(tslenbc*sizeof(double)); thetafx = (double *)malloc(Param.ntheta*sizeof(double)); for (int j = 0; j < Param.ny; j++) { yyfx[j] = j*Param.dx; } for (int m = 0; m < tslenbc; m++) { ttfx[m] = m*Param.dtbc; } for (int itheta = 0; itheta < Param.ntheta; itheta++) { thetafx[itheta] = itheta*(Param.dtheta) + Param.thetamin + 0.5f*Param.dtheta; } //for Debugging //create2dnc(ny, tslen, Param.dx, dtin, 0.0, yyfx, tin, qx); //create3dnc(ny, Param.ntheta, tslen, Param.dx, Param.dtheta, dtin, 0.0, yyfx, thetafx, tin, zeta); for (int j = 0; j < Param.ny; j++) { for (int m = 0; m < tslenbc; m++) { qxtemp[m + j*tslenbc] = qfile[j + 0 * ny + m*ny * 4]; qytemp[m + j*tslenbc] = qfile[j + 1 * ny + m*ny * 4]; for (int itheta = 0; itheta < Param.ntheta; itheta++) { eetemp[m + j*tslenbc + itheta*ny*tslenbc] = 
Stfile[j + itheta*ny + m*ny*Param.ntheta]; } } } writebndnc(tslenbc, ny, Param.ntheta, Param.dx, Param.dtheta, wavebndvec[WAVstepinbnd - 1].time, wavebndvec[WAVstepinbnd - 1].Hs, Trep, wavebndvec[WAVstepinbnd - 1].Tp, wavebndvec[WAVstepinbnd - 1].Dp, ttfx, yyfx, thetafx, eetemp, qxtemp, qytemp); //void writebndnc(int tslen, int ny, int ntheta, double dy, double dtheta, double totaltime, double Hs, double Trep, double Tp, double Dp, double * timevec, double *yy, double *theta, double * ee, double * qx, double * qy) //Trep = 15.0;// free(HRSpec); free(HRfreq); free(HRdir); } if (Param.wavebndtype == 5) { // //SWAN spectrum //First read in the 2D spec double * HRfreq; double * HRdir; double * HRSpec; int nfHR, ndHR; readSWANSPEC(Param, wavebndvec, WAVstepinbnd - 1, nfHR, ndHR, HRfreq, HRdir, HRSpec); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); //Then generate wave group timeseries based on that spectra //void GenWGnLBW(XBGPUParam Param, int nf, int ndir, double * HRfreq, double * HRdir, double * HRSpec, float Trep, double * qfile, double * Stfile) GenWGnLBW(Param, nfHR, ndHR, HRfreq, HRdir, HRSpec, Trep, qfile, Stfile); //create2dnc(nfHR, ndHR, HRfreq[1] - HRfreq[0], HRdir[1] - HRdir[0], 0.0, HRfreq, HRdir, HRSpec); ////////////////////////////////////// //Save to Netcdf file ////////////////////////////////////// double * yyfx, *ttfx, *thetafx; double * qxtemp, *qytemp, *eetemp; int tslenbc = nwavbnd; qxtemp = (double *)malloc(ny*tslenbc*sizeof(double)); qytemp = (double *)malloc(ny*tslenbc*sizeof(double)); eetemp = (double *)malloc(ny*Param.ntheta*tslenbc*sizeof(double)); yyfx = (double *)malloc(ny*sizeof(double)); ttfx = (double *)malloc(tslenbc*sizeof(double)); thetafx = (double *)malloc(Param.ntheta*sizeof(double)); for (int j = 0; j < Param.ny; j++) { yyfx[j] = j*Param.dx; } for (int m = 0; m < tslenbc; m++) { ttfx[m] = m*Param.dtbc; } for (int itheta = 0; itheta < Param.ntheta; itheta++) { thetafx[itheta] = 
itheta*(Param.dtheta) + Param.thetamin + 0.5f*Param.dtheta; } //for Debugging //create2dnc(ny, tslen, Param.dx, dtin, 0.0, yyfx, tin, qx); //create3dnc(ny, Param.ntheta, tslen, Param.dx, Param.dtheta, dtin, 0.0, yyfx, thetafx, tin, zeta); for (int j = 0; j < Param.ny; j++) { for (int m = 0; m < tslenbc; m++) { qxtemp[m + j*tslenbc] = qfile[j + 0 * ny + m*ny * 4]; qytemp[m + j*tslenbc] = qfile[j + 1 * ny + m*ny * 4]; for (int itheta = 0; itheta < Param.ntheta; itheta++) { eetemp[m + j*tslenbc + itheta*ny*tslenbc] = Stfile[j + itheta*ny + m*ny*Param.ntheta]; } } } writebndnc(tslenbc, ny, Param.ntheta, Param.dx, Param.dtheta, wavebndvec[WAVstepinbnd - 1].time, wavebndvec[WAVstepinbnd - 1].Hs, Trep, wavebndvec[WAVstepinbnd - 1].Tp, wavebndvec[WAVstepinbnd - 1].Dp, ttfx, yyfx, thetafx, eetemp, qxtemp, qytemp); //void createbndnc(int tslen, int ny, int ntheta, double dy, double dtheta, double totaltime, double Hs, double Trep, double Tp, double Dp, double * timevec, double *yy, double *theta, double * ee, double * qx, double * qy) //Trep = 15.0;// free(HRSpec); free(HRfreq); free(HRdir); free(qxtemp); free(qytemp); free(eetemp); free(yyfx); free(ttfx); free(thetafx); } } //spetial treatment when difft == 0.0 if (Param.wavebndtype == 1) { nwbndstep = WAVstepinbnd - 1; timenext = wavebndvec[WAVstepinbnd].time - wavebndvec[WAVstepinbnd - 1].time; timesincelast = (totaltime - wavebndvec[WAVstepinbnd - 1].time); } if (Param.wavebndtype >= 2) { nwbndstep = min(floor((totaltime - wavebndvec[WAVstepinbnd - 1].time) / Param.dtbc),ceil(Param.rtlength/Param.dtbc)-1); //nwbndstep = nwbndstep + 1;// trying to solve discrepency between XB and XBGPU bnd timenext = Param.dtbc; timesincelast = totaltime - (nwbndstep*Param.dtbc + wavebndvec[WAVstepinbnd - 1].time); //nwbndstep = nwbndstep + 1;//trying to solve discrepency between XB and XBGPU bnd } for (int ni = 0; ni < ny; ni++) { for (int itheta = 0; itheta < ntheta; itheta++) { Stold[ni + itheta*ny] = Stfile[ni + itheta*ny + 
nwbndstep*ny*ntheta]; Stnew[ni + itheta*ny] = Stfile[ni + itheta*ny + (nwbndstep + 1)*ny*ntheta]; } for (int xi = 0; xi < 4; xi++) { qbndold[ni + xi*ny] = qfile[ni + xi*ny + nwbndstep*ny * 4]; qbndnew[ni + xi*ny] = qfile[ni + xi*ny + (nwbndstep + 1)*ny * 4]; } } for (int i = 0; i < ntheta; i++) //! Fill St { for (int ni = 0; ni < ny; ni++) { St[ni + i*ny] = interptime(Stnew[ni + i*ny], Stold[ni + i*ny], timenext, timesincelast); } } if (Param.flow == 1) { CUDA_CHECK(cudaMemcpy(qbndold_g, qbndold, 4 * ny*sizeof(DECNUM), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(qbndnew_g, qbndnew, 4 * ny*sizeof(DECNUM), cudaMemcpyHostToDevice)); } } void wavestep(XBGPUParam Param) { int nx, ny; nx = Param.nx; ny = Param.ny; double dt = Param.dt; ntheta = Param.ntheta; //Subroutine runs the wave model dim3 blockDim(16, 16, 1); dim3 gridDim(ceil((nx*1.0f) / blockDim.x), ceil((ny*1.0f) / blockDim.y), 1); dim3 blockDim4(4, 4, 1); dim3 gridDim4(ceil((nx*1.0f) / blockDim.x), ceil((ny*1.0f) / blockDim.y), 1); CUDA_CHECK(cudaMemcpy(St_g, St, ny*ntheta*sizeof(DECNUM), cudaMemcpyHostToDevice)); //offshorebndWav(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g) offshorebndWav << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, totaltime, Trep, St_g, sigm_g, ee_g); //CUT_CHECK_ERROR("Offshore Wave bnd execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //Sanity check sanity << <gridDim, blockDim, 0 >> >(nx, ny, Param.eps, hh_g, sigm_g, ntheta, ee_g); //CUT_CHECK_ERROR("sanity execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //CUDA_CHECK( cudaMalloc((void **)&cg_g, nx*ny*sizeof(DECNUM )) ); //CUDA_CHECK( cudaMalloc((void **)&cx_g, nx*ny*ntheta*sizeof(DECNUM )) ); // CUDA_CHECK( cudaMalloc((void **)&c_g, nx*ny*sizeof(DECNUM )) ); //CUDA_CHECK( cudaMalloc((void **)&cy_g, nx*ny*ntheta*sizeof(DECNUM )) ); //CUDA_CHECK( cudaMalloc((void **)&k_g, nx*ny*sizeof(DECNUM )) ); // not sure this is worth it we'd rather allocate from main and kill when it is all done... 
CUDA_CHECK(cudaMalloc((void **)&kh_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(cudaMalloc((void **)&sinh2kh_g, nx*ny*sizeof(DECNUM))); //dispersion dispersion << <gridDim, blockDim, 0 >> >(nx, ny, twopi, Param.g, aphi, bphi, sigm_g, hh_g, k_g, c_g, kh_g, sinh2kh_g, cg_g); //CUT_CHECK_ERROR("dispersion execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //CUDA_CHECK( cudaMemcpy(C,kh_g, ny*nx*sizeof(DECNUM ), cudaMemcpyDeviceToHost) ); CUDA_CHECK(cudaMalloc((void **)&dhdx_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(cudaMalloc((void **)&dhdy_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(cudaMalloc((void **)&dudx_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(cudaMalloc((void **)&dudy_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(cudaMalloc((void **)&dvdx_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(cudaMalloc((void **)&dvdy_g, nx*ny*sizeof(DECNUM))); // Wave current interaction (i.e remove wci in shallow water) calcwci << <gridDim, blockDim, 0 >> >(nx, ny, Param.wci, Param.hwci, hh_g, wci_g); //CUT_CHECK_ERROR("calcwci execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); // // Slopes of water depth and velocities slopes << <gridDim, blockDim, 0 >> >(nx, ny, Param.dx, hh_g, uu_g, vv_g, dhdx_g, dhdy_g, dudx_g, dudy_g, dvdx_g, dvdy_g);// //CUT_CHECK_ERROR("slopes execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //CUDA_CHECK( cudaMalloc((void **)&cgx_g, nx*ny*ntheta*sizeof(DECNUM )) ); //CUDA_CHECK( cudaMalloc((void **)&cgy_g, nx*ny*ntheta*sizeof(DECNUM )) ); //CUDA_CHECK( cudaMalloc((void **)&ctheta_g, nx*ny*ntheta*sizeof(DECNUM )) ); //CUDA_CHECK( cudaMemcpy(C,kh_g, ny*nx*sizeof(DECNUM ), cudaMemcpyDeviceToHost) ); //Propagation speed in theta space propagtheta << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, wci_g, ctheta_g,/*c_g,cx_g,cy_g,*/cxsth_g, sxnth_g,/*uu_g,vv_g,*/dhdx_g, dhdy_g, dudx_g, dudy_g, dvdx_g, dvdy_g, sigm_g, kh_g);// //CUT_CHECK_ERROR("propagtheta execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); ////////// //CUDA_CHECK( cudaMemcpy(ctheta,ctheta_g, 
ny*nx*ntheta*sizeof(DECNUM ), cudaMemcpyDeviceToHost) ); ////////// CUDA_CHECK(cudaFree(dhdx_g)); CUDA_CHECK(cudaFree(dhdy_g)); CUDA_CHECK(cudaFree(dudx_g)); CUDA_CHECK(cudaFree(dudy_g)); CUDA_CHECK(cudaFree(dvdx_g)); CUDA_CHECK(cudaFree(dvdy_g)); // //read3Dnc(nx,ny,ntheta,"eeX.nc",ee); //CUDA_CHECK( cudaMemcpy(ee_g, ee, nx*ny*ntheta*sizeof(DECNUM ), cudaMemcpyHostToDevice) ); // // transform to wave action // action << <gridDim, blockDim, 0 >> >(ntheta, nx, ny, ee_g, sigm_g); //CUT_CHECK_ERROR("action execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); // // Upwind Euler timestep propagation // CUDA_CHECK(cudaMalloc((void **)&xadvec_g, nx*ny*ntheta*sizeof(DECNUM))); CUDA_CHECK(cudaMalloc((void **)&yadvec_g, nx*ny*ntheta*sizeof(DECNUM))); CUDA_CHECK(cudaMalloc((void **)&thetaadvec_g, nx*ny*ntheta*sizeof(DECNUM))); xadvecupwind2 << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, wci_g, ee_g, cg_g, cxsth_g, uu_g, xadvec_g); //CUT_CHECK_ERROR("eulerupwind xadvec execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); yadvecupwind2 << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, wci_g, ee_g, cg_g, sxnth_g, vv_g, yadvec_g); //CUT_CHECK_ERROR("eulerupwind yadvec execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //CUDA_CHECK( cudaMalloc((void **)&eect_g, nx*ny*ntheta*sizeof(DECNUM )) ); //eectheta<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,ee_g,ctheta_g,eect_g); ////CUT_CHECK_ERROR("eulerupwind eectheta execution failed\n"); //CUDA_CHECK( cudaThreadSynchronize() ); //thetaadvecuw<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,dtheta,eect_g,thetaadvec_g); ////CUT_CHECK_ERROR("eulerupwind thetaadvecuw execution failed\n"); //CUDA_CHECK( cudaThreadSynchronize() ); thetaadvecuw2ho << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, Param.wci, ee_g, ctheta_g, thetaadvec_g); //CUT_CHECK_ERROR("eulerupwind thetaadvec execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //CUDA_CHECK( cudaMemcpy(ctheta,yadvec_g, 
ny*nx*ntheta*sizeof(DECNUM ), cudaMemcpyDeviceToHost) ); //CUDA_CHECK( cudaMemcpy(ctheta,thetaadvec_g, ny*nx*ntheta*sizeof(DECNUM ), cudaMemcpyDeviceToHost) ); //read3Dnc(nx,ny,ntheta,"xadvecX.nc",ee); //CUDA_CHECK( cudaMemcpy(xadvec_g, ee, nx*ny*sizeof(DECNUM ), cudaMemcpyHostToDevice) ); //read3Dnc(nx,ny,ntheta,"yadvecX.nc",ee); //CUDA_CHECK( cudaMemcpy(yadvec_g, ee, nx*ny*sizeof(DECNUM ), cudaMemcpyHostToDevice) ); //read3Dnc(nx,ny,ntheta,"thetaadvecX.nc",ee); //CUDA_CHECK( cudaMemcpy(thetaadvec_g, ee, nx*ny*sizeof(DECNUM ), cudaMemcpyHostToDevice) ); eulerupwind << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, Param.wci, ee_g, xadvec_g, yadvec_g, thetaadvec_g); //CUT_CHECK_ERROR("eulerupwind execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //CUDA_CHECK( cudaFree(cgx_g)); //CUDA_CHECK( cudaFree(cgy_g)); //Fix lateraL BND rollerlatbnd << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.eps, hh_g, ee_g); //CUT_CHECK_ERROR("energy latbnd execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); // // transform back to wave energy // energy << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, ee_g, sigm_g); //CUT_CHECK_ERROR("energy execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //CUDA_CHECK( cudaMemcpy(ctheta,ee_g, ny*nx*ntheta*sizeof(DECNUM ), cudaMemcpyDeviceToHost) ); //CUDA_CHECK( cudaMalloc((void **)&H_g, nx*ny*sizeof(DECNUM )) ); CUDA_CHECK(cudaMalloc((void **)&E_g, nx*ny*sizeof(DECNUM))); //CUDA_CHECK( cudaMalloc((void **)&D_g, nx*ny*sizeof(DECNUM )) ); //CUDA_CHECK( cudaMemcpy(ctheta,ee_g, ny*nx*ntheta*sizeof(DECNUM ), cudaMemcpyDeviceToHost) ); // // Energy integrated over wave directions,Hrms // energint << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.rho, Param.g, Param.gammax, E_g, H_g, hh_g, ee_g); //CUT_CHECK_ERROR("energint execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); // // calculate change in intrinsic frequency // removed because it is super slow and doesn't do much // // tm is thetamean 
and it is calculated in the mean dir scheme // CUDA_CHECK( cudaMalloc((void **)&tm_g, nx*ny*sizeof(DECNUM )) ); // calctm<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,tm_g,theta_g,ee_g); // //CUT_CHECK_ERROR("energint execution failed\n"); // CUDA_CHECK( cudaThreadSynchronize() ); /* //Change of intrinsec frequency */ // // Total dissipation from breaking and bottom friction // if (Param.breakmodel == 1) { roelvink << <gridDim, blockDim, 0 >> >(nx, ny, Param.rho, Param.g, Param.gammaa, Param.alpha, Param.n, Trep, fwm_g, cfm_g, hh_g, H_g, E_g, D_g, k_g); //CUT_CHECK_ERROR("roelvink execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); } else { baldock << <gridDim, blockDim, 0 >> > (nx, ny, Param.rho, Param.g, Param.gammaa, Param.alpha, Param.n, Trep, fwm_g, cfm_g, hh_g, H_g, E_g, D_g, k_g);//Baldock more appropriate for pseudo stationary cases //CUT_CHECK_ERROR("baldoc execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); } // // Calculate roller energy balance // //CUDA_CHECK( cudaMemcpy(hhmean,E_g, nx*ny*sizeof(DECNUM ), cudaMemcpyDeviceToHost) ); if (Param.roller == 1) { xadvecupwind2 << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, wci_g, rr_g, c_g, cxsth_g, uu_g, xadvec_g); //CUT_CHECK_ERROR("eulerupwind xadvec execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); yadvecupwind2 << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, wci_g, rr_g, c_g, sxnth_g, vv_g, yadvec_g); //CUT_CHECK_ERROR("eulerupwind yadvec execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //eectheta<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,rr_g,ctheta_g,eect_g); ////CUT_CHECK_ERROR("eulerupwind eectheta execution failed\n"); //CUDA_CHECK( cudaThreadSynchronize() ); //thetaadvecuw<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,dtheta,eect_g,thetaadvec_g); ////CUT_CHECK_ERROR("eulerupwind thetaadvecuw execution failed\n"); //CUDA_CHECK( cudaThreadSynchronize() ); thetaadvecuw2ho << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, 
Param.wci, rr_g, ctheta_g, thetaadvec_g); //CUT_CHECK_ERROR("eulerupwind thetaadvec execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); eulerupwind << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.dx, dt, Param.wci, rr_g, xadvec_g, yadvec_g, thetaadvec_g); //CUT_CHECK_ERROR("eulerupwind execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); // // Adjust lateral bnds // rollerlatbnd << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.eps, hh_g, rr_g); //CUT_CHECK_ERROR("rollerlatbnd execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); } //CUDA_CHECK( cudaFree(eect_g)); CUDA_CHECK(cudaFree(xadvec_g)); CUDA_CHECK(cudaFree(yadvec_g)); CUDA_CHECK(cudaFree(thetaadvec_g)); //read2Dnc(nx,ny,"D.nc",uu); //CUDA_CHECK( cudaMemcpy(D_g, uu, nx*ny*sizeof(DECNUM ), cudaMemcpyHostToDevice) ); // // Distribution of dissipation over directions and frequencies // dissipation << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.eps, dt, Param.g, Param.beta, wci_g, hh_g, ee_g, D_g, E_g, rr_g, c_g, cxsth_g, sxnth_g, uu_g, vv_g, DR_g, R_g); //CUT_CHECK_ERROR("dissipation execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); // //Fix lateraL BND // rollerlatbnd << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.eps, hh_g, ee_g); //CUT_CHECK_ERROR("energy latbnd execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); // // Compute mean wave direction // meandir << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.rho, Param.g, dtheta, ee_g, theta_g, thetamean_g, E_g, H_g); //CUT_CHECK_ERROR("meandir execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); // // Constant warm start // WARNING ONLY TO BE USED FOR DEBUGGING // // read3Dnc(nx,ny,ntheta,"eeX.nc",ee); // CUDA_CHECK( cudaMemcpy(ee_g, ee, nx*ny*ntheta*sizeof(DECNUM ), cudaMemcpyHostToDevice) ); // // Radiation stresses and forcing terms // CUDA_CHECK(cudaMalloc((void **)&Sxx_g, nx*ny*sizeof(DECNUM))); CUDA_CHECK(cudaMalloc((void **)&Sxy_g, nx*ny*sizeof(DECNUM))); 
CUDA_CHECK(cudaMalloc((void **)&Syy_g, nx*ny*sizeof(DECNUM))); radstress << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.dx, dtheta, ee_g, rr_g, cxsth_g, sxnth_g, cg_g, c_g, Sxx_g, Sxy_g, Syy_g); //CUT_CHECK_ERROR("radstress execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); // // Wave forces // wavforce << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, Param.dx, dtheta, Sxx_g, Sxy_g, Syy_g, Fx_g, Fy_g, hh_g); //CUT_CHECK_ERROR("wavforce execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); twodimbndnoix << <gridDim, blockDim, 0 >> >(nx, ny, Param.eps, hh_g, Fx_g); //CUT_CHECK_ERROR("wave force X bnd execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); twodimbnd << <gridDim, blockDim, 0 >> >(nx, ny, Param.eps, hh_g, Fy_g); //CUT_CHECK_ERROR("wave force Y bnd execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //CUDA_CHECK( cudaMemcpy(ctheta,ctheta_g, ny*nx*ntheta*sizeof(DECNUM ), cudaMemcpyDeviceToHost) ); // // CAlculate stokes velocity and breaker delay //Breaker delay removed because it is slow and kinda useless // breakerdelay << <gridDim, blockDim, 0 >> >(nx, ny, ntheta, dtheta, Param.g, Param.rho, Trep, Param.eps, urms_g, ust_g, H_g, E_g, c_g, k_g, hh_g, R_g); //CUT_CHECK_ERROR("breakerdelay execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); //twodimbnd<<<gridDim, blockDim, 0>>>(nx,ny,eps,hh_g,urms_g); //CUT_CHECK_ERROR("wave force Y bnd execution failed\n"); //CUDA_CHECK( cudaThreadSynchronize() ); twodimbnd << <gridDim, blockDim, 0 >> >(nx, ny, Param.eps, hh_g, ust_g); //CUT_CHECK_ERROR("wave force Y bnd execution failed\n"); CUDA_CHECK(cudaThreadSynchronize()); CUDA_CHECK(cudaFree(Sxy_g)); CUDA_CHECK(cudaFree(Sxx_g)); CUDA_CHECK(cudaFree(Syy_g)); //CUDA_CHECK( cudaFree(cg_g)); //CUDA_CHECK( cudaFree(c_g)); CUDA_CHECK(cudaFree(tm_g)); // // Adjust Offshore Bnd // //CUDA_CHECK( cudaMemcpy(St_g, St, ny*ntheta*sizeof(DECNUM ), cudaMemcpyHostToDevice) ); //offshorebndWav(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g) 
//offshorebndWav<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g); ////CUT_CHECK_ERROR("Offshore Wave bnd execution failed\n"); //CUDA_CHECK( cudaThreadSynchronize() ); CUDA_CHECK(cudaFree(E_g)); //CUDA_CHECK( cudaFree(H_g)); //CUDA_CHECK( cudaFree(D_g)); //CUDA_CHECK( cudaFree(k_g)); CUDA_CHECK(cudaFree(kh_g)); CUDA_CHECK(cudaFree(sinh2kh_g)); } void wavestepCPU(XBGPUParam Param) { int nx, ny; nx = Param.nx; ny = Param.ny; //Subroutine runs the wave model //printf("%2.2f\t",ee_g[16+8*nx+1*ntheta]); //offshorebndWav(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g) offshorebndWavCPU(nx, ny, ntheta, totaltime, Trep, St, sigm_g, ee_g); //Sanity check sanityCPU(nx, ny, Param.eps, hh_g, sigm_g, ntheta, ee_g); //printf("%2.2f\t", ee_g[16 + 8 * nx + 1 * ntheta]); //dispersion //printf("%2.2f\t", ee_g[16 + 8 * nx + 1 * ntheta]); dispersionCPU(nx, ny, twopi, Param.g, aphi, bphi, sigm_g, hh_g, k_g, c_g, kh_g, sinh2kh_g, cg_g); // Wave current interaction (i.e remove wci in shallow water) calcwciCPU(nx, ny, Param.wci, Param.hwci, hh_g, wci_g); //printf("%f\t",ee_g[0+16*nx+6*nx*ny]); // Slopes of water depth and velocities slopesCPU(nx, ny, Param.dx, hh_g, uu_g, vv_g, dhdx_g, dhdy_g, dudx_g, dudy_g, dvdx_g, dvdy_g);// //printf("%f\t",ee_g[0+16*nx+6*nx*ny]); //Propagation speed in theta space propagthetaCPU(nx, ny, ntheta, wci_g, ctheta_g, cxsth_g, sxnth_g, dhdx_g, dhdy_g, dudx_g, dudy_g, dvdx_g, dvdy_g, sigm_g, kh_g);// //printf("%f\n",ee_g[200+16*nx]); //read3Dnc(nx, ny, ntheta, "eeX.nc", ee_g); //printf("%f\n", ee_g[200 + 16 * nx]); // transform to wave action actionCPU(ntheta, nx, ny, ee_g, sigm_g); // Upwind Euler timestep propagation xadvecupwind2CPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, wci_g, ee_g, cg_g, cxsth_g, uu_g, xadvec_g); yadvecupwind2CPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, wci_g, ee_g, cg_g, sxnth_g, vv_g, yadvec_g); thetaadvecuw2hoCPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, Param.wci, ee_g, ctheta_g, thetaadvec_g); //Apply 
advection eulerupwindCPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, Param.wci, ee_g, xadvec_g, yadvec_g, thetaadvec_g); //Fix lateraL BND rollerlatbndCPU(nx, ny, ntheta, Param.eps, hh_g, ee_g); // transform back to wave energy energyCPU(nx, ny, ntheta, ee_g, sigm_g); // Energy integrated over wave directions,Hrms energintCPU(nx, ny, ntheta, dtheta, Param.rho, Param.g, Param.gammax, E_g, H_g, hh_g, ee_g); // // calculate change in intrinsic frequency // removed because it is super slow and doesn't do much // // tm is thetamean and it is calculated in the mean dir scheme // CUDA_CHECK( cudaMalloc((void **)&tm_g, nx*ny*sizeof(DECNUM )) ); // calctm<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,tm_g,theta_g,ee_g); // //CUT_CHECK_ERROR("energint execution failed\n"); // CUDA_CHECK( cudaThreadSynchronize() ); //Change of intrinsec frequency // Total dissipation from breaking and bottom friction if (Param.breakmodel == 1) { roelvinkCPU(nx, ny, Param.rho, Param.g, Param.gammaa, Param.alpha, Param.n, Trep, fwm_g, cfm_g, hh_g, H_g, E_g, D_g, k_g); } else { baldockCPU(nx, ny, Param.rho, Param.g, Param.gammaa, Param.alpha, Param.n, Trep, fwm_g, cfm_g, hh_g, H_g, E_g, D_g, k_g);//Baldock more appropriate for pseudo stationary cases } // Calculate roller energy balance if (roller == 1) { xadvecupwind2CPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, wci_g, rr_g, c_g, cxsth_g, uu_g, xadvec_g); yadvecupwind2CPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, wci_g, rr_g, c_g, sxnth_g, vv_g, yadvec_g); thetaadvecuw2hoCPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, Param.wci, rr_g, ctheta_g, thetaadvec_g); eulerupwindCPU(nx, ny, ntheta, dtheta, Param.dx, Param.dt, Param.wci, rr_g, xadvec_g, yadvec_g, thetaadvec_g); rollerlatbndCPU(nx, ny, ntheta, Param.eps, hh_g, rr_g); } // Distribution of dissipation over directions and frequencies dissipationCPU(nx, ny, ntheta, dtheta, Param.eps, Param.dt, Param.g, Param.beta, wci_g, hh_g, ee_g, D_g, E_g, rr_g, c_g, cxsth_g, sxnth_g, uu_g, vv_g, DR_g, R_g); 
//Fix lateraL BND rollerlatbndCPU(nx, ny, ntheta, Param.eps, hh_g, ee_g); // Compute mean wave direction meandirCPU(nx, ny, ntheta, Param.rho, Param.g, dtheta, ee_g, theta_g, thetamean_g, E_g, H_g); // Radiation stresses and forcing terms radstressCPU(nx, ny, ntheta, Param.dx, dtheta, ee_g, rr_g, cxsth_g, sxnth_g, cg_g, c_g, Sxx_g, Sxy_g, Syy_g); // Wave forces wavforceCPU(nx, ny, ntheta, Param.dx, dtheta, Sxx_g, Sxy_g, Syy_g, Fx_g, Fy_g, hh_g); //Lat Bnd twodimbndnoixCPU(nx, ny, Param.eps, hh_g, Fx_g); twodimbndCPU(nx, ny, Param.eps, hh_g, Fy_g); // CAlculate stokes velocity and breaker delay //Breaker delay removed because it is slow and kinda useless breakerdelayCPU(nx, ny, ntheta, dtheta, Param.g, Param.rho, Trep, Param.eps, urms_g, ust_g, H_g, E_g, c_g, k_g, hh_g, R_g); twodimbndCPU(nx, ny, Param.eps, hh_g, ust_g); // Adjust Offshore Bnd //CUDA_CHECK( cudaMemcpy(St_g, St, ny*ntheta*sizeof(DECNUM ), cudaMemcpyHostToDevice) ); //offshorebndWav(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g) //offshorebndWav<<<gridDim, blockDim, 0>>>(nx,ny,ntheta,totaltime,Trep,St_g,sigm_g,ee_g); ////CUT_CHECK_ERROR("Offshore Wave bnd execution failed\n"); //CUDA_CHECK( cudaThreadSynchronize() ); }
b6741084f543c7ecba7652abc80400fe24b86184.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <distance/distance.cuh> #include <raft/cuda_utils.cuh> #include <raft/random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename DataType> __global__ void naiveDistanceAdjKernel(bool* dist, const DataType* x, const DataType* y, int m, int n, int k, DataType eps, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; DataType acc = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto diff = x[xidx] - y[yidx]; acc += diff * diff; } int outidx = isRowMajor ? 
midx * n + nidx : midx + m * nidx; dist[outidx] = acc <= eps; } template <typename DataType> void naiveDistanceAdj(bool* dist, const DataType* x, const DataType* y, int m, int n, int k, DataType eps, bool isRowMajor) { static const dim3 TPB(16, 32, 1); dim3 nblks(raft::ceildiv(m, (int)TPB.x), raft::ceildiv(n, (int)TPB.y), 1); naiveDistanceAdjKernel<DataType> < <<nblks, TPB>>(dist, x, y, m, n, k, eps, isRowMajor); CUDA_CHECK(hipPeekAtLastError()); } template <typename DataType> struct DistanceAdjInputs { DataType eps; int m, n, k; bool isRowMajor; unsigned long long int seed; }; template <typename DataType> ::std::ostream& operator<<(::std::ostream& os, const DistanceAdjInputs<DataType>& dims) { return os; } template <typename DataType> class DistanceAdjTest : public ::testing::TestWithParam<DistanceAdjInputs<DataType>> { public: DistanceAdjTest() : x(0, stream), y(0, stream), dist_ref(0, stream), dist(0, stream) {} void SetUp() override { params = ::testing::TestWithParam < DistanceAdjInputs<DataType>::GetParam(); raft::random::Rng r(params.seed); auto m = params.m; auto n = params.n; auto k = params.k; bool isRowMajor = params.isRowMajor; hipStream_t stream = 0; CUDA_CHECK(hipStreamCreate(&stream)); x = rmm::device_scalar<DataType>(m * k, stream); y = rmm::device_scalar<DataType>(n * k, stream); dist_ref = rmm::device_scalar<bool>(m * n, stream); dist = rmm::device_scalar<bool>(m * n, stream); r.uniform(x.data(), m * k, DataType(-1.0), DataType(1.0), stream); r.uniform(y.data(), n * k, DataType(-1.0), DataType(1.0), stream); DataType threshold = params.eps; naiveDistanceAdj(dist_ref.data(), x.data(), y.data(), m, n, k, threshold, isRowMajor); size_t worksize = getWorkspaceSize<raft::distance::DistanceType::L2Expanded, DataType, DataType, bool>( x, y, m, n, k); rmm::device_uvector<char> workspace(worksize, stream); auto fin_op = [threshold] __device__(DataType d_val, int g_d_idx) { return d_val <= threshold; }; distance<raft::distance::DistanceType::L2Expanded, 
DataType, DataType, bool>(x.data(), y.data(), dist.data(), m, n, k, workspace.data(), worksize, fin_op, stream, isRowMajor); CUDA_CHECK(hipStreamDestroy(stream)); } protected: DistanceAdjInputs<DataType> params; rmm::device_scalar<DataType> x, y; rmm::device_scalar<bool> dist_ref, dist; }; const std::vector<DistanceAdjInputs<float>> inputsf = { {0.01f, 1024, 1024, 32, true, 1234ULL}, {0.1f, 1024, 1024, 32, true, 1234ULL}, {1.0f, 1024, 1024, 32, true, 1234ULL}, {10.0f, 1024, 1024, 32, true, 1234ULL}, {0.01f, 1024, 1024, 32, false, 1234ULL}, {0.1f, 1024, 1024, 32, false, 1234ULL}, {1.0f, 1024, 1024, 32, false, 1234ULL}, {10.0f, 1024, 1024, 32, false, 1234ULL}, }; typedef DistanceAdjTest<float> DistanceAdjTestF; TEST_P(DistanceAdjTestF, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(devArrMatch(dist_ref.data(), dist.data(), m, n, raft::Compare<bool>())); } INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceAdjInputs<double>> inputsd = { {0.01, 1024, 1024, 32, true, 1234ULL}, {0.1, 1024, 1024, 32, true, 1234ULL}, {1.0, 1024, 1024, 32, true, 1234ULL}, {10.0, 1024, 1024, 32, true, 1234ULL}, {0.01, 1024, 1024, 32, false, 1234ULL}, {0.1, 1024, 1024, 32, false, 1234ULL}, {1.0, 1024, 1024, 32, false, 1234ULL}, {10.0, 1024, 1024, 32, false, 1234ULL}, }; typedef DistanceAdjTest<double> DistanceAdjTestD; TEST_P(DistanceAdjTestD, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(devArrMatch(dist_ref.data(), dist.data(), m, n, raft::Compare<bool>())); } INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestD, ::testing::ValuesIn(inputsd)); } // namespace Distance } // end namespace MLCommon
b6741084f543c7ecba7652abc80400fe24b86184.cu
/* * Copyright (c) 2018-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <distance/distance.cuh> #include <raft/cuda_utils.cuh> #include <raft/random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename DataType> __global__ void naiveDistanceAdjKernel(bool* dist, const DataType* x, const DataType* y, int m, int n, int k, DataType eps, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; DataType acc = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto diff = x[xidx] - y[yidx]; acc += diff * diff; } int outidx = isRowMajor ? 
midx * n + nidx : midx + m * nidx; dist[outidx] = acc <= eps; } template <typename DataType> void naiveDistanceAdj(bool* dist, const DataType* x, const DataType* y, int m, int n, int k, DataType eps, bool isRowMajor) { static const dim3 TPB(16, 32, 1); dim3 nblks(raft::ceildiv(m, (int)TPB.x), raft::ceildiv(n, (int)TPB.y), 1); naiveDistanceAdjKernel<DataType> < <<nblks, TPB>>(dist, x, y, m, n, k, eps, isRowMajor); CUDA_CHECK(cudaPeekAtLastError()); } template <typename DataType> struct DistanceAdjInputs { DataType eps; int m, n, k; bool isRowMajor; unsigned long long int seed; }; template <typename DataType> ::std::ostream& operator<<(::std::ostream& os, const DistanceAdjInputs<DataType>& dims) { return os; } template <typename DataType> class DistanceAdjTest : public ::testing::TestWithParam<DistanceAdjInputs<DataType>> { public: DistanceAdjTest() : x(0, stream), y(0, stream), dist_ref(0, stream), dist(0, stream) {} void SetUp() override { params = ::testing::TestWithParam < DistanceAdjInputs<DataType>::GetParam(); raft::random::Rng r(params.seed); auto m = params.m; auto n = params.n; auto k = params.k; bool isRowMajor = params.isRowMajor; cudaStream_t stream = 0; CUDA_CHECK(cudaStreamCreate(&stream)); x = rmm::device_scalar<DataType>(m * k, stream); y = rmm::device_scalar<DataType>(n * k, stream); dist_ref = rmm::device_scalar<bool>(m * n, stream); dist = rmm::device_scalar<bool>(m * n, stream); r.uniform(x.data(), m * k, DataType(-1.0), DataType(1.0), stream); r.uniform(y.data(), n * k, DataType(-1.0), DataType(1.0), stream); DataType threshold = params.eps; naiveDistanceAdj(dist_ref.data(), x.data(), y.data(), m, n, k, threshold, isRowMajor); size_t worksize = getWorkspaceSize<raft::distance::DistanceType::L2Expanded, DataType, DataType, bool>( x, y, m, n, k); rmm::device_uvector<char> workspace(worksize, stream); auto fin_op = [threshold] __device__(DataType d_val, int g_d_idx) { return d_val <= threshold; }; distance<raft::distance::DistanceType::L2Expanded, 
DataType, DataType, bool>(x.data(), y.data(), dist.data(), m, n, k, workspace.data(), worksize, fin_op, stream, isRowMajor); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: DistanceAdjInputs<DataType> params; rmm::device_scalar<DataType> x, y; rmm::device_scalar<bool> dist_ref, dist; }; const std::vector<DistanceAdjInputs<float>> inputsf = { {0.01f, 1024, 1024, 32, true, 1234ULL}, {0.1f, 1024, 1024, 32, true, 1234ULL}, {1.0f, 1024, 1024, 32, true, 1234ULL}, {10.0f, 1024, 1024, 32, true, 1234ULL}, {0.01f, 1024, 1024, 32, false, 1234ULL}, {0.1f, 1024, 1024, 32, false, 1234ULL}, {1.0f, 1024, 1024, 32, false, 1234ULL}, {10.0f, 1024, 1024, 32, false, 1234ULL}, }; typedef DistanceAdjTest<float> DistanceAdjTestF; TEST_P(DistanceAdjTestF, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(devArrMatch(dist_ref.data(), dist.data(), m, n, raft::Compare<bool>())); } INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceAdjInputs<double>> inputsd = { {0.01, 1024, 1024, 32, true, 1234ULL}, {0.1, 1024, 1024, 32, true, 1234ULL}, {1.0, 1024, 1024, 32, true, 1234ULL}, {10.0, 1024, 1024, 32, true, 1234ULL}, {0.01, 1024, 1024, 32, false, 1234ULL}, {0.1, 1024, 1024, 32, false, 1234ULL}, {1.0, 1024, 1024, 32, false, 1234ULL}, {10.0, 1024, 1024, 32, false, 1234ULL}, }; typedef DistanceAdjTest<double> DistanceAdjTestD; TEST_P(DistanceAdjTestD, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(devArrMatch(dist_ref.data(), dist.data(), m, n, raft::Compare<bool>())); } INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestD, ::testing::ValuesIn(inputsd)); } // namespace Distance } // end namespace MLCommon
3c2a297afb85d478c4ca55d88e45e029867e3b7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void cube(float * d_out, float * d_in){ // Todo: Fill in this function int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f*f*f; } int main(int argc, char ** argv) { const int ARRAY_SIZE = 96; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory hipMalloc((void**) &d_in, ARRAY_BYTES); hipMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); // launch the kernel hipLaunchKernelGGL(( cube), dim3(1), dim3(128), 0, 0, d_out, d_in); // copy back the result array to the CPU hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } hipFree(d_in); hipFree(d_out); return 0; }
3c2a297afb85d478c4ca55d88e45e029867e3b7c.cu
#include <stdio.h> __global__ void cube(float * d_out, float * d_in){ // Todo: Fill in this function int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f*f*f; } int main(int argc, char ** argv) { const int ARRAY_SIZE = 96; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory cudaMalloc((void**) &d_in, ARRAY_BYTES); cudaMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); // launch the kernel cube<<<1, 128>>>(d_out, d_in); // copy back the result array to the CPU cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } cudaFree(d_in); cudaFree(d_out); return 0; }
7f2ea30bdf0b7e9b2d98de601bae2e5656b5072f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2014-2017 Oxford University Innovation Limited and the authors of ITM #include "ITMVisualisationHelpers_CUDA.h" using namespace ITMLib; //device implementations __global__ void ITMLib::countVisibleBlocks_device(const int *visibleEntryIDs, int noVisibleEntries, const ITMHashEntry *hashTable, uint *noBlocks, int minBlockId, int maxBlockId) { int globalIdx = threadIdx.x + blockIdx.x * blockDim.x; if (globalIdx >= noVisibleEntries) return; int entryId = visibleEntryIDs[globalIdx]; int blockId = hashTable[entryId].ptr; if ((blockId >= minBlockId) && (blockId <= maxBlockId)) atomicAdd(noBlocks, 1); } __global__ void ITMLib::buildCompleteVisibleList_device(const ITMHashEntry *hashTable, /*ITMHashCacheState *cacheStates, bool useSwapping,*/ int noTotalEntries, int *visibleEntryIDs, int *noVisibleEntries, uchar *entriesVisibleType, Matrix4f M, Vector4f projParams, Vector2i imgSize, float voxelSize) { int targetIdx = threadIdx.x + blockIdx.x * blockDim.x; if (targetIdx > noTotalEntries - 1) return; __shared__ bool shouldPrefix; unsigned char hashVisibleType = 0; //entriesVisibleType[targetIdx]; const ITMHashEntry &hashEntry = hashTable[targetIdx]; shouldPrefix = false; __syncthreads(); if (hashEntry.ptr >= 0) { shouldPrefix = true; bool isVisible, isVisibleEnlarged; checkBlockVisibility<false>(isVisible, isVisibleEnlarged, hashEntry.pos, M, projParams, voxelSize, imgSize); hashVisibleType = isVisible; } if (hashVisibleType > 0) shouldPrefix = true; __syncthreads(); if (shouldPrefix) { int offset = computePrefixSum_device<int>(hashVisibleType > 0, noVisibleEntries, blockDim.x * blockDim.y, threadIdx.x); if (offset != -1) visibleEntryIDs[offset] = targetIdx; } } __global__ void ITMLib::projectAndSplitBlocks_device(const ITMHashEntry *hashEntries, const int *visibleEntryIDs, int noVisibleEntries, const Matrix4f pose_M, const Vector4f intrinsics, const Vector2i imgSize, float 
voxelSize, RenderingBlock *renderingBlocks, uint *noTotalBlocks) { int in_offset = threadIdx.x + blockDim.x * blockIdx.x; const ITMHashEntry & blockData(hashEntries[visibleEntryIDs[in_offset]]); Vector2i upperLeft, lowerRight; Vector2f zRange; bool validProjection = false; if (in_offset < noVisibleEntries) if (blockData.ptr >= 0) validProjection = ProjectSingleBlock(blockData.pos, pose_M, intrinsics, imgSize, voxelSize, upperLeft, lowerRight, zRange); Vector2i requiredRenderingBlocks(ceilf((float)(lowerRight.x - upperLeft.x + 1) / renderingBlockSizeX), ceilf((float)(lowerRight.y - upperLeft.y + 1) / renderingBlockSizeY)); size_t requiredNumBlocks = requiredRenderingBlocks.x * requiredRenderingBlocks.y; if (!validProjection) requiredNumBlocks = 0; int out_offset = computePrefixSum_device<uint>(requiredNumBlocks, noTotalBlocks, blockDim.x, threadIdx.x); if (!validProjection) return; if ((out_offset == -1) || (out_offset + requiredNumBlocks > MAX_RENDERING_BLOCKS)) return; CreateRenderingBlocks(renderingBlocks, out_offset, upperLeft, lowerRight, zRange); } __global__ void ITMLib::checkProjectAndSplitBlocks_device(const ITMHashEntry *hashEntries, int noHashEntries, const Matrix4f pose_M, const Vector4f intrinsics, const Vector2i imgSize, float voxelSize, RenderingBlock *renderingBlocks, uint *noTotalBlocks) { int targetIdx = threadIdx.x + blockDim.x * blockIdx.x; if (targetIdx >= noHashEntries) return; const ITMHashEntry & hashEntry = hashEntries[targetIdx]; Vector2i upperLeft, lowerRight; Vector2f zRange; bool validProjection = false; if (hashEntry.ptr >= 0) validProjection = ProjectSingleBlock(hashEntry.pos, pose_M, intrinsics, imgSize, voxelSize, upperLeft, lowerRight, zRange); Vector2i requiredRenderingBlocks(ceilf((float)(lowerRight.x - upperLeft.x + 1) / renderingBlockSizeX), ceilf((float)(lowerRight.y - upperLeft.y + 1) / renderingBlockSizeY)); size_t requiredNumBlocks = requiredRenderingBlocks.x * requiredRenderingBlocks.y; if (!validProjection) 
requiredNumBlocks = 0; int out_offset = computePrefixSum_device<uint>(requiredNumBlocks, noTotalBlocks, blockDim.x, threadIdx.x); if (requiredNumBlocks == 0) return; if ((out_offset == -1) || (out_offset + requiredNumBlocks > MAX_RENDERING_BLOCKS)) return; CreateRenderingBlocks(renderingBlocks, out_offset, upperLeft, lowerRight, zRange); } __global__ void ITMLib::fillBlocks_device(uint noTotalBlocks, const RenderingBlock *renderingBlocks, Vector2i imgSize, Vector2f *minmaxData) { int x = threadIdx.x; int y = threadIdx.y; int block = blockIdx.x * 4 + blockIdx.y; if (block >= noTotalBlocks) return; const RenderingBlock & b(renderingBlocks[block]); int xpos = b.upperLeft.x + x; if (xpos > b.lowerRight.x) return; int ypos = b.upperLeft.y + y; if (ypos > b.lowerRight.y) return; Vector2f & pixel(minmaxData[xpos + ypos*imgSize.x]); atomicMin(&pixel.x, b.zRange.x); atomicMax(&pixel.y, b.zRange.y); } __global__ void ITMLib::findMissingPoints_device(int *fwdProjMissingPoints, uint *noMissingPoints, const Vector2f *minmaximg, Vector4f *forwardProjection, float *currentDepth, Vector2i imgSize) { int x = (threadIdx.x + blockIdx.x * blockDim.x), y = (threadIdx.y + blockIdx.y * blockDim.y); if (x >= imgSize.x || y >= imgSize.y) return; int locId = x + y * imgSize.x; int locId2 = (int)floor((float)x / minmaximg_subsample) + (int)floor((float)y / minmaximg_subsample) * imgSize.x; Vector4f fwdPoint = forwardProjection[locId]; Vector2f minmaxval = minmaximg[locId2]; float depth = currentDepth[locId]; bool hasPoint = false; __shared__ bool shouldPrefix; shouldPrefix = false; __syncthreads(); if ((fwdPoint.w <= 0) && ((fwdPoint.x == 0 && fwdPoint.y == 0 && fwdPoint.z == 0) || (depth > 0)) && (minmaxval.x < minmaxval.y)) //if ((fwdPoint.w <= 0) && (minmaxval.x < minmaxval.y)) { shouldPrefix = true; hasPoint = true; } __syncthreads(); if (shouldPrefix) { int offset = computePrefixSum_device(hasPoint, noMissingPoints, blockDim.x * blockDim.y, threadIdx.x + threadIdx.y * blockDim.x); if 
(offset != -1) fwdProjMissingPoints[offset] = locId; } } __global__ void ITMLib::forwardProject_device(Vector4f *forwardProjection, const Vector4f *pointsRay, Vector2i imgSize, Matrix4f M, Vector4f projParams, float voxelSize) { int x = (threadIdx.x + blockIdx.x * blockDim.x), y = (threadIdx.y + blockIdx.y * blockDim.y); if (x >= imgSize.x || y >= imgSize.y) return; int locId = x + y * imgSize.x; Vector4f pixel = pointsRay[locId]; int locId_new = forwardProjectPixel(pixel * voxelSize, M, projParams, imgSize); if (locId_new >= 0) forwardProjection[locId_new] = pixel; }
7f2ea30bdf0b7e9b2d98de601bae2e5656b5072f.cu
// Copyright 2014-2017 Oxford University Innovation Limited and the authors of ITM #include "ITMVisualisationHelpers_CUDA.h" using namespace ITMLib; //device implementations __global__ void ITMLib::countVisibleBlocks_device(const int *visibleEntryIDs, int noVisibleEntries, const ITMHashEntry *hashTable, uint *noBlocks, int minBlockId, int maxBlockId) { int globalIdx = threadIdx.x + blockIdx.x * blockDim.x; if (globalIdx >= noVisibleEntries) return; int entryId = visibleEntryIDs[globalIdx]; int blockId = hashTable[entryId].ptr; if ((blockId >= minBlockId) && (blockId <= maxBlockId)) atomicAdd(noBlocks, 1); } __global__ void ITMLib::buildCompleteVisibleList_device(const ITMHashEntry *hashTable, /*ITMHashCacheState *cacheStates, bool useSwapping,*/ int noTotalEntries, int *visibleEntryIDs, int *noVisibleEntries, uchar *entriesVisibleType, Matrix4f M, Vector4f projParams, Vector2i imgSize, float voxelSize) { int targetIdx = threadIdx.x + blockIdx.x * blockDim.x; if (targetIdx > noTotalEntries - 1) return; __shared__ bool shouldPrefix; unsigned char hashVisibleType = 0; //entriesVisibleType[targetIdx]; const ITMHashEntry &hashEntry = hashTable[targetIdx]; shouldPrefix = false; __syncthreads(); if (hashEntry.ptr >= 0) { shouldPrefix = true; bool isVisible, isVisibleEnlarged; checkBlockVisibility<false>(isVisible, isVisibleEnlarged, hashEntry.pos, M, projParams, voxelSize, imgSize); hashVisibleType = isVisible; } if (hashVisibleType > 0) shouldPrefix = true; __syncthreads(); if (shouldPrefix) { int offset = computePrefixSum_device<int>(hashVisibleType > 0, noVisibleEntries, blockDim.x * blockDim.y, threadIdx.x); if (offset != -1) visibleEntryIDs[offset] = targetIdx; } } __global__ void ITMLib::projectAndSplitBlocks_device(const ITMHashEntry *hashEntries, const int *visibleEntryIDs, int noVisibleEntries, const Matrix4f pose_M, const Vector4f intrinsics, const Vector2i imgSize, float voxelSize, RenderingBlock *renderingBlocks, uint *noTotalBlocks) { int in_offset = 
threadIdx.x + blockDim.x * blockIdx.x; const ITMHashEntry & blockData(hashEntries[visibleEntryIDs[in_offset]]); Vector2i upperLeft, lowerRight; Vector2f zRange; bool validProjection = false; if (in_offset < noVisibleEntries) if (blockData.ptr >= 0) validProjection = ProjectSingleBlock(blockData.pos, pose_M, intrinsics, imgSize, voxelSize, upperLeft, lowerRight, zRange); Vector2i requiredRenderingBlocks(ceilf((float)(lowerRight.x - upperLeft.x + 1) / renderingBlockSizeX), ceilf((float)(lowerRight.y - upperLeft.y + 1) / renderingBlockSizeY)); size_t requiredNumBlocks = requiredRenderingBlocks.x * requiredRenderingBlocks.y; if (!validProjection) requiredNumBlocks = 0; int out_offset = computePrefixSum_device<uint>(requiredNumBlocks, noTotalBlocks, blockDim.x, threadIdx.x); if (!validProjection) return; if ((out_offset == -1) || (out_offset + requiredNumBlocks > MAX_RENDERING_BLOCKS)) return; CreateRenderingBlocks(renderingBlocks, out_offset, upperLeft, lowerRight, zRange); } __global__ void ITMLib::checkProjectAndSplitBlocks_device(const ITMHashEntry *hashEntries, int noHashEntries, const Matrix4f pose_M, const Vector4f intrinsics, const Vector2i imgSize, float voxelSize, RenderingBlock *renderingBlocks, uint *noTotalBlocks) { int targetIdx = threadIdx.x + blockDim.x * blockIdx.x; if (targetIdx >= noHashEntries) return; const ITMHashEntry & hashEntry = hashEntries[targetIdx]; Vector2i upperLeft, lowerRight; Vector2f zRange; bool validProjection = false; if (hashEntry.ptr >= 0) validProjection = ProjectSingleBlock(hashEntry.pos, pose_M, intrinsics, imgSize, voxelSize, upperLeft, lowerRight, zRange); Vector2i requiredRenderingBlocks(ceilf((float)(lowerRight.x - upperLeft.x + 1) / renderingBlockSizeX), ceilf((float)(lowerRight.y - upperLeft.y + 1) / renderingBlockSizeY)); size_t requiredNumBlocks = requiredRenderingBlocks.x * requiredRenderingBlocks.y; if (!validProjection) requiredNumBlocks = 0; int out_offset = computePrefixSum_device<uint>(requiredNumBlocks, 
noTotalBlocks, blockDim.x, threadIdx.x); if (requiredNumBlocks == 0) return; if ((out_offset == -1) || (out_offset + requiredNumBlocks > MAX_RENDERING_BLOCKS)) return; CreateRenderingBlocks(renderingBlocks, out_offset, upperLeft, lowerRight, zRange); } __global__ void ITMLib::fillBlocks_device(uint noTotalBlocks, const RenderingBlock *renderingBlocks, Vector2i imgSize, Vector2f *minmaxData) { int x = threadIdx.x; int y = threadIdx.y; int block = blockIdx.x * 4 + blockIdx.y; if (block >= noTotalBlocks) return; const RenderingBlock & b(renderingBlocks[block]); int xpos = b.upperLeft.x + x; if (xpos > b.lowerRight.x) return; int ypos = b.upperLeft.y + y; if (ypos > b.lowerRight.y) return; Vector2f & pixel(minmaxData[xpos + ypos*imgSize.x]); atomicMin(&pixel.x, b.zRange.x); atomicMax(&pixel.y, b.zRange.y); } __global__ void ITMLib::findMissingPoints_device(int *fwdProjMissingPoints, uint *noMissingPoints, const Vector2f *minmaximg, Vector4f *forwardProjection, float *currentDepth, Vector2i imgSize) { int x = (threadIdx.x + blockIdx.x * blockDim.x), y = (threadIdx.y + blockIdx.y * blockDim.y); if (x >= imgSize.x || y >= imgSize.y) return; int locId = x + y * imgSize.x; int locId2 = (int)floor((float)x / minmaximg_subsample) + (int)floor((float)y / minmaximg_subsample) * imgSize.x; Vector4f fwdPoint = forwardProjection[locId]; Vector2f minmaxval = minmaximg[locId2]; float depth = currentDepth[locId]; bool hasPoint = false; __shared__ bool shouldPrefix; shouldPrefix = false; __syncthreads(); if ((fwdPoint.w <= 0) && ((fwdPoint.x == 0 && fwdPoint.y == 0 && fwdPoint.z == 0) || (depth > 0)) && (minmaxval.x < minmaxval.y)) //if ((fwdPoint.w <= 0) && (minmaxval.x < minmaxval.y)) { shouldPrefix = true; hasPoint = true; } __syncthreads(); if (shouldPrefix) { int offset = computePrefixSum_device(hasPoint, noMissingPoints, blockDim.x * blockDim.y, threadIdx.x + threadIdx.y * blockDim.x); if (offset != -1) fwdProjMissingPoints[offset] = locId; } } __global__ void 
ITMLib::forwardProject_device(Vector4f *forwardProjection, const Vector4f *pointsRay, Vector2i imgSize, Matrix4f M, Vector4f projParams, float voxelSize) { int x = (threadIdx.x + blockIdx.x * blockDim.x), y = (threadIdx.y + blockIdx.y * blockDim.y); if (x >= imgSize.x || y >= imgSize.y) return; int locId = x + y * imgSize.x; Vector4f pixel = pointsRay[locId]; int locId_new = forwardProjectPixel(pixel * voxelSize, M, projParams, imgSize); if (locId_new >= 0) forwardProjection[locId_new] = pixel; }
24b64dcddf2310fae759f481ba4e226e2e1d9da6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Primitive.cuh" /******* Global variables ******/ texture<float4, hipTextureType2D, hipReadModeElementType> texRef; float *pose_prim; unsigned int *res_dev; void AllocPosePrimMatrix() { checkCudaErrors( hipMalloc((void**)&pose_prim, 16 * sizeof(float)) ); } void SetPosePrimMatrix(float *pose_CPU) { checkCudaErrors( hipMemcpy(pose_prim, pose_CPU, 16 * sizeof(float), hipMemcpyHostToDevice) ); } void FreePosePrimMatrix() { checkCudaErrors( hipFree(pose_prim) ); } void AllocBufPrim() { checkCudaErrors( hipMalloc((void **)&res_dev, sizeof(unsigned int)) ); } void FreeBufPrim() { checkCudaErrors( hipFree(res_dev) ); } ////**** Kernel definition *****/ __device__ __forceinline__ void VertexFromBumpProcess(float *VMap, unsigned short *Bump, unsigned char *RGB_bump, unsigned short *Mask_dev, float *param, int n, int m, int lvl) { float x, y, d, ind_i, ind_j; // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*(m/lvl) + j; int i_ref = lvl*i; int j_ref = lvl*j; int idx_ref = i_ref*m + j_ref; if (i > n-1 || j > m-1) return; if (Mask_dev[idx_ref] < 11) { VMap[3*idx] = 0.0; VMap[3*idx+1] = 0.0; VMap[3*idx+2] = 0.0; return; } // Attention indexes are inversed when reading the matrix (shift_i -> e1 -> x; shift_j -> e2 -> y; j - > m_prim -> e1) ind_i = float(i_ref); ind_j = float(j_ref); float Shift_ind[2]; Shift_ind[0] = float(Bump[3*idx_ref])/60000.0; Shift_ind[1] = float(Bump[3*idx_ref+1])/60000.0; x = (ind_i+Shift_ind[0])*2.0/param[4] + param[6]; y = (ind_j+Shift_ind[1])*2.0/param[5] + param[7]; d = param[3] + ((float(Bump[3*idx_ref+2])/2000.0)-15.0); VMap[3*idx] = x*param[8] + y*param[11] + d*param[0]; VMap[3*idx+1] = x*param[9] + y*param[12] + d*param[1]; VMap[3*idx+2] = x*param[10] + y*param[13] + d*param[2]; } __global__ void VertexFromBumpKernel(float *VMap, 
unsigned short *Bump, unsigned char *RGB_bump, unsigned short *Mask_dev, float *param, int n, int m, int lvl) { VertexFromBumpProcess(VMap, Bump, RGB_bump, Mask_dev, param, n, m, lvl); } __device__ __forceinline__ void VertexOnlyFromBumpProcess(float *VMap, unsigned short *Bump, unsigned short *Mask_dev, float *param, int n, int m, int lvl) { float x, y, d, ind_i, ind_j; // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*(m/lvl) + j; int i_ref = lvl*i; int j_ref = lvl*j; int idx_ref = i_ref*m + j_ref; if (i > n-1 || j > m-1) return; if (Mask_dev[idx_ref] < 11) { VMap[3*idx] = 0.0; VMap[3*idx+1] = 0.0; VMap[3*idx+2] = 0.0; return; } // Attention indexes are inversed when reading the matrix (shift_i -> e1 -> x; shift_j -> e2 -> y; j - > m_prim -> e1) ind_i = float(i_ref); ind_j = float(j_ref); float Shift_ind[2]; Shift_ind[0] = float(Bump[3*idx_ref])/60000.0; Shift_ind[1] = float(Bump[3*idx_ref+1])/60000.0; x = (ind_i+Shift_ind[0])*2.0/param[4] + param[6]; y = (ind_j+Shift_ind[1])*2.0/param[5] + param[7]; d = param[3] + ((float(Bump[3*idx_ref+2])/2000.0)-15.0); VMap[3*idx] = x*param[8] + y*param[11] + d*param[0]; VMap[3*idx+1] = x*param[9] + y*param[12] + d*param[1]; VMap[3*idx+2] = x*param[10] + y*param[13] + d*param[2]; } __global__ void VertexOnlyFromBumpKernel(float *VMap, unsigned short *Bump, unsigned short *Mask_dev, float *param, int n, int m, int lvl) { VertexOnlyFromBumpProcess(VMap, Bump, Mask_dev, param, n, m, lvl); } __device__ __forceinline__ void VertexRGBFromBumpProcess(float *VMap, float *RGB, unsigned short *Bump, unsigned char *RGB_bump, unsigned char *Mask_dev, float *param, int n, int m, int lvl) { float x, y, d, ind_i, ind_j; // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; int i_ref = lvl*i; int 
j_ref = lvl*j; //int idx_ref = i_ref*m + j_ref; if (i > n-1 || j > m-1) return; if (Mask_dev[idx] < 11) { VMap[3*idx] = 0.0; VMap[3*idx+1] = 0.0; VMap[3*idx+2] = 0.0; RGB[4*idx] = 0.0; RGB[4*idx+1] = 0.0; RGB[4*idx+2] = 0.0; RGB[4*idx+3] = 0.0; return; } // Attention indexes are inversed when reading the matrix (shift_i -> e1 -> x; shift_j -> e2 -> y; j - > m_prim -> e1) ind_i = float(i_ref); ind_j = float(j_ref); float Shift_ind[2]; Shift_ind[0] = float(Bump[3*idx])/60000.0; Shift_ind[1] = float(Bump[3*idx+1])/60000.0; x = (ind_i+Shift_ind[0])*2.0/param[4] + param[6]; y = (ind_j+Shift_ind[1])*2.0/param[5] + param[7]; d = param[3] + ((float(Bump[3*idx+2])/2000.0)-15.0); VMap[3*idx] = x*param[8] + y*param[11] + d*param[0]; VMap[3*idx+1] = x*param[9] + y*param[12] + d*param[1]; VMap[3*idx+2] = x*param[10] + y*param[13] + d*param[2]; RGB[4*idx] = float(RGB_bump[3*idx])/255.0; RGB[4*idx+1] = float(RGB_bump[3*idx+1])/255.0; RGB[4*idx+2] = float(RGB_bump[3*idx+2])/255.0; RGB[4*idx+3] = 1.0; } __global__ void VertexRGBFromBumpKernel(float *VMap, float *RGB, unsigned short *Bump, unsigned char *RGB_bump, unsigned char *Mask_dev, float *param, int n, int m, int lvl) { VertexRGBFromBumpProcess(VMap, RGB, Bump, RGB_bump, Mask_dev, param, n, m, lvl); } __device__ __forceinline__ void NormalProcess(float *NMap, float *VMap, int n, int m, bool inverse) { float p1 [3]; float p2 [3]; float p3 [3]; float n_p [3]; float n_p1 [3]; float n_p2 [3]; float n_p3 [3]; float n_p4 [3]; float norm_n; // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; int idx_out = i*m + j; unsigned short n_tot = 0; if (i > n-1 || j > m-1) return; if ( i < 1 || i > n-2 || j < 1 || j > m-2 || VMap[3*idx+2] == 0.0) { NMap[3*idx_out] = 0.0; NMap[3*idx_out+1] = 0.0; NMap[3*idx_out+2] = 0.0; return; } p1[0] = VMap[3*idx]; p1[1] = VMap[3*idx+1]; p1[2] = VMap[3*idx+2]; n_p1[0] = 0.0; 
n_p1[1] = 0.0; n_p1[2] = 0.0; n_p2[0] = 0.0; n_p2[1] = 0.0; n_p2[2] = 0.0; n_p3[0] = 0.0; n_p3[1] = 0.0; n_p3[2] = 0.0; n_p4[0] = 0.0; n_p4[1] = 0.0; n_p4[2] = 0.0; ////////////////////////// Triangle 1 ///////////////////////////////// idx = (i+1)*m + j; p2[0] = VMap[3*idx]; p2[1] = VMap[3*idx+1]; p2[2] = VMap[3*idx+2]; idx = i*m + (j+1); p3[0] = VMap[3*idx]; p3[1] = VMap[3*idx+1]; p3[2] = VMap[3*idx+2]; if (p2[2] != 0.0 && p3[2] != 0.0) { n_p1[0] = (p2[1]-p1[1])*(p3[2]-p1[2]) - (p2[2]-p1[2])*(p3[1]-p1[1]); n_p1[1] = (p2[2]-p1[2])*(p3[0]-p1[0]) - (p2[0]-p1[0])*(p3[2]-p1[2]); n_p1[2] = (p2[0]-p1[0])*(p3[1]-p1[1]) - (p2[1]-p1[1])*(p3[0]-p1[0]); norm_n = (n_p1[0]*n_p1[0] + n_p1[1]*n_p1[1] + n_p1[2]*n_p1[2]); if (norm_n != 0.0) { n_p1[0] = n_p1[0] / sqrt(norm_n); n_p1[1] = n_p1[1] / sqrt(norm_n); n_p1[2] = n_p1[2] / sqrt(norm_n); n_tot++; } } ////////////////////////// Triangle 2 ///////////////////////////////// idx = i*m + (j+1); p2[0] = VMap[3*idx]; p2[1] = VMap[3*idx+1]; p2[2] = VMap[3*idx+2]; idx = (i-1)*m + j; p3[0] = VMap[3*idx]; p3[1] = VMap[3*idx+1]; p3[2] = VMap[3*idx+2]; if (p2[2] != 0.0 && p3[2] != 0.0) { n_p2[0] = (p2[1]-p1[1])*(p3[2]-p1[2]) - (p2[2]-p1[2])*(p3[1]-p1[1]); n_p2[1] = (p2[2]-p1[2])*(p3[0]-p1[0]) - (p2[0]-p1[0])*(p3[2]-p1[2]); n_p2[2] = (p2[0]-p1[0])*(p3[1]-p1[1]) - (p2[1]-p1[1])*(p3[0]-p1[0]); norm_n = (n_p2[0]*n_p2[0] + n_p2[1]*n_p2[1] + n_p2[2]*n_p2[2]); if (norm_n != 0.0) { n_p2[0] = n_p2[0] / sqrt(norm_n); n_p2[1] = n_p2[1] / sqrt(norm_n); n_p2[2] = n_p2[2] / sqrt(norm_n); n_tot++; } } ////////////////////////// Triangle 3 ///////////////////////////////// idx = (i-1)*m + j; p2[0] = VMap[3*idx]; p2[1] = VMap[3*idx+1]; p2[2] = VMap[3*idx+2]; idx = i*m + (j-1); p3[0] = VMap[3*idx]; p3[1] = VMap[3*idx+1]; p3[2] = VMap[3*idx+2]; if (p2[2] != 0.0 && p3[2] != 0.0) { n_p3[0] = (p2[1]-p1[1])*(p3[2]-p1[2]) - (p2[2]-p1[2])*(p3[1]-p1[1]); n_p3[1] = (p2[2]-p1[2])*(p3[0]-p1[0]) - (p2[0]-p1[0])*(p3[2]-p1[2]); n_p3[2] = (p2[0]-p1[0])*(p3[1]-p1[1]) - 
(p2[1]-p1[1])*(p3[0]-p1[0]); norm_n = (n_p3[0]*n_p3[0] + n_p3[1]*n_p3[1] + n_p3[2]*n_p3[2]); if (norm_n != 0) { n_p3[0] = n_p3[0] / sqrt(norm_n); n_p3[1] = n_p3[1] / sqrt(norm_n); n_p3[2] = n_p3[2] / sqrt(norm_n); n_tot++; } } ////////////////////////// Triangle 4 ///////////////////////////////// idx = i*m+ (j-1); p2[0] = VMap[3*idx]; p2[1] = VMap[3*idx+1]; p2[2] = VMap[3*idx+2]; idx = (i+1)*m + j; p3[0] = VMap[3*idx]; p3[1] = VMap[3*idx+1]; p3[2] = VMap[3*idx+2]; if (p2[2] != 0.0 && p3[2] != 0.0) { n_p4[0] = (p2[1]-p1[1])*(p3[2]-p1[2]) - (p2[2]-p1[2])*(p3[1]-p1[1]); n_p4[1] = (p2[2]-p1[2])*(p3[0]-p1[0]) - (p2[0]-p1[0])*(p3[2]-p1[2]); n_p4[2] = (p2[0]-p1[0])*(p3[1]-p1[1]) - (p2[1]-p1[1])*(p3[0]-p1[0]); norm_n = (n_p4[0]*n_p4[0] + n_p4[1]*n_p4[1] + n_p4[2]*n_p4[2]); if (norm_n != 0) { n_p4[0] = n_p4[0] / sqrt(norm_n); n_p4[1] = n_p4[1] / sqrt(norm_n); n_p4[2] = n_p4[2] / sqrt(norm_n); n_tot++; } } if (n_tot == 0) { NMap[3*idx_out] = 0.0; NMap[3*idx_out+1] = 0.0; NMap[3*idx_out+2] = 0.0; return; } n_p[0] = (n_p1[0] + n_p2[0] + n_p3[0] + n_p4[0])/float(n_tot); n_p[1] = (n_p1[1] + n_p2[1] + n_p3[1] + n_p4[1])/float(n_tot); n_p[2] = (n_p1[2] + n_p2[2] + n_p3[2] + n_p4[2])/float(n_tot); norm_n = sqrt(n_p[0]*n_p[0] + n_p[1]*n_p[1] + n_p[2]*n_p[2]); if (norm_n != 0) { if (inverse) { NMap[3*idx_out] = -n_p[0]/norm_n; NMap[3*idx_out+1] = -n_p[1]/norm_n; NMap[3*idx_out+2] = -n_p[2]/norm_n; } else { NMap[3*idx_out] = n_p[0]/norm_n; NMap[3*idx_out+1] = n_p[1]/norm_n; NMap[3*idx_out+2] = n_p[2]/norm_n; } } else { NMap[3*idx_out] = 0.0; NMap[3*idx_out+1] = 0.0; NMap[3*idx_out+2] = 0.0; } return; } __global__ void NormalKernel(float *NMap, float *VMap, int n, int m, bool inverse) { NormalProcess(NMap, VMap, n, m, inverse); } __device__ __forceinline__ void QuadTrimProcess(unsigned int *index_dev, float *VMap, unsigned char *Mask, unsigned short min_conf, int n, int m, float thresh, int lvl) { int VIdx [4]; // identifiant de thread a deux dimensions, comme la matrice int i = 
threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*(m/lvl-1) + j; if (i > n/lvl-2 || j > m/lvl-2) return; VIdx[0] = i*m/lvl+j; VIdx[1] = i*m/lvl+j+1; VIdx[2] = (i+1)*m/lvl+j+1; VIdx[3] = (i+1)*m/lvl+j; float diff1 = sqrt((VMap[3*VIdx[0]]-VMap[3*VIdx[1]])*(VMap[3*VIdx[0]]-VMap[3*VIdx[1]]) + (VMap[3*VIdx[0]+1]-VMap[3*VIdx[1]+1])*(VMap[3*VIdx[0]+1]-VMap[3*VIdx[1]+1]) + (VMap[3*VIdx[0]+2]-VMap[3*VIdx[1]+2])*(VMap[3*VIdx[0]+2]-VMap[3*VIdx[1]+2])); float diff2 = sqrt((VMap[3*VIdx[0]]-VMap[3*VIdx[2]])*(VMap[3*VIdx[0]]-VMap[3*VIdx[2]]) + (VMap[3*VIdx[0]+1]-VMap[3*VIdx[2]+1])*(VMap[3*VIdx[0]+1]-VMap[3*VIdx[2]+1]) + (VMap[3*VIdx[0]+2]-VMap[3*VIdx[2]+2])*(VMap[3*VIdx[0]+2]-VMap[3*VIdx[2]+2])); float diff3 = sqrt((VMap[3*VIdx[0]]-VMap[3*VIdx[3]])*(VMap[3*VIdx[0]]-VMap[3*VIdx[3]]) + (VMap[3*VIdx[0]+1]-VMap[3*VIdx[3]+1])*(VMap[3*VIdx[0]+1]-VMap[3*VIdx[3]+1]) + (VMap[3*VIdx[0]+2]-VMap[3*VIdx[3]+2])*(VMap[3*VIdx[0]+2]-VMap[3*VIdx[3]+2])); float diff4 = sqrt((VMap[3*VIdx[1]]-VMap[3*VIdx[2]])*(VMap[3*VIdx[1]]-VMap[3*VIdx[2]]) + (VMap[3*VIdx[1]+1]-VMap[3*VIdx[2]+1])*(VMap[3*VIdx[1]+1]-VMap[3*VIdx[2]+1]) + (VMap[3*VIdx[1]+2]-VMap[3*VIdx[2]+2])*(VMap[3*VIdx[1]+2]-VMap[3*VIdx[2]+2])); float diff5 = sqrt((VMap[3*VIdx[1]]-VMap[3*VIdx[3]])*(VMap[3*VIdx[1]]-VMap[3*VIdx[3]]) + (VMap[3*VIdx[1]+1]-VMap[3*VIdx[3]+1])*(VMap[3*VIdx[1]+1]-VMap[3*VIdx[3]+1]) + (VMap[3*VIdx[1]+2]-VMap[3*VIdx[3]+2])*(VMap[3*VIdx[1]+2]-VMap[3*VIdx[3]+2])); float diff6 = sqrt((VMap[3*VIdx[2]]-VMap[3*VIdx[3]])*(VMap[3*VIdx[2]]-VMap[3*VIdx[3]]) + (VMap[3*VIdx[2]+1]-VMap[3*VIdx[3]+1])*(VMap[3*VIdx[2]+1]-VMap[3*VIdx[3]+1]) + (VMap[3*VIdx[2]+2]-VMap[3*VIdx[3]+2])*(VMap[3*VIdx[2]+2]-VMap[3*VIdx[3]+2])); float max_diff = max(diff1, diff2); max_diff = max(diff3, max_diff); max_diff = max(diff4, max_diff); max_diff = max(diff5, max_diff); max_diff = max(diff6, max_diff); if (Mask[VIdx[0]] < 11 || Mask[VIdx[1]] < 11 || Mask[VIdx[2]] < 11 || Mask[VIdx[3]] < 11 || max_diff > 
thresh) { index_dev[4*idx] = 0; index_dev[4*idx+1] = 0; index_dev[4*idx+2] = 0; index_dev[4*idx+3] = 0; } else { index_dev[4*idx] = unsigned int(VIdx[0]); index_dev[4*idx+1] = unsigned int(VIdx[3]); index_dev[4*idx+2] = unsigned int(VIdx[2]); index_dev[4*idx+3] = unsigned int(VIdx[1]); } } __global__ void QuadTrimKernel(unsigned int *index_dev, float *VMap, unsigned char *Mask, unsigned short min_conf, int n, int m, float thresh, int lvl) { QuadTrimProcess(index_dev, VMap, Mask, min_conf, n, m, thresh, lvl); } __device__ __forceinline__ void UpdateBumpProcess(unsigned short *Bump_dev, unsigned char *RGB_dev, unsigned char *Mask_dev, float *VMap, float *NMap, float *RGB, float *Mask, float *param, float *pose, int N_prim, int M_prim, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; float mask_vis = Mask[4*idx+1]*255.0; float mask_tmp = Mask[4*idx]*255.0; unsigned char mask_curr = unsigned char(__float2int_rn(mask_tmp)); float vcurr[3]; vcurr[0] = VMap[3*idx]; vcurr[1] = VMap[3*idx+1]; vcurr[2] = VMap[3*idx+2]; float ncurr[3]; ncurr[0] = NMap[3*idx]; ncurr[1] = NMap[3*idx+1]; ncurr[2] = NMap[3*idx+2]; if (ncurr[0] == 0.0 && ncurr[1] == 0.0 && ncurr[2] == 0.0) { return; } // Transform current point float vcurr_l[3]; vcurr_l[0] = pose[0]*vcurr[0] + pose[4]*vcurr[1] + pose[8]*vcurr[2] + pose[12]; vcurr_l[1] = pose[1]*vcurr[0] + pose[5]*vcurr[1] + pose[9]*vcurr[2] + pose[13]; vcurr_l[2] = pose[2]*vcurr[0] + pose[6]*vcurr[1] + pose[10]*vcurr[2] + pose[14]; float ncurr_l[3]; ncurr_l[0] = pose[0]*ncurr[0] + pose[4]*ncurr[1] + pose[8]*ncurr[2]; ncurr_l[1] = pose[1]*ncurr[0] + pose[5]*ncurr[1] + pose[9]*ncurr[2]; ncurr_l[2] = pose[2]*ncurr[0] + pose[6]*ncurr[1] + pose[10]*ncurr[2]; float error_dist = (vcurr_l[0]*param[0] + vcurr_l[1]*param[1] + vcurr_l[2]*param[2]) - param[3]; float error_alpha = 
(ncurr_l[0]*param[0] + ncurr_l[1]*param[1] + ncurr_l[2]*param[2]); if (fabs(error_dist) > EPSILON || fabs(error_alpha) < ALPHA) { return; } float proj_a = vcurr_l[0]*param[8] + vcurr_l[1]*param[9] + vcurr_l[2]*param[10]; // e1 float proj_b = vcurr_l[0]*param[11] + vcurr_l[1]*param[12] + vcurr_l[2]*param[13]; // e2 proj_a = (proj_a-param[6])*param[4]/2.0; //shift[0] proj_b = (proj_b-param[7])*param[5]/2.0; //Shift[1]; int ind_i = __float2int_rd(proj_a); int ind_j = __float2int_rd(proj_b); if (ind_i > N_prim-1 || ind_j > M_prim-1 || ind_i < 0 || ind_j < 0 ) return; int idx_prim = ind_i*M_prim + ind_j; unsigned char mask_ref = Mask_dev[idx_prim]; __syncthreads (); if (mask_vis < 10.0 && mask_ref != 1) { if (mask_ref > 11) Mask_dev[idx_prim] = Mask_dev[idx_prim] - 1; //atomicSub(&Mask[idxBump [0]*size[1] + idxBump [1]], 1); else { Mask_dev[idx_prim] = 10; //atomicExch(&Mask[idxBump [0]*size[1] + idxBump [1]], 10); Bump_dev[3*idx_prim] = 0; //atomicExch(&Bump[3*(idxBump [0]*size[1] + idxBump [1])], 0); Bump_dev[3*idx_prim+1] = 0; //atomicExch(&Bump[3*(idxBump [0]*size[1] + idxBump [1])+1], 0); Bump_dev[3*idx_prim+2] = 0; //atomicExch(&Bump[3*(idxBump [0]*size[1] + idxBump [1])+2], 0); RGB_dev[3*idx_prim] = 0; //atomicExch(&RGB[3*(idxBump [0]*size[1] + idxBump [1])], 0); RGB_dev[3*idx_prim+1] = 0; //atomicExch(&RGB[3*(idxBump [0]*size[1] + idxBump [1])+1], 0); RGB_dev[3*idx_prim+2] = 0; //atomicExch(&RGB[3*(idxBump [0]*size[1] + idxBump [1])+2], 0); } return; } float shift [2]; shift[0] = proj_a - float(ind_i); shift[1] = proj_b - float(ind_j); ///// Critical Section ? 
/////// //unsigned char old_mask = Mask_dev[idx_prim]; //if (mask_curr > Mask_dev[idx_prim]) { // Mask_dev[idx_prim] = mask_curr; //atomicMax(&Mask[idxBump [0]*size[1] + idxBump [1]], mask_curr); //} //__syncthreads (); //if ((old_mask < mask_curr && Mask_dev[idx_prim] == mask_curr)) { Bump_dev[3*idx_prim] = unsigned short(__float2int_rn(shift[0]*60000.0)); Bump_dev[3*idx_prim+1] = unsigned short(__float2int_rn(shift[1]*60000.0)); Bump_dev[3*idx_prim+2] = unsigned short(__float2int_rn(((error_dist+15.0))*2000.0)); RGB_dev[3*idx_prim] = unsigned char(__float2int_rn(RGB[4*idx]*255.0)); RGB_dev[3*idx_prim+1] = unsigned char(__float2int_rn(RGB[4*idx+1]*255.0)); RGB_dev[3*idx_prim+2] = unsigned char(__float2int_rn(RGB[4*idx+2]*255.0)); Mask_dev[idx_prim] = mask_curr; //} } __global__ void UpdateBumpKernel(unsigned short *Bump_dev, unsigned char *RGB_dev, unsigned char *Mask_dev, float *VMap, float *NMap, float *RGB, float *Mask, float *param, float *pose, int N_prim, int M_prim, int n, int m) { UpdateBumpProcess(Bump_dev, RGB_dev, Mask_dev, VMap, NMap, RGB, Mask, param, pose, N_prim, M_prim, n, m); } __device__ __forceinline__ void BBOXProcess(unsigned char *Mask, int *BBox, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; if (Mask[idx] < 11) return; if (i < BBox[0]) atomicMin(&BBox[0], i); if (i > BBox[1]) atomicMax(&BBox[1], i); if (j < BBox[2]) atomicMin(&BBox[2], j); if (j > BBox[3]) atomicMax(&BBox[3], j); } __global__ void BBOXKernel(unsigned char *Mask, int *BBox, int n, int m) { BBOXProcess(Mask, BBox, n, m); } __device__ __forceinline__ void CountProcess(unsigned char *Mask, unsigned int *res, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; 
if (i > n-1 || j > m-1) return; if (Mask[idx] < 11) return; atomicAdd(&res[0], 1); } __global__ void CountKernel(unsigned char *Mask, unsigned int *res, int n, int m) { CountProcess(Mask, res, n, m); } __device__ __forceinline__ void OverlapProcess(float *VMap, float *param, unsigned int *counter, int n, int m, int N_prim, int M_prim) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; float pt [3]; pt[0] = VMap[3*idx]; pt[1] = VMap[3*idx+1]; pt[2] = VMap[3*idx+2]; if (pt[0] == 0.0 && pt[1] == 0.0 && pt[2] == 0.0) return; atomicAdd(&counter[1], 1); // Project on parameters float error_dist = (pt[0]*param[0] + pt[1]*param[1] + pt[2]*param[2]) - param[3]; if (fabs(error_dist) > EPSILON) { return; } float proj_a = pt[0]*param[8] + pt[1]*param[9] + pt[2]*param[10]; // e1 float proj_b = pt[0]*param[11] + pt[1]*param[12] + pt[2]*param[13]; // e2 proj_a = (proj_a-param[6])*param[4]/2.0; //shift[0] proj_b = (proj_b-param[7])*param[5]/2.0; //Shift[1]; int ind_i = __float2int_rd(proj_a); int ind_j = __float2int_rd(proj_b); if (ind_i > N_prim-1 || ind_j > M_prim-1 || ind_i < 0 || ind_j < 0 ) return; atomicAdd(&counter[0], 1); } __global__ void OverlapKernel(float *VMap, float *param, unsigned int *counter, int n, int m, int N_prim, int M_prim) { OverlapProcess(VMap, param, counter, n, m, N_prim, M_prim); } __device__ __forceinline__ void SetRGBProcess(unsigned char *RGB, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; float4 RGBD_v = tex2D(texRef, (float)i+0.5f, (float)j+0.5f); RGB[3*idx] = unsigned char(__float2int_rn(RGBD_v.x*255.0)); RGB[3*idx+1] = unsigned char(__float2int_rn(RGBD_v.y*255.0)); RGB[3*idx+2] = unsigned 
char(__float2int_rn(RGBD_v.z*255.0)); } __global__ void SetRGBKernel(unsigned char *RGB, int n, int m) { SetRGBProcess(RGB, n, m); } __device__ __forceinline__ void ReadVtxMaskPrimProcess(float *param, unsigned short *Bump, unsigned char *Mask, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; float4 RGBD_v = tex2D(texRef, (float)i+0.5f, (float)j+0.5f); if (RGBD_v.x == 0.0 && RGBD_v.y == 0.0 && RGBD_v.z == 0.0) { Mask[idx] = 10; Bump[3*idx] = 0; Bump[3*idx+1] = 0; Bump[3*idx+2] = 0; return; } float pt [3]; pt[0] = (RGBD_v.x*20.0)-10.0; pt[1] = (RGBD_v.y*20.0)-10.0; pt[2] = (RGBD_v.z*20.0)-10.0; float error_dist = (pt[0])*param[0] + (pt[1])*param[1] + (pt[2])*param[2] - param[3]; if (fabs(error_dist) > EPSILON) return; float a,b; a = pt[0]*param[8] + pt[1]*param[9] + pt[2]*param[10]; b = pt[0]*param[11] + pt[1]*param[12] + pt[2]*param[13]; a = (a-param[6])*param[4]/2.0; b = (b-param[7])*param[5]/2.0; int ind_i = __float2int_rd(a); int ind_j = __float2int_rd(b); if (ind_i > n-1 || ind_j > m-1 || ind_i < 0 || ind_j < 0 ) return; float shift [2]; shift[0] = a - float(ind_i); shift[1] = b - float(ind_j); int idx_prim = ind_i*m + ind_j; /*bool test = idx_prim == idx; Mask[idx_prim] = unsigned char(test);*/ Bump[3*idx_prim] = unsigned short(__float2int_rn(shift[0]*60000.0)); Bump[3*idx_prim+1] = unsigned short(__float2int_rn(shift[1]*60000.0)); Bump[3*idx_prim+2] = unsigned short(__float2int_rn(((error_dist+15.0))*2000.0)); Mask[idx_prim] = unsigned char(__float2int_rn(RGBD_v.w*255.0)); } __global__ void ReadVtxMaskPrimkKernel(float *param, unsigned short *Bump, unsigned char *Mask, int n, int m) { ReadVtxMaskPrimProcess(param, Bump, Mask, n, m); } ///**** Function definitions ****/ void VertexFromBump(float *VMap_VBO, unsigned short *Bump_dev, unsigned char *RGB_bump_dev, unsigned short 
*Mask_dev, float *param_dev, int n, int m, int lvl) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n/lvl, dimBlock.x); dimGrid.y = divUp (m/lvl, dimBlock.y); hipLaunchKernelGGL(( VertexFromBumpKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, VMap_VBO, Bump_dev, RGB_bump_dev, Mask_dev, param_dev, n, m, lvl); checkCudaErrors( hipDeviceSynchronize() ); return; } void VertexOnlyFromBump(float *VMap_VBO, unsigned short *Bump_dev, unsigned short *Mask_dev, float *param_dev, int n, int m, int lvl) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n/lvl, dimBlock.x); dimGrid.y = divUp (m/lvl, dimBlock.y); hipLaunchKernelGGL(( VertexOnlyFromBumpKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, VMap_VBO, Bump_dev, Mask_dev, param_dev, n, m, lvl); checkCudaErrors( hipDeviceSynchronize() ); return; } void VertexRGBFromBump(float *VMap_VBO, float *RGB_VBO, unsigned short *Bump_dev, unsigned char *RGB_bump_dev, unsigned char *Mask_dev, float *param_dev, int n, int m, int lvl) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); hipLaunchKernelGGL(( VertexRGBFromBumpKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, VMap_VBO, RGB_VBO, Bump_dev, RGB_bump_dev, Mask_dev, param_dev, n, m, lvl); checkCudaErrors( hipDeviceSynchronize() ); return; } void ComputeNormal(float *NMap, float *VMap, int n, int m, bool inverse) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); hipLaunchKernelGGL(( NormalKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, NMap, VMap, n, m, inverse); checkCudaErrors( hipDeviceSynchronize() ); return; } void QuadTrim(float *VMap, unsigned char *Mask, unsigned int *indices_dev, unsigned short min_conf, int n, int m, float thresh, int lvl) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = 
divUp (n/lvl-1, dimBlock.x); dimGrid.y = divUp (m/lvl-1, dimBlock.y); hipLaunchKernelGGL(( QuadTrimKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, indices_dev, VMap, Mask, min_conf, n, m, thresh, lvl); checkCudaErrors( hipDeviceSynchronize() ); return; } void UpdateBump_cu(unsigned short *Bump_dev, unsigned char *RGB_dev, unsigned char *Mask_dev, float *VMap, float *NMap, float *RGB, float *Mask, float *param, int N_prim, int M_prim, int n, int m) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); hipLaunchKernelGGL(( UpdateBumpKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Bump_dev, RGB_dev, Mask_dev, VMap, NMap, RGB, Mask, param, pose_prim, N_prim, M_prim, n, m); checkCudaErrors( hipDeviceSynchronize() ); return; } void BBOX_cu(unsigned char *Mask, int *BBox, int n, int m) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); hipLaunchKernelGGL(( BBOXKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Mask, BBox, n, m); checkCudaErrors( hipDeviceSynchronize() ); return; } unsigned int Count_cu(unsigned char *Mask, int n, int m) { checkCudaErrors( hipMemset(res_dev,0,sizeof(unsigned int)) ); dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); hipLaunchKernelGGL(( CountKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Mask, res_dev, n, m); unsigned int res; checkCudaErrors( hipMemcpy(&res, res_dev, sizeof(unsigned int), hipMemcpyDeviceToHost) ); checkCudaErrors( hipDeviceSynchronize() ); return res; } float Overlap_cu(float *VMap, float *param, int n, int m, int N_prim, int M_prim) { unsigned int *counter_dev; checkCudaErrors( hipMalloc((void**)&counter_dev, 2*sizeof(unsigned int)) ); checkCudaErrors( hipMemset(counter_dev,0,2*sizeof(unsigned int)) ); dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); 
dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); hipLaunchKernelGGL(( OverlapKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, VMap, param, counter_dev, n, m, N_prim, M_prim); unsigned int counter[2]; checkCudaErrors( hipMemcpy(&counter, counter_dev, 2*sizeof(unsigned int), hipMemcpyDeviceToHost) ); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors( hipFree(counter_dev) ); if (counter[1] == 0) return 1.0; return float(counter[0])/float(counter[1]); } void SetRGBPrim_cu(unsigned char *RGB, hipArray* Array, int n, int m) { struct hipChannelFormatDesc desc; desc = hipCreateChannelDesc<float4>(); checkCudaErrors( hipBindTextureToArray( &texRef, Array, & desc) ); dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); hipLaunchKernelGGL(( SetRGBKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, RGB, n, m); checkCudaErrors( hipUnbindTexture( &texRef) ); checkCudaErrors( hipDeviceSynchronize() ); } void ReadVtxMask_cu(float *param, unsigned short *Bump, unsigned char *Mask, hipArray* Array, int n, int m) { struct hipChannelFormatDesc desc; desc = hipCreateChannelDesc<float4>(); checkCudaErrors( hipBindTextureToArray( &texRef, Array, & desc) ); dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); hipLaunchKernelGGL(( ReadVtxMaskPrimkKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, param, Bump, Mask, n, m); checkCudaErrors( hipUnbindTexture( &texRef) ); checkCudaErrors( hipDeviceSynchronize() ); }
24b64dcddf2310fae759f481ba4e226e2e1d9da6.cu
#include "Primitive.cuh" /******* Global variables ******/ texture<float4, cudaTextureType2D, cudaReadModeElementType> texRef; float *pose_prim; unsigned int *res_dev; void AllocPosePrimMatrix() { checkCudaErrors( cudaMalloc((void**)&pose_prim, 16 * sizeof(float)) ); } void SetPosePrimMatrix(float *pose_CPU) { checkCudaErrors( cudaMemcpy(pose_prim, pose_CPU, 16 * sizeof(float), cudaMemcpyHostToDevice) ); } void FreePosePrimMatrix() { checkCudaErrors( cudaFree(pose_prim) ); } void AllocBufPrim() { checkCudaErrors( cudaMalloc((void **)&res_dev, sizeof(unsigned int)) ); } void FreeBufPrim() { checkCudaErrors( cudaFree(res_dev) ); } ////**** Kernel definition *****/ __device__ __forceinline__ void VertexFromBumpProcess(float *VMap, unsigned short *Bump, unsigned char *RGB_bump, unsigned short *Mask_dev, float *param, int n, int m, int lvl) { float x, y, d, ind_i, ind_j; // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*(m/lvl) + j; int i_ref = lvl*i; int j_ref = lvl*j; int idx_ref = i_ref*m + j_ref; if (i > n-1 || j > m-1) return; if (Mask_dev[idx_ref] < 11) { VMap[3*idx] = 0.0; VMap[3*idx+1] = 0.0; VMap[3*idx+2] = 0.0; return; } // Attention indexes are inversed when reading the matrix (shift_i -> e1 -> x; shift_j -> e2 -> y; j - > m_prim -> e1) ind_i = float(i_ref); ind_j = float(j_ref); float Shift_ind[2]; Shift_ind[0] = float(Bump[3*idx_ref])/60000.0; Shift_ind[1] = float(Bump[3*idx_ref+1])/60000.0; x = (ind_i+Shift_ind[0])*2.0/param[4] + param[6]; y = (ind_j+Shift_ind[1])*2.0/param[5] + param[7]; d = param[3] + ((float(Bump[3*idx_ref+2])/2000.0)-15.0); VMap[3*idx] = x*param[8] + y*param[11] + d*param[0]; VMap[3*idx+1] = x*param[9] + y*param[12] + d*param[1]; VMap[3*idx+2] = x*param[10] + y*param[13] + d*param[2]; } __global__ void VertexFromBumpKernel(float *VMap, unsigned short *Bump, unsigned char *RGB_bump, unsigned short *Mask_dev, float 
*param, int n, int m, int lvl) { VertexFromBumpProcess(VMap, Bump, RGB_bump, Mask_dev, param, n, m, lvl); } __device__ __forceinline__ void VertexOnlyFromBumpProcess(float *VMap, unsigned short *Bump, unsigned short *Mask_dev, float *param, int n, int m, int lvl) { float x, y, d, ind_i, ind_j; // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*(m/lvl) + j; int i_ref = lvl*i; int j_ref = lvl*j; int idx_ref = i_ref*m + j_ref; if (i > n-1 || j > m-1) return; if (Mask_dev[idx_ref] < 11) { VMap[3*idx] = 0.0; VMap[3*idx+1] = 0.0; VMap[3*idx+2] = 0.0; return; } // Attention indexes are inversed when reading the matrix (shift_i -> e1 -> x; shift_j -> e2 -> y; j - > m_prim -> e1) ind_i = float(i_ref); ind_j = float(j_ref); float Shift_ind[2]; Shift_ind[0] = float(Bump[3*idx_ref])/60000.0; Shift_ind[1] = float(Bump[3*idx_ref+1])/60000.0; x = (ind_i+Shift_ind[0])*2.0/param[4] + param[6]; y = (ind_j+Shift_ind[1])*2.0/param[5] + param[7]; d = param[3] + ((float(Bump[3*idx_ref+2])/2000.0)-15.0); VMap[3*idx] = x*param[8] + y*param[11] + d*param[0]; VMap[3*idx+1] = x*param[9] + y*param[12] + d*param[1]; VMap[3*idx+2] = x*param[10] + y*param[13] + d*param[2]; } __global__ void VertexOnlyFromBumpKernel(float *VMap, unsigned short *Bump, unsigned short *Mask_dev, float *param, int n, int m, int lvl) { VertexOnlyFromBumpProcess(VMap, Bump, Mask_dev, param, n, m, lvl); } __device__ __forceinline__ void VertexRGBFromBumpProcess(float *VMap, float *RGB, unsigned short *Bump, unsigned char *RGB_bump, unsigned char *Mask_dev, float *param, int n, int m, int lvl) { float x, y, d, ind_i, ind_j; // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; int i_ref = lvl*i; int j_ref = lvl*j; //int idx_ref = i_ref*m + j_ref; if (i > n-1 || j > m-1) return; 
if (Mask_dev[idx] < 11) { VMap[3*idx] = 0.0; VMap[3*idx+1] = 0.0; VMap[3*idx+2] = 0.0; RGB[4*idx] = 0.0; RGB[4*idx+1] = 0.0; RGB[4*idx+2] = 0.0; RGB[4*idx+3] = 0.0; return; } // Attention indexes are inversed when reading the matrix (shift_i -> e1 -> x; shift_j -> e2 -> y; j - > m_prim -> e1) ind_i = float(i_ref); ind_j = float(j_ref); float Shift_ind[2]; Shift_ind[0] = float(Bump[3*idx])/60000.0; Shift_ind[1] = float(Bump[3*idx+1])/60000.0; x = (ind_i+Shift_ind[0])*2.0/param[4] + param[6]; y = (ind_j+Shift_ind[1])*2.0/param[5] + param[7]; d = param[3] + ((float(Bump[3*idx+2])/2000.0)-15.0); VMap[3*idx] = x*param[8] + y*param[11] + d*param[0]; VMap[3*idx+1] = x*param[9] + y*param[12] + d*param[1]; VMap[3*idx+2] = x*param[10] + y*param[13] + d*param[2]; RGB[4*idx] = float(RGB_bump[3*idx])/255.0; RGB[4*idx+1] = float(RGB_bump[3*idx+1])/255.0; RGB[4*idx+2] = float(RGB_bump[3*idx+2])/255.0; RGB[4*idx+3] = 1.0; } __global__ void VertexRGBFromBumpKernel(float *VMap, float *RGB, unsigned short *Bump, unsigned char *RGB_bump, unsigned char *Mask_dev, float *param, int n, int m, int lvl) { VertexRGBFromBumpProcess(VMap, RGB, Bump, RGB_bump, Mask_dev, param, n, m, lvl); } __device__ __forceinline__ void NormalProcess(float *NMap, float *VMap, int n, int m, bool inverse) { float p1 [3]; float p2 [3]; float p3 [3]; float n_p [3]; float n_p1 [3]; float n_p2 [3]; float n_p3 [3]; float n_p4 [3]; float norm_n; // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; int idx_out = i*m + j; unsigned short n_tot = 0; if (i > n-1 || j > m-1) return; if ( i < 1 || i > n-2 || j < 1 || j > m-2 || VMap[3*idx+2] == 0.0) { NMap[3*idx_out] = 0.0; NMap[3*idx_out+1] = 0.0; NMap[3*idx_out+2] = 0.0; return; } p1[0] = VMap[3*idx]; p1[1] = VMap[3*idx+1]; p1[2] = VMap[3*idx+2]; n_p1[0] = 0.0; n_p1[1] = 0.0; n_p1[2] = 0.0; n_p2[0] = 0.0; n_p2[1] = 0.0; n_p2[2] = 0.0; n_p3[0] = 
0.0; n_p3[1] = 0.0; n_p3[2] = 0.0; n_p4[0] = 0.0; n_p4[1] = 0.0; n_p4[2] = 0.0; ////////////////////////// Triangle 1 ///////////////////////////////// idx = (i+1)*m + j; p2[0] = VMap[3*idx]; p2[1] = VMap[3*idx+1]; p2[2] = VMap[3*idx+2]; idx = i*m + (j+1); p3[0] = VMap[3*idx]; p3[1] = VMap[3*idx+1]; p3[2] = VMap[3*idx+2]; if (p2[2] != 0.0 && p3[2] != 0.0) { n_p1[0] = (p2[1]-p1[1])*(p3[2]-p1[2]) - (p2[2]-p1[2])*(p3[1]-p1[1]); n_p1[1] = (p2[2]-p1[2])*(p3[0]-p1[0]) - (p2[0]-p1[0])*(p3[2]-p1[2]); n_p1[2] = (p2[0]-p1[0])*(p3[1]-p1[1]) - (p2[1]-p1[1])*(p3[0]-p1[0]); norm_n = (n_p1[0]*n_p1[0] + n_p1[1]*n_p1[1] + n_p1[2]*n_p1[2]); if (norm_n != 0.0) { n_p1[0] = n_p1[0] / sqrt(norm_n); n_p1[1] = n_p1[1] / sqrt(norm_n); n_p1[2] = n_p1[2] / sqrt(norm_n); n_tot++; } } ////////////////////////// Triangle 2 ///////////////////////////////// idx = i*m + (j+1); p2[0] = VMap[3*idx]; p2[1] = VMap[3*idx+1]; p2[2] = VMap[3*idx+2]; idx = (i-1)*m + j; p3[0] = VMap[3*idx]; p3[1] = VMap[3*idx+1]; p3[2] = VMap[3*idx+2]; if (p2[2] != 0.0 && p3[2] != 0.0) { n_p2[0] = (p2[1]-p1[1])*(p3[2]-p1[2]) - (p2[2]-p1[2])*(p3[1]-p1[1]); n_p2[1] = (p2[2]-p1[2])*(p3[0]-p1[0]) - (p2[0]-p1[0])*(p3[2]-p1[2]); n_p2[2] = (p2[0]-p1[0])*(p3[1]-p1[1]) - (p2[1]-p1[1])*(p3[0]-p1[0]); norm_n = (n_p2[0]*n_p2[0] + n_p2[1]*n_p2[1] + n_p2[2]*n_p2[2]); if (norm_n != 0.0) { n_p2[0] = n_p2[0] / sqrt(norm_n); n_p2[1] = n_p2[1] / sqrt(norm_n); n_p2[2] = n_p2[2] / sqrt(norm_n); n_tot++; } } ////////////////////////// Triangle 3 ///////////////////////////////// idx = (i-1)*m + j; p2[0] = VMap[3*idx]; p2[1] = VMap[3*idx+1]; p2[2] = VMap[3*idx+2]; idx = i*m + (j-1); p3[0] = VMap[3*idx]; p3[1] = VMap[3*idx+1]; p3[2] = VMap[3*idx+2]; if (p2[2] != 0.0 && p3[2] != 0.0) { n_p3[0] = (p2[1]-p1[1])*(p3[2]-p1[2]) - (p2[2]-p1[2])*(p3[1]-p1[1]); n_p3[1] = (p2[2]-p1[2])*(p3[0]-p1[0]) - (p2[0]-p1[0])*(p3[2]-p1[2]); n_p3[2] = (p2[0]-p1[0])*(p3[1]-p1[1]) - (p2[1]-p1[1])*(p3[0]-p1[0]); norm_n = (n_p3[0]*n_p3[0] + n_p3[1]*n_p3[1] + 
n_p3[2]*n_p3[2]); if (norm_n != 0) { n_p3[0] = n_p3[0] / sqrt(norm_n); n_p3[1] = n_p3[1] / sqrt(norm_n); n_p3[2] = n_p3[2] / sqrt(norm_n); n_tot++; } } ////////////////////////// Triangle 4 ///////////////////////////////// idx = i*m+ (j-1); p2[0] = VMap[3*idx]; p2[1] = VMap[3*idx+1]; p2[2] = VMap[3*idx+2]; idx = (i+1)*m + j; p3[0] = VMap[3*idx]; p3[1] = VMap[3*idx+1]; p3[2] = VMap[3*idx+2]; if (p2[2] != 0.0 && p3[2] != 0.0) { n_p4[0] = (p2[1]-p1[1])*(p3[2]-p1[2]) - (p2[2]-p1[2])*(p3[1]-p1[1]); n_p4[1] = (p2[2]-p1[2])*(p3[0]-p1[0]) - (p2[0]-p1[0])*(p3[2]-p1[2]); n_p4[2] = (p2[0]-p1[0])*(p3[1]-p1[1]) - (p2[1]-p1[1])*(p3[0]-p1[0]); norm_n = (n_p4[0]*n_p4[0] + n_p4[1]*n_p4[1] + n_p4[2]*n_p4[2]); if (norm_n != 0) { n_p4[0] = n_p4[0] / sqrt(norm_n); n_p4[1] = n_p4[1] / sqrt(norm_n); n_p4[2] = n_p4[2] / sqrt(norm_n); n_tot++; } } if (n_tot == 0) { NMap[3*idx_out] = 0.0; NMap[3*idx_out+1] = 0.0; NMap[3*idx_out+2] = 0.0; return; } n_p[0] = (n_p1[0] + n_p2[0] + n_p3[0] + n_p4[0])/float(n_tot); n_p[1] = (n_p1[1] + n_p2[1] + n_p3[1] + n_p4[1])/float(n_tot); n_p[2] = (n_p1[2] + n_p2[2] + n_p3[2] + n_p4[2])/float(n_tot); norm_n = sqrt(n_p[0]*n_p[0] + n_p[1]*n_p[1] + n_p[2]*n_p[2]); if (norm_n != 0) { if (inverse) { NMap[3*idx_out] = -n_p[0]/norm_n; NMap[3*idx_out+1] = -n_p[1]/norm_n; NMap[3*idx_out+2] = -n_p[2]/norm_n; } else { NMap[3*idx_out] = n_p[0]/norm_n; NMap[3*idx_out+1] = n_p[1]/norm_n; NMap[3*idx_out+2] = n_p[2]/norm_n; } } else { NMap[3*idx_out] = 0.0; NMap[3*idx_out+1] = 0.0; NMap[3*idx_out+2] = 0.0; } return; } __global__ void NormalKernel(float *NMap, float *VMap, int n, int m, bool inverse) { NormalProcess(NMap, VMap, n, m, inverse); } __device__ __forceinline__ void QuadTrimProcess(unsigned int *index_dev, float *VMap, unsigned char *Mask, unsigned short min_conf, int n, int m, float thresh, int lvl) { int VIdx [4]; // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * 
THREAD_SIZE_Y; int idx = i*(m/lvl-1) + j; if (i > n/lvl-2 || j > m/lvl-2) return; VIdx[0] = i*m/lvl+j; VIdx[1] = i*m/lvl+j+1; VIdx[2] = (i+1)*m/lvl+j+1; VIdx[3] = (i+1)*m/lvl+j; float diff1 = sqrt((VMap[3*VIdx[0]]-VMap[3*VIdx[1]])*(VMap[3*VIdx[0]]-VMap[3*VIdx[1]]) + (VMap[3*VIdx[0]+1]-VMap[3*VIdx[1]+1])*(VMap[3*VIdx[0]+1]-VMap[3*VIdx[1]+1]) + (VMap[3*VIdx[0]+2]-VMap[3*VIdx[1]+2])*(VMap[3*VIdx[0]+2]-VMap[3*VIdx[1]+2])); float diff2 = sqrt((VMap[3*VIdx[0]]-VMap[3*VIdx[2]])*(VMap[3*VIdx[0]]-VMap[3*VIdx[2]]) + (VMap[3*VIdx[0]+1]-VMap[3*VIdx[2]+1])*(VMap[3*VIdx[0]+1]-VMap[3*VIdx[2]+1]) + (VMap[3*VIdx[0]+2]-VMap[3*VIdx[2]+2])*(VMap[3*VIdx[0]+2]-VMap[3*VIdx[2]+2])); float diff3 = sqrt((VMap[3*VIdx[0]]-VMap[3*VIdx[3]])*(VMap[3*VIdx[0]]-VMap[3*VIdx[3]]) + (VMap[3*VIdx[0]+1]-VMap[3*VIdx[3]+1])*(VMap[3*VIdx[0]+1]-VMap[3*VIdx[3]+1]) + (VMap[3*VIdx[0]+2]-VMap[3*VIdx[3]+2])*(VMap[3*VIdx[0]+2]-VMap[3*VIdx[3]+2])); float diff4 = sqrt((VMap[3*VIdx[1]]-VMap[3*VIdx[2]])*(VMap[3*VIdx[1]]-VMap[3*VIdx[2]]) + (VMap[3*VIdx[1]+1]-VMap[3*VIdx[2]+1])*(VMap[3*VIdx[1]+1]-VMap[3*VIdx[2]+1]) + (VMap[3*VIdx[1]+2]-VMap[3*VIdx[2]+2])*(VMap[3*VIdx[1]+2]-VMap[3*VIdx[2]+2])); float diff5 = sqrt((VMap[3*VIdx[1]]-VMap[3*VIdx[3]])*(VMap[3*VIdx[1]]-VMap[3*VIdx[3]]) + (VMap[3*VIdx[1]+1]-VMap[3*VIdx[3]+1])*(VMap[3*VIdx[1]+1]-VMap[3*VIdx[3]+1]) + (VMap[3*VIdx[1]+2]-VMap[3*VIdx[3]+2])*(VMap[3*VIdx[1]+2]-VMap[3*VIdx[3]+2])); float diff6 = sqrt((VMap[3*VIdx[2]]-VMap[3*VIdx[3]])*(VMap[3*VIdx[2]]-VMap[3*VIdx[3]]) + (VMap[3*VIdx[2]+1]-VMap[3*VIdx[3]+1])*(VMap[3*VIdx[2]+1]-VMap[3*VIdx[3]+1]) + (VMap[3*VIdx[2]+2]-VMap[3*VIdx[3]+2])*(VMap[3*VIdx[2]+2]-VMap[3*VIdx[3]+2])); float max_diff = max(diff1, diff2); max_diff = max(diff3, max_diff); max_diff = max(diff4, max_diff); max_diff = max(diff5, max_diff); max_diff = max(diff6, max_diff); if (Mask[VIdx[0]] < 11 || Mask[VIdx[1]] < 11 || Mask[VIdx[2]] < 11 || Mask[VIdx[3]] < 11 || max_diff > thresh) { index_dev[4*idx] = 0; index_dev[4*idx+1] = 0; index_dev[4*idx+2] = 0; 
index_dev[4*idx+3] = 0; } else { index_dev[4*idx] = unsigned int(VIdx[0]); index_dev[4*idx+1] = unsigned int(VIdx[3]); index_dev[4*idx+2] = unsigned int(VIdx[2]); index_dev[4*idx+3] = unsigned int(VIdx[1]); } } __global__ void QuadTrimKernel(unsigned int *index_dev, float *VMap, unsigned char *Mask, unsigned short min_conf, int n, int m, float thresh, int lvl) { QuadTrimProcess(index_dev, VMap, Mask, min_conf, n, m, thresh, lvl); } __device__ __forceinline__ void UpdateBumpProcess(unsigned short *Bump_dev, unsigned char *RGB_dev, unsigned char *Mask_dev, float *VMap, float *NMap, float *RGB, float *Mask, float *param, float *pose, int N_prim, int M_prim, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; float mask_vis = Mask[4*idx+1]*255.0; float mask_tmp = Mask[4*idx]*255.0; unsigned char mask_curr = unsigned char(__float2int_rn(mask_tmp)); float vcurr[3]; vcurr[0] = VMap[3*idx]; vcurr[1] = VMap[3*idx+1]; vcurr[2] = VMap[3*idx+2]; float ncurr[3]; ncurr[0] = NMap[3*idx]; ncurr[1] = NMap[3*idx+1]; ncurr[2] = NMap[3*idx+2]; if (ncurr[0] == 0.0 && ncurr[1] == 0.0 && ncurr[2] == 0.0) { return; } // Transform current point float vcurr_l[3]; vcurr_l[0] = pose[0]*vcurr[0] + pose[4]*vcurr[1] + pose[8]*vcurr[2] + pose[12]; vcurr_l[1] = pose[1]*vcurr[0] + pose[5]*vcurr[1] + pose[9]*vcurr[2] + pose[13]; vcurr_l[2] = pose[2]*vcurr[0] + pose[6]*vcurr[1] + pose[10]*vcurr[2] + pose[14]; float ncurr_l[3]; ncurr_l[0] = pose[0]*ncurr[0] + pose[4]*ncurr[1] + pose[8]*ncurr[2]; ncurr_l[1] = pose[1]*ncurr[0] + pose[5]*ncurr[1] + pose[9]*ncurr[2]; ncurr_l[2] = pose[2]*ncurr[0] + pose[6]*ncurr[1] + pose[10]*ncurr[2]; float error_dist = (vcurr_l[0]*param[0] + vcurr_l[1]*param[1] + vcurr_l[2]*param[2]) - param[3]; float error_alpha = (ncurr_l[0]*param[0] + ncurr_l[1]*param[1] + ncurr_l[2]*param[2]); if 
(fabs(error_dist) > EPSILON || fabs(error_alpha) < ALPHA) { return; } float proj_a = vcurr_l[0]*param[8] + vcurr_l[1]*param[9] + vcurr_l[2]*param[10]; // e1 float proj_b = vcurr_l[0]*param[11] + vcurr_l[1]*param[12] + vcurr_l[2]*param[13]; // e2 proj_a = (proj_a-param[6])*param[4]/2.0; //shift[0] proj_b = (proj_b-param[7])*param[5]/2.0; //Shift[1]; int ind_i = __float2int_rd(proj_a); int ind_j = __float2int_rd(proj_b); if (ind_i > N_prim-1 || ind_j > M_prim-1 || ind_i < 0 || ind_j < 0 ) return; int idx_prim = ind_i*M_prim + ind_j; unsigned char mask_ref = Mask_dev[idx_prim]; __syncthreads (); if (mask_vis < 10.0 && mask_ref != 1) { if (mask_ref > 11) Mask_dev[idx_prim] = Mask_dev[idx_prim] - 1; //atomicSub(&Mask[idxBump [0]*size[1] + idxBump [1]], 1); else { Mask_dev[idx_prim] = 10; //atomicExch(&Mask[idxBump [0]*size[1] + idxBump [1]], 10); Bump_dev[3*idx_prim] = 0; //atomicExch(&Bump[3*(idxBump [0]*size[1] + idxBump [1])], 0); Bump_dev[3*idx_prim+1] = 0; //atomicExch(&Bump[3*(idxBump [0]*size[1] + idxBump [1])+1], 0); Bump_dev[3*idx_prim+2] = 0; //atomicExch(&Bump[3*(idxBump [0]*size[1] + idxBump [1])+2], 0); RGB_dev[3*idx_prim] = 0; //atomicExch(&RGB[3*(idxBump [0]*size[1] + idxBump [1])], 0); RGB_dev[3*idx_prim+1] = 0; //atomicExch(&RGB[3*(idxBump [0]*size[1] + idxBump [1])+1], 0); RGB_dev[3*idx_prim+2] = 0; //atomicExch(&RGB[3*(idxBump [0]*size[1] + idxBump [1])+2], 0); } return; } float shift [2]; shift[0] = proj_a - float(ind_i); shift[1] = proj_b - float(ind_j); ///// Critical Section ? 
/////// //unsigned char old_mask = Mask_dev[idx_prim]; //if (mask_curr > Mask_dev[idx_prim]) { // Mask_dev[idx_prim] = mask_curr; //atomicMax(&Mask[idxBump [0]*size[1] + idxBump [1]], mask_curr); //} //__syncthreads (); //if ((old_mask < mask_curr && Mask_dev[idx_prim] == mask_curr)) { Bump_dev[3*idx_prim] = unsigned short(__float2int_rn(shift[0]*60000.0)); Bump_dev[3*idx_prim+1] = unsigned short(__float2int_rn(shift[1]*60000.0)); Bump_dev[3*idx_prim+2] = unsigned short(__float2int_rn(((error_dist+15.0))*2000.0)); RGB_dev[3*idx_prim] = unsigned char(__float2int_rn(RGB[4*idx]*255.0)); RGB_dev[3*idx_prim+1] = unsigned char(__float2int_rn(RGB[4*idx+1]*255.0)); RGB_dev[3*idx_prim+2] = unsigned char(__float2int_rn(RGB[4*idx+2]*255.0)); Mask_dev[idx_prim] = mask_curr; //} } __global__ void UpdateBumpKernel(unsigned short *Bump_dev, unsigned char *RGB_dev, unsigned char *Mask_dev, float *VMap, float *NMap, float *RGB, float *Mask, float *param, float *pose, int N_prim, int M_prim, int n, int m) { UpdateBumpProcess(Bump_dev, RGB_dev, Mask_dev, VMap, NMap, RGB, Mask, param, pose, N_prim, M_prim, n, m); } __device__ __forceinline__ void BBOXProcess(unsigned char *Mask, int *BBox, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; if (Mask[idx] < 11) return; if (i < BBox[0]) atomicMin(&BBox[0], i); if (i > BBox[1]) atomicMax(&BBox[1], i); if (j < BBox[2]) atomicMin(&BBox[2], j); if (j > BBox[3]) atomicMax(&BBox[3], j); } __global__ void BBOXKernel(unsigned char *Mask, int *BBox, int n, int m) { BBOXProcess(Mask, BBox, n, m); } __device__ __forceinline__ void CountProcess(unsigned char *Mask, unsigned int *res, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; 
if (i > n-1 || j > m-1) return; if (Mask[idx] < 11) return; atomicAdd(&res[0], 1); } __global__ void CountKernel(unsigned char *Mask, unsigned int *res, int n, int m) { CountProcess(Mask, res, n, m); } __device__ __forceinline__ void OverlapProcess(float *VMap, float *param, unsigned int *counter, int n, int m, int N_prim, int M_prim) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; float pt [3]; pt[0] = VMap[3*idx]; pt[1] = VMap[3*idx+1]; pt[2] = VMap[3*idx+2]; if (pt[0] == 0.0 && pt[1] == 0.0 && pt[2] == 0.0) return; atomicAdd(&counter[1], 1); // Project on parameters float error_dist = (pt[0]*param[0] + pt[1]*param[1] + pt[2]*param[2]) - param[3]; if (fabs(error_dist) > EPSILON) { return; } float proj_a = pt[0]*param[8] + pt[1]*param[9] + pt[2]*param[10]; // e1 float proj_b = pt[0]*param[11] + pt[1]*param[12] + pt[2]*param[13]; // e2 proj_a = (proj_a-param[6])*param[4]/2.0; //shift[0] proj_b = (proj_b-param[7])*param[5]/2.0; //Shift[1]; int ind_i = __float2int_rd(proj_a); int ind_j = __float2int_rd(proj_b); if (ind_i > N_prim-1 || ind_j > M_prim-1 || ind_i < 0 || ind_j < 0 ) return; atomicAdd(&counter[0], 1); } __global__ void OverlapKernel(float *VMap, float *param, unsigned int *counter, int n, int m, int N_prim, int M_prim) { OverlapProcess(VMap, param, counter, n, m, N_prim, M_prim); } __device__ __forceinline__ void SetRGBProcess(unsigned char *RGB, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; float4 RGBD_v = tex2D(texRef, (float)i+0.5f, (float)j+0.5f); RGB[3*idx] = unsigned char(__float2int_rn(RGBD_v.x*255.0)); RGB[3*idx+1] = unsigned char(__float2int_rn(RGBD_v.y*255.0)); RGB[3*idx+2] = unsigned 
char(__float2int_rn(RGBD_v.z*255.0)); } __global__ void SetRGBKernel(unsigned char *RGB, int n, int m) { SetRGBProcess(RGB, n, m); } __device__ __forceinline__ void ReadVtxMaskPrimProcess(float *param, unsigned short *Bump, unsigned char *Mask, int n, int m) { // identifiant de thread a deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_Y; int idx = i*m + j; if (i > n-1 || j > m-1) return; float4 RGBD_v = tex2D(texRef, (float)i+0.5f, (float)j+0.5f); if (RGBD_v.x == 0.0 && RGBD_v.y == 0.0 && RGBD_v.z == 0.0) { Mask[idx] = 10; Bump[3*idx] = 0; Bump[3*idx+1] = 0; Bump[3*idx+2] = 0; return; } float pt [3]; pt[0] = (RGBD_v.x*20.0)-10.0; pt[1] = (RGBD_v.y*20.0)-10.0; pt[2] = (RGBD_v.z*20.0)-10.0; float error_dist = (pt[0])*param[0] + (pt[1])*param[1] + (pt[2])*param[2] - param[3]; if (fabs(error_dist) > EPSILON) return; float a,b; a = pt[0]*param[8] + pt[1]*param[9] + pt[2]*param[10]; b = pt[0]*param[11] + pt[1]*param[12] + pt[2]*param[13]; a = (a-param[6])*param[4]/2.0; b = (b-param[7])*param[5]/2.0; int ind_i = __float2int_rd(a); int ind_j = __float2int_rd(b); if (ind_i > n-1 || ind_j > m-1 || ind_i < 0 || ind_j < 0 ) return; float shift [2]; shift[0] = a - float(ind_i); shift[1] = b - float(ind_j); int idx_prim = ind_i*m + ind_j; /*bool test = idx_prim == idx; Mask[idx_prim] = unsigned char(test);*/ Bump[3*idx_prim] = unsigned short(__float2int_rn(shift[0]*60000.0)); Bump[3*idx_prim+1] = unsigned short(__float2int_rn(shift[1]*60000.0)); Bump[3*idx_prim+2] = unsigned short(__float2int_rn(((error_dist+15.0))*2000.0)); Mask[idx_prim] = unsigned char(__float2int_rn(RGBD_v.w*255.0)); } __global__ void ReadVtxMaskPrimkKernel(float *param, unsigned short *Bump, unsigned char *Mask, int n, int m) { ReadVtxMaskPrimProcess(param, Bump, Mask, n, m); } ///**** Function definitions ****/ void VertexFromBump(float *VMap_VBO, unsigned short *Bump_dev, unsigned char *RGB_bump_dev, unsigned short 
*Mask_dev, float *param_dev, int n, int m, int lvl) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n/lvl, dimBlock.x); dimGrid.y = divUp (m/lvl, dimBlock.y); VertexFromBumpKernel<<<dimGrid, dimBlock>>>(VMap_VBO, Bump_dev, RGB_bump_dev, Mask_dev, param_dev, n, m, lvl); checkCudaErrors( cudaDeviceSynchronize() ); return; } void VertexOnlyFromBump(float *VMap_VBO, unsigned short *Bump_dev, unsigned short *Mask_dev, float *param_dev, int n, int m, int lvl) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n/lvl, dimBlock.x); dimGrid.y = divUp (m/lvl, dimBlock.y); VertexOnlyFromBumpKernel<<<dimGrid, dimBlock>>>(VMap_VBO, Bump_dev, Mask_dev, param_dev, n, m, lvl); checkCudaErrors( cudaDeviceSynchronize() ); return; } void VertexRGBFromBump(float *VMap_VBO, float *RGB_VBO, unsigned short *Bump_dev, unsigned char *RGB_bump_dev, unsigned char *Mask_dev, float *param_dev, int n, int m, int lvl) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); VertexRGBFromBumpKernel<<<dimGrid, dimBlock>>>(VMap_VBO, RGB_VBO, Bump_dev, RGB_bump_dev, Mask_dev, param_dev, n, m, lvl); checkCudaErrors( cudaDeviceSynchronize() ); return; } void ComputeNormal(float *NMap, float *VMap, int n, int m, bool inverse) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); NormalKernel<<<dimGrid, dimBlock>>>(NMap, VMap, n, m, inverse); checkCudaErrors( cudaDeviceSynchronize() ); return; } void QuadTrim(float *VMap, unsigned char *Mask, unsigned int *indices_dev, unsigned short min_conf, int n, int m, float thresh, int lvl) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n/lvl-1, dimBlock.x); dimGrid.y = divUp (m/lvl-1, dimBlock.y); QuadTrimKernel<<<dimGrid, dimBlock>>>(indices_dev, VMap, Mask, min_conf, n, m, 
thresh, lvl); checkCudaErrors( cudaDeviceSynchronize() ); return; } void UpdateBump_cu(unsigned short *Bump_dev, unsigned char *RGB_dev, unsigned char *Mask_dev, float *VMap, float *NMap, float *RGB, float *Mask, float *param, int N_prim, int M_prim, int n, int m) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); UpdateBumpKernel<<<dimGrid, dimBlock>>>(Bump_dev, RGB_dev, Mask_dev, VMap, NMap, RGB, Mask, param, pose_prim, N_prim, M_prim, n, m); checkCudaErrors( cudaDeviceSynchronize() ); return; } void BBOX_cu(unsigned char *Mask, int *BBox, int n, int m) { dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); BBOXKernel<<<dimGrid, dimBlock>>>(Mask, BBox, n, m); checkCudaErrors( cudaDeviceSynchronize() ); return; } unsigned int Count_cu(unsigned char *Mask, int n, int m) { checkCudaErrors( cudaMemset(res_dev,0,sizeof(unsigned int)) ); dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); CountKernel<<<dimGrid, dimBlock>>>(Mask, res_dev, n, m); unsigned int res; checkCudaErrors( cudaMemcpy(&res, res_dev, sizeof(unsigned int), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaDeviceSynchronize() ); return res; } float Overlap_cu(float *VMap, float *param, int n, int m, int N_prim, int M_prim) { unsigned int *counter_dev; checkCudaErrors( cudaMalloc((void**)&counter_dev, 2*sizeof(unsigned int)) ); checkCudaErrors( cudaMemset(counter_dev,0,2*sizeof(unsigned int)) ); dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); OverlapKernel<<<dimGrid, dimBlock>>>(VMap, param, counter_dev, n, m, N_prim, M_prim); unsigned int counter[2]; checkCudaErrors( cudaMemcpy(&counter, counter_dev, 2*sizeof(unsigned int), cudaMemcpyDeviceToHost) ); 
checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors( cudaFree(counter_dev) ); if (counter[1] == 0) return 1.0; return float(counter[0])/float(counter[1]); } void SetRGBPrim_cu(unsigned char *RGB, cudaArray* Array, int n, int m) { struct cudaChannelFormatDesc desc; desc = cudaCreateChannelDesc<float4>(); checkCudaErrors( cudaBindTextureToArray( &texRef, Array, & desc) ); dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); SetRGBKernel<<<dimGrid, dimBlock>>>(RGB, n, m); checkCudaErrors( cudaUnbindTexture( &texRef) ); checkCudaErrors( cudaDeviceSynchronize() ); } void ReadVtxMask_cu(float *param, unsigned short *Bump, unsigned char *Mask, cudaArray* Array, int n, int m) { struct cudaChannelFormatDesc desc; desc = cudaCreateChannelDesc<float4>(); checkCudaErrors( cudaBindTextureToArray( &texRef, Array, & desc) ); dim3 dimBlock(THREAD_SIZE_X, THREAD_SIZE_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n, dimBlock.x); dimGrid.y = divUp (m, dimBlock.y); ReadVtxMaskPrimkKernel<<<dimGrid, dimBlock>>>(param, Bump, Mask, n, m); checkCudaErrors( cudaUnbindTexture( &texRef) ); checkCudaErrors( cudaDeviceSynchronize() ); }
eaba829a86f6b5cfd31f23f3642aa8f9be307dd7.hip
// !!! This is a file automatically generated by hipify!!! #include <chrono> #include <cstdio> #include <random> #include <vector> #include "Tensor.cuh" #include "WarpSelectKernel.cuh" int main(int argc, char* argv[]) { if (argc != 5) { printf("Usage: %s <number of rows> <number of columns> <k> <repeat>\n", argv[0]); return 1; } const int rows = atoi(argv[1]); const int cols = atoi(argv[2]); const int k = atoi(argv[3]); const int repeat = atoi(argv[4]); size_t numInputElem = (size_t)rows * cols; size_t numOutputElem = (size_t)rows * k; size_t numInputElemBytes = sizeof(float) * numInputElem; size_t numOutputElemBytes = sizeof(float) * numOutputElem; std::default_random_engine g (19937); std::uniform_real_distribution<float> uniform_distr (0.f, 1.f); std::vector<float> h_in(numInputElem); for (size_t i = 0; i < numInputElem; i++) h_in[i] = uniform_distr(g); std::vector<float> h_k(numOutputElem); std::vector<int> h_v(numOutputElem); float *d_in, *d_k; int *d_v; hipMalloc((void**)&d_in, numInputElemBytes); hipMalloc((void**)&d_k, numOutputElemBytes); hipMalloc((void**)&d_v, numOutputElemBytes); faiss::gpu::Tensor<float, 2, true> input (d_in, {rows, cols}); // input value faiss::gpu::Tensor<float, 2, true> output (d_k, {rows, k}); // output value faiss::gpu::Tensor<int, 2, true> index (d_v, {rows, k}); // output index input.copyFrom(h_in, 0); output.copyFrom(h_k, 0); index.copyFrom(h_v, 0); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { faiss::gpu::runWarpSelect(input, output, index, true, k, 0); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of runWarpSelect: %f (us)\n", (time * 1e-3f) / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { faiss::gpu::runWarpSelect(input, output, index, false, k, 0); } hipDeviceSynchronize(); end = 
std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of runWarpSelect: %f (us)\n", (time * 1e-3f) / repeat); h_k = output.copyToVector(0); h_v = index.copyToVector(0); double s1 = 0.0, s2 = 0.0; for (int i = 0; i < rows; i++) { s1 += h_k[i * k]; s2 += h_in[h_v[i * k]]; } printf("checksum: %lf %lf\n", s1, s2); hipFree(d_in); hipFree(d_k); hipFree(d_v); return 0; }
eaba829a86f6b5cfd31f23f3642aa8f9be307dd7.cu
#include <chrono> #include <cstdio> #include <random> #include <vector> #include "Tensor.cuh" #include "WarpSelectKernel.cuh" int main(int argc, char* argv[]) { if (argc != 5) { printf("Usage: %s <number of rows> <number of columns> <k> <repeat>\n", argv[0]); return 1; } const int rows = atoi(argv[1]); const int cols = atoi(argv[2]); const int k = atoi(argv[3]); const int repeat = atoi(argv[4]); size_t numInputElem = (size_t)rows * cols; size_t numOutputElem = (size_t)rows * k; size_t numInputElemBytes = sizeof(float) * numInputElem; size_t numOutputElemBytes = sizeof(float) * numOutputElem; std::default_random_engine g (19937); std::uniform_real_distribution<float> uniform_distr (0.f, 1.f); std::vector<float> h_in(numInputElem); for (size_t i = 0; i < numInputElem; i++) h_in[i] = uniform_distr(g); std::vector<float> h_k(numOutputElem); std::vector<int> h_v(numOutputElem); float *d_in, *d_k; int *d_v; cudaMalloc((void**)&d_in, numInputElemBytes); cudaMalloc((void**)&d_k, numOutputElemBytes); cudaMalloc((void**)&d_v, numOutputElemBytes); faiss::gpu::Tensor<float, 2, true> input (d_in, {rows, cols}); // input value faiss::gpu::Tensor<float, 2, true> output (d_k, {rows, k}); // output value faiss::gpu::Tensor<int, 2, true> index (d_v, {rows, k}); // output index input.copyFrom(h_in, 0); output.copyFrom(h_k, 0); index.copyFrom(h_v, 0); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { faiss::gpu::runWarpSelect(input, output, index, true, k, 0); } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of runWarpSelect: %f (us)\n", (time * 1e-3f) / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { faiss::gpu::runWarpSelect(input, output, index, false, k, 0); } cudaDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = 
std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of runWarpSelect: %f (us)\n", (time * 1e-3f) / repeat); h_k = output.copyToVector(0); h_v = index.copyToVector(0); double s1 = 0.0, s2 = 0.0; for (int i = 0; i < rows; i++) { s1 += h_k[i * k]; s2 += h_in[h_v[i * k]]; } printf("checksum: %lf %lf\n", s1, s2); cudaFree(d_in); cudaFree(d_k); cudaFree(d_v); return 0; }
4db57fc393f98996a5e5c94f2b6a98e32637cd67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include <chrono> #include <unistd.h> #include <thread> #include "simulator/simulator.h" #include "simulator/collision_utils.h" #include "include/progress_bar.h" using namespace std; __device__ __host__ void resolve_collisions_for_batch_stimuli( Transition* const all_data, unsigned int* const all_size, Data* data, const CAPACITY_TYPE* lengths, const CAPACITY_TYPE& capacity, const NUM_ARG_TYPE& num_outputs ) { // TODO parallelize CAPACITY_TYPE stimuli_lengths[N_STIMULI_PARALLEL]; for (NUM_ARG_TYPE i_output = 0; i_output < num_outputs; i_output++) { if (data[i_output].is_dummy) continue; for(int i_stimuli = 0; i_stimuli < N_STIMULI_PARALLEL; i_stimuli++) { stimuli_lengths[i_stimuli] = lengths[num_outputs * i_stimuli + i_output]; assert(stimuli_lengths[i_stimuli] <= capacity); } resolve_collisions_for_batch_waveform( all_data + data[i_output].transition_offset, stimuli_lengths, capacity, all_size + data[i_output].size_offset, N_STIMULI_PARALLEL ); } } __device__ __host__ bool OOB(unsigned int index, InputData* const data, unsigned int i) { return index >= data[i].size; } __device__ __host__ void prepare_stimuli_head( Timestamp* s_timestamps, Values* s_values, const Transition* const all_input_data, InputData* data, const NUM_ARG_TYPE& num_wires, const CAPACITY_TYPE* progress_updates ) { bool is_head = true; for (NUM_ARG_TYPE i = 0; i < num_wires; ++i) is_head &= (all_input_data[data[i].offset + progress_updates[i]].timestamp == 0); s_timestamps[0] = is_head ? 
-1 : all_input_data[data[0].offset + progress_updates[0]].timestamp; for (NUM_ARG_TYPE i = 0; i < num_wires; ++i) { s_values[i] = all_input_data[data[i].offset + progress_updates[i]].value; } } __device__ __host__ void slice_waveforms( Timestamp* s_timestamps, DelayInfo* s_delay_infos, Values* s_values, const Transition* const all_input_data, InputData* data, const CAPACITY_TYPE& capacity, const NUM_ARG_TYPE& num_wires, bool* overflow_ptr ) { CAPACITY_TYPE progress[MAX_NUM_MODULE_INPUT] = {0}; NUM_ARG_TYPE num_finished = 0; unsigned int write_stimuli_index = 0, write_transition_index = 1; auto C = capacity * num_wires; prepare_stimuli_head( s_timestamps + write_stimuli_index * capacity, s_values + write_stimuli_index * C, all_input_data, data, num_wires, progress ); for (NUM_ARG_TYPE i = 0; i < num_wires; ++i) if (data[i].size <= 1) num_finished++; while (num_finished < num_wires) { // find min timestamp and find advancing wires Timestamp min_t = LONG_LONG_MAX; NUM_ARG_TYPE advancing[MAX_NUM_MODULE_INPUT], num_advancing = 0; for (NUM_ARG_TYPE i = 0; i < num_wires; ++i) { const auto& index = progress[i]; if (OOB(index + 1, data, i)) continue; const auto& t = all_input_data[data[i].offset + index + 1].timestamp; if (t <= min_t) { if (t < min_t) { min_t = t; num_advancing = 0; } advancing[num_advancing] = i; num_advancing++; } } assert(min_t != LONG_LONG_MAX); // decide where to write if (write_transition_index + num_advancing - 1 >= capacity) { write_transition_index = 1; write_stimuli_index++; if (write_stimuli_index >= N_STIMULI_PARALLEL) break; prepare_stimuli_head( s_timestamps + write_stimuli_index * capacity, s_values + write_stimuli_index * C, all_input_data, data, num_wires, progress ); } // advance indices for (NUM_ARG_TYPE i = 0; i < num_advancing; ++i) { auto& index = progress[advancing[i]]; index++; if (OOB(index + 1, data, advancing[i])) num_finished++; } for (NUM_ARG_TYPE i = 0; i < num_advancing; ++i) { s_timestamps[write_stimuli_index * capacity + 
write_transition_index + i] = min_t; const auto& advancing_arg = advancing[i]; s_delay_infos[write_stimuli_index * capacity + write_transition_index + i].arg = advancing_arg; s_delay_infos[write_stimuli_index * capacity + write_transition_index + i].edge_type = get_edge_type( all_input_data[data[advancing_arg].offset + progress[advancing_arg] - 1].value, all_input_data[data[advancing_arg].offset + progress[advancing_arg]].value ); for (NUM_ARG_TYPE j = 0; j < num_wires; ++j) { const auto& transition = all_input_data[data[j].offset + progress[j]]; s_values[ write_stimuli_index * C + (write_transition_index + i) * num_wires + j ] = transition.value; } } write_transition_index += num_advancing; } if (write_stimuli_index >= N_STIMULI_PARALLEL) *overflow_ptr = true; } __host__ __device__ unsigned int get_table_row_index(const Values* s_input_values, NUM_ARG_TYPE num_input) { unsigned int row_index = 0; for (NUM_ARG_TYPE i_input = 0; i_input < num_input; ++i_input) { row_index = (row_index << 2) + static_cast<unsigned int>(s_input_values[i_input]) - 1; } return row_index; } __host__ __device__ void stepping_algorithm( const Timestamp* s_input_timestamps, const Values* s_input_values, Transition** output_data, const ModuleSpec* module_spec, const CAPACITY_TYPE& capacity ) { for (CAPACITY_TYPE i = 0; i < capacity; i++) { if (s_input_values[i * module_spec->num_input] == Values::PAD) break; const auto row_index = get_table_row_index(s_input_values + i * module_spec->num_input, module_spec->num_input); for (NUM_ARG_TYPE j = 0; j < module_spec->num_output; ++j) { if (output_data[j] == nullptr) continue; output_data[j][i].value = module_spec->table[row_index * module_spec->num_output + j]; output_data[j][i].timestamp = s_input_timestamps[i]; } } } __device__ void slice_module( const ModuleSpec* const module_spec, const Transition* const all_input_data, InputData* const input_data, const CAPACITY_TYPE& capacity, bool* overflow_ptr, Timestamp* s_input_timestamps, DelayInfo* 
s_input_delay_infos, Values* s_input_values ) { slice_waveforms( s_input_timestamps, s_input_delay_infos, s_input_values, all_input_data, input_data, capacity, module_spec->num_input, overflow_ptr ); } __global__ void slice_kernel( BatchResource batch_resource, Transition* input_data, Timestamp* s_timestamps, DelayInfo* s_delay_infos, Values* s_values ) { if (blockIdx.x < batch_resource.num_modules) { const auto& module_spec = batch_resource.module_specs[blockIdx.x]; const auto& overflow_ptr = batch_resource.overflows[blockIdx.x]; const auto& module_input_data = &batch_resource.input_data_schedule[blockIdx.x * MAX_NUM_MODULE_INPUT]; const auto& capacity = batch_resource.capacities[blockIdx.x]; const auto& s_timestamp_offset = batch_resource.s_timestamp_offsets[blockIdx.x]; const auto& s_delay_info_offset = batch_resource.s_delay_info_offsets[blockIdx.x]; const auto& s_values_offset = batch_resource.s_value_offsets[blockIdx.x]; slice_module( module_spec, input_data, module_input_data, capacity, overflow_ptr, s_timestamps + s_timestamp_offset, s_delay_infos + s_delay_info_offset, s_values + s_values_offset ); } } __device__ void stepping_module( const ModuleSpec* const module_spec, Transition* const all_output_data, Data* const output_data, const CAPACITY_TYPE& capacity, Timestamp* s_input_timestamps, Values* s_input_values ) { Transition* output_data_ptrs_for_stimuli[MAX_NUM_MODULE_OUTPUT] = { nullptr }; const unsigned int& stimuli_idx = threadIdx.x; for (NUM_ARG_TYPE i = 0; i < module_spec->num_output; ++i) { if (output_data[i].is_dummy) continue; output_data_ptrs_for_stimuli[i] = all_output_data + output_data[i].transition_offset + stimuli_idx * capacity; } auto offset = stimuli_idx * static_cast<unsigned int>(capacity); stepping_algorithm( s_input_timestamps + offset, s_input_values + offset * static_cast<unsigned int>(module_spec->num_input), output_data_ptrs_for_stimuli, module_spec, capacity ); } __global__ void stepping_kernel( BatchResource batch_resource, 
Transition* output_data, Timestamp* s_timestamps, Values* s_values ) { if (blockIdx.x < batch_resource.num_modules) { const auto& module_spec = batch_resource.module_specs[blockIdx.x]; const auto& module_output_data = &batch_resource.output_data_schedule[blockIdx.x * MAX_NUM_MODULE_OUTPUT]; const auto& capacity = batch_resource.capacities[blockIdx.x]; const auto& s_timestamp_offset = batch_resource.s_timestamp_offsets[blockIdx.x]; const auto& s_values_offset = batch_resource.s_value_offsets[blockIdx.x]; stepping_module( module_spec, output_data, module_output_data, capacity, s_timestamps + s_timestamp_offset, s_values + s_values_offset ); } } __device__ void compute_delay_module( const ModuleSpec* const module_spec, const SDFPath* const sdf_paths, const unsigned int& sdf_num_rows, Transition* const all_output_data, Data* const output_data, const CAPACITY_TYPE& capacity, DelayInfo* s_input_delay_infos, CAPACITY_TYPE* lengths ) { Transition* output_data_ptrs_for_stimuli[MAX_NUM_MODULE_OUTPUT] = { nullptr }; const unsigned int& stimuli_idx = threadIdx.x; for (NUM_ARG_TYPE i = 0; i < module_spec->num_output; ++i) { if (output_data[i].is_dummy) continue; output_data_ptrs_for_stimuli[i] = all_output_data + output_data[i].transition_offset + stimuli_idx * capacity; } DelayInfo* delay_info_for_stimuli = s_input_delay_infos + stimuli_idx * capacity; compute_delay( output_data_ptrs_for_stimuli, capacity, delay_info_for_stimuli, module_spec->num_output, module_spec->num_input, sdf_paths, sdf_num_rows, lengths + stimuli_idx * module_spec->num_output ); } __global__ void compute_delay_kernel( BatchResource batch_resource, SDFPath* sdf, Transition* output_data, DelayInfo* s_delay_infos, CAPACITY_TYPE* s_lengths ) { if (blockIdx.x < batch_resource.num_modules) { const auto& module_spec = batch_resource.module_specs[blockIdx.x]; const auto& sdf_offset = batch_resource.sdf_offsets[blockIdx.x]; const auto& s_delay_info_offset = batch_resource.s_delay_info_offsets[blockIdx.x]; const 
auto& s_lengths_offset = batch_resource.s_length_offsets[blockIdx.x]; const auto& sdf_num_rows = batch_resource.sdf_num_rows[blockIdx.x]; const auto& module_output_data = &batch_resource.output_data_schedule[blockIdx.x * MAX_NUM_MODULE_OUTPUT]; const auto& capacity = batch_resource.capacities[blockIdx.x]; compute_delay_module( module_spec, sdf + sdf_offset, sdf_num_rows, output_data, module_output_data, capacity, s_delay_infos + s_delay_info_offset, s_lengths + s_lengths_offset ); } } __device__ void resolve_collision_module( const ModuleSpec* const module_spec, Transition* const all_output_data, Data* const output_data, unsigned int* const all_size, const CAPACITY_TYPE& capacity, CAPACITY_TYPE* lengths ) { resolve_collisions_for_batch_stimuli( all_output_data, all_size, output_data, lengths, capacity, module_spec->num_output ); } __global__ void resolve_collision_kernel( BatchResource batch_resource, Transition* output_data, unsigned int* output_size, CAPACITY_TYPE* s_lengths ) { if (blockIdx.x < batch_resource.num_modules) { const auto& module_spec = batch_resource.module_specs[blockIdx.x]; const auto& module_output_data = &batch_resource.output_data_schedule[blockIdx.x * MAX_NUM_MODULE_OUTPUT]; const auto& s_length_offset = batch_resource.s_length_offsets[blockIdx.x]; const auto& capacity = batch_resource.capacities[blockIdx.x]; resolve_collision_module( module_spec, output_data, module_output_data, output_size, capacity, s_lengths + s_length_offset ); } } void Simulator::run() { cout << "| Status: Running Simulation... 
" << endl; size_t new_heap_size = N_CELL_PARALLEL * N_STIMULI_PARALLEL * INITIAL_CAPACITY * 8 * (sizeof(Timestamp) + sizeof(DelayInfo) + sizeof(Values) * MAX_NUM_MODULE_ARGS); cudaErrorCheck(hipDeviceSetLimit(hipLimitMallocHeapSize, new_heap_size)); cout << "| Adjusted heap size to be " << new_heap_size << " bytes" << endl; unsigned int num_layers = circuit.cell_schedule.size(); cout << "| Total " << num_layers << " layers" << endl; ProgressBar progress_bar(num_layers); vector<CellProcessor> cell_processors; cell_processors.resize(N_STREAM); for (unsigned int i_layer = 0; i_layer < num_layers; i_layer++) { const auto& schedule_layer = circuit.cell_schedule[i_layer]; const auto& split_cells = split_vector(schedule_layer, N_STREAM); ResourceCollector<SDFPath, Cell> sdf_collector(schedule_layer.size()); ResourceCollector<Transition, Wire> input_data_collector(schedule_layer.size() * MAX_NUM_MODULE_INPUT); vector<thread> init_threads; for (int i = 0; i < N_STREAM; ++i) { init_threads.emplace_back( CellProcessor::layer_init_async, std::ref(cell_processors[i]), std::ref(split_cells[i]) ); } for (auto& thread : init_threads) thread.join(); for (int i = 0; i < N_STREAM; ++i) cell_processors[i].layer_init(split_cells[i], sdf_collector, input_data_collector); auto* device_sdf = sdf_collector.get(); auto* device_input_data = input_data_collector.get(); for (auto& processor : cell_processors) processor.set_ptrs(device_sdf, device_input_data); hipDeviceSynchronize(); bool all_finished = false; while (not all_finished) { all_finished = true; for (auto& processor : cell_processors) { all_finished &= processor.run(); } } sdf_collector.free(); input_data_collector.free(); progress_bar.Progressed(i_layer + 1); } cout << endl; } CellProcessor::CellProcessor() { hipStreamCreate(&stream); batch_data.init(); } CellProcessor::~CellProcessor() { hipStreamDestroy(stream); batch_data.free(); output_data_collector.free(); output_size_collector.free(); s_timestamp_collector.free(); 
s_delay_info_collector.free(); s_values_collector.free(); overflow_collector.free(); } void CellProcessor::layer_init_async(CellProcessor& processor, const std::vector<Cell*>& cells) { processor.session_id = 0; processor.overflow_collector.reset(); processor.overflow_collector.reserve(cells.size()); processor.job_queue = stack<Cell*, std::vector<Cell*>>(cells); for (auto* cell : cells) cell->init_async(); } void CellProcessor::layer_init( const std::vector<Cell *> &cells, ResourceCollector<SDFPath, Cell>& sdf_collector, ResourceCollector<Transition, Wire>& input_data_collector ) { for (auto* cell : cells) cell->init(sdf_collector, input_data_collector, overflow_collector); } void CellProcessor::set_ptrs(SDFPath *sdf, Transition *input_data) { device_sdf = sdf; device_input_data = input_data; } bool CellProcessor::run() { if (has_unfinished) return false; else if (!has_unfinished and job_queue.empty()) return true; processing_cells.clear(); s_timestamp_collector.reset(); s_delay_info_collector.reset(); s_values_collector.reset(); s_length_collector.reset(); output_data_collector.reset(); output_size_collector.reset(); auto* device_overflow = overflow_collector.get_device(stream); for (int i = 0; i < N_CELL_PARALLEL; i++) { if (job_queue.empty()) break; auto* cell = job_queue.top(); processing_cells.insert(cell); cell->prepare_resource( session_id, resource_buffer, device_overflow, output_data_collector, output_size_collector, s_timestamp_collector, s_delay_info_collector, s_values_collector, s_length_collector ); if (cell->finished()) job_queue.pop(); } batch_data.set(resource_buffer, stream); auto* device_output_data = output_data_collector.get_device(stream); auto* device_s_timestamps = s_timestamp_collector.get_device(stream); auto* device_s_delay_infos = s_delay_info_collector.get_device(stream); auto* device_s_values = s_values_collector.get_device(stream); auto* device_s_lengths = s_length_collector.get_device(stream); auto* device_sizes = 
output_size_collector.get_device(stream); hipLaunchKernelGGL(( slice_kernel), dim3(resource_buffer.size), dim3(1), 0, stream, batch_data, device_input_data, device_s_timestamps, device_s_delay_infos, device_s_values ); hipLaunchKernelGGL(( stepping_kernel), dim3(resource_buffer.size), dim3(N_STIMULI_PARALLEL), 0, stream, batch_data, device_output_data, device_s_timestamps, device_s_values ); hipLaunchKernelGGL(( compute_delay_kernel), dim3(resource_buffer.size), dim3(N_STIMULI_PARALLEL), 0, stream, batch_data, device_sdf, device_output_data, device_s_delay_infos, device_s_lengths ); hipLaunchKernelGGL(( resolve_collision_kernel), dim3(resource_buffer.size), dim3(1), 0, stream, batch_data, device_output_data, device_sizes, device_s_lengths ); host_output_data = output_data_collector.get_host(stream); host_sizes = output_size_collector.get_host(stream); host_overflows = overflow_collector.get_host(stream); hipStreamAddCallback(stream, CellProcessor::post_process, (void*) this, 0); has_unfinished = true; resource_buffer.clear(); return false; } CUDART_CB void CellProcessor::post_process(hipStream_t stream, hipError_t status, void* userData) { auto* processor = static_cast<CellProcessor*>(userData); unordered_set<Cell*> non_overflow_cells, finished_cells; for (auto* cell : processor->processing_cells) { bool finished = cell->finished(); bool overflow = cell->handle_overflow(processor->host_overflows); if (not overflow) { non_overflow_cells.insert(cell); if (finished) finished_cells.insert(cell); } else if (finished) processor->job_queue.push(cell); } for (auto* cell : non_overflow_cells) cell->gather_results(processor->host_output_data, processor->host_sizes); for (auto* cell : finished_cells) cell->free(); processor->session_id++; processor->has_unfinished = false; }
4db57fc393f98996a5e5c94f2b6a98e32637cd67.cu
#include <cassert> #include <chrono> #include <unistd.h> #include <thread> #include "simulator/simulator.h" #include "simulator/collision_utils.h" #include "include/progress_bar.h" using namespace std; __device__ __host__ void resolve_collisions_for_batch_stimuli( Transition* const all_data, unsigned int* const all_size, Data* data, const CAPACITY_TYPE* lengths, const CAPACITY_TYPE& capacity, const NUM_ARG_TYPE& num_outputs ) { // TODO parallelize CAPACITY_TYPE stimuli_lengths[N_STIMULI_PARALLEL]; for (NUM_ARG_TYPE i_output = 0; i_output < num_outputs; i_output++) { if (data[i_output].is_dummy) continue; for(int i_stimuli = 0; i_stimuli < N_STIMULI_PARALLEL; i_stimuli++) { stimuli_lengths[i_stimuli] = lengths[num_outputs * i_stimuli + i_output]; assert(stimuli_lengths[i_stimuli] <= capacity); } resolve_collisions_for_batch_waveform( all_data + data[i_output].transition_offset, stimuli_lengths, capacity, all_size + data[i_output].size_offset, N_STIMULI_PARALLEL ); } } __device__ __host__ bool OOB(unsigned int index, InputData* const data, unsigned int i) { return index >= data[i].size; } __device__ __host__ void prepare_stimuli_head( Timestamp* s_timestamps, Values* s_values, const Transition* const all_input_data, InputData* data, const NUM_ARG_TYPE& num_wires, const CAPACITY_TYPE* progress_updates ) { bool is_head = true; for (NUM_ARG_TYPE i = 0; i < num_wires; ++i) is_head &= (all_input_data[data[i].offset + progress_updates[i]].timestamp == 0); s_timestamps[0] = is_head ? 
-1 : all_input_data[data[0].offset + progress_updates[0]].timestamp; for (NUM_ARG_TYPE i = 0; i < num_wires; ++i) { s_values[i] = all_input_data[data[i].offset + progress_updates[i]].value; } } __device__ __host__ void slice_waveforms( Timestamp* s_timestamps, DelayInfo* s_delay_infos, Values* s_values, const Transition* const all_input_data, InputData* data, const CAPACITY_TYPE& capacity, const NUM_ARG_TYPE& num_wires, bool* overflow_ptr ) { CAPACITY_TYPE progress[MAX_NUM_MODULE_INPUT] = {0}; NUM_ARG_TYPE num_finished = 0; unsigned int write_stimuli_index = 0, write_transition_index = 1; auto C = capacity * num_wires; prepare_stimuli_head( s_timestamps + write_stimuli_index * capacity, s_values + write_stimuli_index * C, all_input_data, data, num_wires, progress ); for (NUM_ARG_TYPE i = 0; i < num_wires; ++i) if (data[i].size <= 1) num_finished++; while (num_finished < num_wires) { // find min timestamp and find advancing wires Timestamp min_t = LONG_LONG_MAX; NUM_ARG_TYPE advancing[MAX_NUM_MODULE_INPUT], num_advancing = 0; for (NUM_ARG_TYPE i = 0; i < num_wires; ++i) { const auto& index = progress[i]; if (OOB(index + 1, data, i)) continue; const auto& t = all_input_data[data[i].offset + index + 1].timestamp; if (t <= min_t) { if (t < min_t) { min_t = t; num_advancing = 0; } advancing[num_advancing] = i; num_advancing++; } } assert(min_t != LONG_LONG_MAX); // decide where to write if (write_transition_index + num_advancing - 1 >= capacity) { write_transition_index = 1; write_stimuli_index++; if (write_stimuli_index >= N_STIMULI_PARALLEL) break; prepare_stimuli_head( s_timestamps + write_stimuli_index * capacity, s_values + write_stimuli_index * C, all_input_data, data, num_wires, progress ); } // advance indices for (NUM_ARG_TYPE i = 0; i < num_advancing; ++i) { auto& index = progress[advancing[i]]; index++; if (OOB(index + 1, data, advancing[i])) num_finished++; } for (NUM_ARG_TYPE i = 0; i < num_advancing; ++i) { s_timestamps[write_stimuli_index * capacity + 
write_transition_index + i] = min_t; const auto& advancing_arg = advancing[i]; s_delay_infos[write_stimuli_index * capacity + write_transition_index + i].arg = advancing_arg; s_delay_infos[write_stimuli_index * capacity + write_transition_index + i].edge_type = get_edge_type( all_input_data[data[advancing_arg].offset + progress[advancing_arg] - 1].value, all_input_data[data[advancing_arg].offset + progress[advancing_arg]].value ); for (NUM_ARG_TYPE j = 0; j < num_wires; ++j) { const auto& transition = all_input_data[data[j].offset + progress[j]]; s_values[ write_stimuli_index * C + (write_transition_index + i) * num_wires + j ] = transition.value; } } write_transition_index += num_advancing; } if (write_stimuli_index >= N_STIMULI_PARALLEL) *overflow_ptr = true; } __host__ __device__ unsigned int get_table_row_index(const Values* s_input_values, NUM_ARG_TYPE num_input) { unsigned int row_index = 0; for (NUM_ARG_TYPE i_input = 0; i_input < num_input; ++i_input) { row_index = (row_index << 2) + static_cast<unsigned int>(s_input_values[i_input]) - 1; } return row_index; } __host__ __device__ void stepping_algorithm( const Timestamp* s_input_timestamps, const Values* s_input_values, Transition** output_data, const ModuleSpec* module_spec, const CAPACITY_TYPE& capacity ) { for (CAPACITY_TYPE i = 0; i < capacity; i++) { if (s_input_values[i * module_spec->num_input] == Values::PAD) break; const auto row_index = get_table_row_index(s_input_values + i * module_spec->num_input, module_spec->num_input); for (NUM_ARG_TYPE j = 0; j < module_spec->num_output; ++j) { if (output_data[j] == nullptr) continue; output_data[j][i].value = module_spec->table[row_index * module_spec->num_output + j]; output_data[j][i].timestamp = s_input_timestamps[i]; } } } __device__ void slice_module( const ModuleSpec* const module_spec, const Transition* const all_input_data, InputData* const input_data, const CAPACITY_TYPE& capacity, bool* overflow_ptr, Timestamp* s_input_timestamps, DelayInfo* 
s_input_delay_infos, Values* s_input_values ) { slice_waveforms( s_input_timestamps, s_input_delay_infos, s_input_values, all_input_data, input_data, capacity, module_spec->num_input, overflow_ptr ); } __global__ void slice_kernel( BatchResource batch_resource, Transition* input_data, Timestamp* s_timestamps, DelayInfo* s_delay_infos, Values* s_values ) { if (blockIdx.x < batch_resource.num_modules) { const auto& module_spec = batch_resource.module_specs[blockIdx.x]; const auto& overflow_ptr = batch_resource.overflows[blockIdx.x]; const auto& module_input_data = &batch_resource.input_data_schedule[blockIdx.x * MAX_NUM_MODULE_INPUT]; const auto& capacity = batch_resource.capacities[blockIdx.x]; const auto& s_timestamp_offset = batch_resource.s_timestamp_offsets[blockIdx.x]; const auto& s_delay_info_offset = batch_resource.s_delay_info_offsets[blockIdx.x]; const auto& s_values_offset = batch_resource.s_value_offsets[blockIdx.x]; slice_module( module_spec, input_data, module_input_data, capacity, overflow_ptr, s_timestamps + s_timestamp_offset, s_delay_infos + s_delay_info_offset, s_values + s_values_offset ); } } __device__ void stepping_module( const ModuleSpec* const module_spec, Transition* const all_output_data, Data* const output_data, const CAPACITY_TYPE& capacity, Timestamp* s_input_timestamps, Values* s_input_values ) { Transition* output_data_ptrs_for_stimuli[MAX_NUM_MODULE_OUTPUT] = { nullptr }; const unsigned int& stimuli_idx = threadIdx.x; for (NUM_ARG_TYPE i = 0; i < module_spec->num_output; ++i) { if (output_data[i].is_dummy) continue; output_data_ptrs_for_stimuli[i] = all_output_data + output_data[i].transition_offset + stimuli_idx * capacity; } auto offset = stimuli_idx * static_cast<unsigned int>(capacity); stepping_algorithm( s_input_timestamps + offset, s_input_values + offset * static_cast<unsigned int>(module_spec->num_input), output_data_ptrs_for_stimuli, module_spec, capacity ); } __global__ void stepping_kernel( BatchResource batch_resource, 
Transition* output_data, Timestamp* s_timestamps, Values* s_values ) { if (blockIdx.x < batch_resource.num_modules) { const auto& module_spec = batch_resource.module_specs[blockIdx.x]; const auto& module_output_data = &batch_resource.output_data_schedule[blockIdx.x * MAX_NUM_MODULE_OUTPUT]; const auto& capacity = batch_resource.capacities[blockIdx.x]; const auto& s_timestamp_offset = batch_resource.s_timestamp_offsets[blockIdx.x]; const auto& s_values_offset = batch_resource.s_value_offsets[blockIdx.x]; stepping_module( module_spec, output_data, module_output_data, capacity, s_timestamps + s_timestamp_offset, s_values + s_values_offset ); } } __device__ void compute_delay_module( const ModuleSpec* const module_spec, const SDFPath* const sdf_paths, const unsigned int& sdf_num_rows, Transition* const all_output_data, Data* const output_data, const CAPACITY_TYPE& capacity, DelayInfo* s_input_delay_infos, CAPACITY_TYPE* lengths ) { Transition* output_data_ptrs_for_stimuli[MAX_NUM_MODULE_OUTPUT] = { nullptr }; const unsigned int& stimuli_idx = threadIdx.x; for (NUM_ARG_TYPE i = 0; i < module_spec->num_output; ++i) { if (output_data[i].is_dummy) continue; output_data_ptrs_for_stimuli[i] = all_output_data + output_data[i].transition_offset + stimuli_idx * capacity; } DelayInfo* delay_info_for_stimuli = s_input_delay_infos + stimuli_idx * capacity; compute_delay( output_data_ptrs_for_stimuli, capacity, delay_info_for_stimuli, module_spec->num_output, module_spec->num_input, sdf_paths, sdf_num_rows, lengths + stimuli_idx * module_spec->num_output ); } __global__ void compute_delay_kernel( BatchResource batch_resource, SDFPath* sdf, Transition* output_data, DelayInfo* s_delay_infos, CAPACITY_TYPE* s_lengths ) { if (blockIdx.x < batch_resource.num_modules) { const auto& module_spec = batch_resource.module_specs[blockIdx.x]; const auto& sdf_offset = batch_resource.sdf_offsets[blockIdx.x]; const auto& s_delay_info_offset = batch_resource.s_delay_info_offsets[blockIdx.x]; const 
auto& s_lengths_offset = batch_resource.s_length_offsets[blockIdx.x]; const auto& sdf_num_rows = batch_resource.sdf_num_rows[blockIdx.x]; const auto& module_output_data = &batch_resource.output_data_schedule[blockIdx.x * MAX_NUM_MODULE_OUTPUT]; const auto& capacity = batch_resource.capacities[blockIdx.x]; compute_delay_module( module_spec, sdf + sdf_offset, sdf_num_rows, output_data, module_output_data, capacity, s_delay_infos + s_delay_info_offset, s_lengths + s_lengths_offset ); } } __device__ void resolve_collision_module( const ModuleSpec* const module_spec, Transition* const all_output_data, Data* const output_data, unsigned int* const all_size, const CAPACITY_TYPE& capacity, CAPACITY_TYPE* lengths ) { resolve_collisions_for_batch_stimuli( all_output_data, all_size, output_data, lengths, capacity, module_spec->num_output ); } __global__ void resolve_collision_kernel( BatchResource batch_resource, Transition* output_data, unsigned int* output_size, CAPACITY_TYPE* s_lengths ) { if (blockIdx.x < batch_resource.num_modules) { const auto& module_spec = batch_resource.module_specs[blockIdx.x]; const auto& module_output_data = &batch_resource.output_data_schedule[blockIdx.x * MAX_NUM_MODULE_OUTPUT]; const auto& s_length_offset = batch_resource.s_length_offsets[blockIdx.x]; const auto& capacity = batch_resource.capacities[blockIdx.x]; resolve_collision_module( module_spec, output_data, module_output_data, output_size, capacity, s_lengths + s_length_offset ); } } void Simulator::run() { cout << "| Status: Running Simulation... 
" << endl; size_t new_heap_size = N_CELL_PARALLEL * N_STIMULI_PARALLEL * INITIAL_CAPACITY * 8 * (sizeof(Timestamp) + sizeof(DelayInfo) + sizeof(Values) * MAX_NUM_MODULE_ARGS); cudaErrorCheck(cudaDeviceSetLimit(cudaLimitMallocHeapSize, new_heap_size)); cout << "| Adjusted heap size to be " << new_heap_size << " bytes" << endl; unsigned int num_layers = circuit.cell_schedule.size(); cout << "| Total " << num_layers << " layers" << endl; ProgressBar progress_bar(num_layers); vector<CellProcessor> cell_processors; cell_processors.resize(N_STREAM); for (unsigned int i_layer = 0; i_layer < num_layers; i_layer++) { const auto& schedule_layer = circuit.cell_schedule[i_layer]; const auto& split_cells = split_vector(schedule_layer, N_STREAM); ResourceCollector<SDFPath, Cell> sdf_collector(schedule_layer.size()); ResourceCollector<Transition, Wire> input_data_collector(schedule_layer.size() * MAX_NUM_MODULE_INPUT); vector<thread> init_threads; for (int i = 0; i < N_STREAM; ++i) { init_threads.emplace_back( CellProcessor::layer_init_async, std::ref(cell_processors[i]), std::ref(split_cells[i]) ); } for (auto& thread : init_threads) thread.join(); for (int i = 0; i < N_STREAM; ++i) cell_processors[i].layer_init(split_cells[i], sdf_collector, input_data_collector); auto* device_sdf = sdf_collector.get(); auto* device_input_data = input_data_collector.get(); for (auto& processor : cell_processors) processor.set_ptrs(device_sdf, device_input_data); cudaDeviceSynchronize(); bool all_finished = false; while (not all_finished) { all_finished = true; for (auto& processor : cell_processors) { all_finished &= processor.run(); } } sdf_collector.free(); input_data_collector.free(); progress_bar.Progressed(i_layer + 1); } cout << endl; } CellProcessor::CellProcessor() { cudaStreamCreate(&stream); batch_data.init(); } CellProcessor::~CellProcessor() { cudaStreamDestroy(stream); batch_data.free(); output_data_collector.free(); output_size_collector.free(); s_timestamp_collector.free(); 
s_delay_info_collector.free(); s_values_collector.free(); overflow_collector.free(); } void CellProcessor::layer_init_async(CellProcessor& processor, const std::vector<Cell*>& cells) { processor.session_id = 0; processor.overflow_collector.reset(); processor.overflow_collector.reserve(cells.size()); processor.job_queue = stack<Cell*, std::vector<Cell*>>(cells); for (auto* cell : cells) cell->init_async(); } void CellProcessor::layer_init( const std::vector<Cell *> &cells, ResourceCollector<SDFPath, Cell>& sdf_collector, ResourceCollector<Transition, Wire>& input_data_collector ) { for (auto* cell : cells) cell->init(sdf_collector, input_data_collector, overflow_collector); } void CellProcessor::set_ptrs(SDFPath *sdf, Transition *input_data) { device_sdf = sdf; device_input_data = input_data; } bool CellProcessor::run() { if (has_unfinished) return false; else if (!has_unfinished and job_queue.empty()) return true; processing_cells.clear(); s_timestamp_collector.reset(); s_delay_info_collector.reset(); s_values_collector.reset(); s_length_collector.reset(); output_data_collector.reset(); output_size_collector.reset(); auto* device_overflow = overflow_collector.get_device(stream); for (int i = 0; i < N_CELL_PARALLEL; i++) { if (job_queue.empty()) break; auto* cell = job_queue.top(); processing_cells.insert(cell); cell->prepare_resource( session_id, resource_buffer, device_overflow, output_data_collector, output_size_collector, s_timestamp_collector, s_delay_info_collector, s_values_collector, s_length_collector ); if (cell->finished()) job_queue.pop(); } batch_data.set(resource_buffer, stream); auto* device_output_data = output_data_collector.get_device(stream); auto* device_s_timestamps = s_timestamp_collector.get_device(stream); auto* device_s_delay_infos = s_delay_info_collector.get_device(stream); auto* device_s_values = s_values_collector.get_device(stream); auto* device_s_lengths = s_length_collector.get_device(stream); auto* device_sizes = 
output_size_collector.get_device(stream); slice_kernel<<<resource_buffer.size, 1, 0, stream>>>( batch_data, device_input_data, device_s_timestamps, device_s_delay_infos, device_s_values ); stepping_kernel<<<resource_buffer.size, N_STIMULI_PARALLEL, 0, stream>>>( batch_data, device_output_data, device_s_timestamps, device_s_values ); compute_delay_kernel<<<resource_buffer.size, N_STIMULI_PARALLEL, 0, stream>>>( batch_data, device_sdf, device_output_data, device_s_delay_infos, device_s_lengths ); resolve_collision_kernel<<<resource_buffer.size, 1, 0, stream>>>( batch_data, device_output_data, device_sizes, device_s_lengths ); host_output_data = output_data_collector.get_host(stream); host_sizes = output_size_collector.get_host(stream); host_overflows = overflow_collector.get_host(stream); cudaStreamAddCallback(stream, CellProcessor::post_process, (void*) this, 0); has_unfinished = true; resource_buffer.clear(); return false; } CUDART_CB void CellProcessor::post_process(cudaStream_t stream, cudaError_t status, void* userData) { auto* processor = static_cast<CellProcessor*>(userData); unordered_set<Cell*> non_overflow_cells, finished_cells; for (auto* cell : processor->processing_cells) { bool finished = cell->finished(); bool overflow = cell->handle_overflow(processor->host_overflows); if (not overflow) { non_overflow_cells.insert(cell); if (finished) finished_cells.insert(cell); } else if (finished) processor->job_queue.push(cell); } for (auto* cell : non_overflow_cells) cell->gather_results(processor->host_output_data, processor->host_sizes); for (auto* cell : finished_cells) cell->free(); processor->session_id++; processor->has_unfinished = false; }
54bb64e91033612dcffc5647eebc7aa89a68e656.hip
// !!! This is a file automatically generated by hipify!!! #define _CRT_SECURE_NO_WARNINGS #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <iostream> #include <fstream> #include <string> #include <iomanip> #include <vector> #define D65_ROW 531 // D65 #define D65_COL 2 // D65 #define OBS_ROW 441 // #define OBS_COL 4 // #define DATA_ROW 391 // (390 - 780 nm) #define DATA_MIN 390 // #define DATA_MAX 780 // #define PI 3.141592 // #define BLOCKSIZE 371 // 1 #define DATANUM 50 // #define CALCNUM 10000 // #define SIMNUM 10000 // /* */ //#define F_PATH "C:/Users/ryoin/source/repos/color_simulation_cuda/color_simulation_cuda" using namespace std; /* CUDA */ #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) /* */ int getFileData(vector<vector<double> >& d65_data, vector<vector<double> >& obs_data) { /* */ FILE* fp_d65, * fp_obs; /* EOF */ int ret; /* */ int count = 0; /* D65 */ /* */ fp_d65 = fopen("./d65.csv", "r"); /* */ if (fp_d65 == NULL) { cout << "File open error" << endl; return -1; } /* */ for (int i = 0; i < D65_ROW; i++) { /* 1 */ ret = fscanf(fp_d65, "%lf, %lf", &(d65_data[count][0]), &(d65_data[count][1])); /* */ if (d65_data[count][0] == DATA_MAX) { count = 0; break; } /* */ if (d65_data[count][0] >= DATA_MIN) { count++; } /* */ if (ret == EOF) { cout << "error" << endl; return -1; } } fclose(fp_d65); /* */ /* */ fp_obs = fopen("./std_obs_10deg.csv", "r"); /* */ if (fp_obs == NULL) { cout << "File open error" << endl; return -1; } /* */ for (int i = 0; i < OBS_ROW; i++) { /* 1 */ ret = fscanf(fp_obs, "%lf, %lf, %lf, %lf", &(obs_data[i][0]), &(obs_data[i][1]), &(obs_data[i][2]), &(obs_data[i][3])); /* */ if (obs_data[count][0] 
== DATA_MAX) { count = 0; break; } /* */ if (obs_data[count][0] >= DATA_MIN) { count++; } /* */ if (ret == EOF) { cout << "error" << endl; return -1; } } fclose(fp_d65); return 0; } /* */ void makeGaussShift(vector<vector<double> >& shift_data) { double mu = 0; // double sigma = 0; // double d_max = 0; // double w_length = 0; // 0-1 /* */ srand((unsigned int)time(NULL)); /* 1010 */ for (int i = 0; i < 10; i++) { mu = (double)DATA_MIN + ((double)DATA_MAX - (double)DATA_MIN) / 10 * i; sigma = 20 + (80 * (double)rand() / RAND_MAX); /* */ for (int j = 0; j < DATA_ROW; j++) { shift_data[j][i] = 1 / (sqrt(2 * PI) * sigma) * exp(-pow(((double)(DATA_MIN + j) - mu), 2) / (2 * sigma * sigma)); /* () */ if (d_max < shift_data[j][i]) { d_max = shift_data[j][i]; } } /* 0-1 */ w_length = (double)rand() / RAND_MAX; // 0-1 for (int j = 0; j < DATA_ROW; j++) { shift_data[j][i] = shift_data[j][i] / d_max * w_length; } /* */ d_max = 0; } } /* vector */ void cpyVecToArray(vector<vector<double> >& d65_data, vector<vector<double> >& obs_data, vector<vector<double> >& shift_data, double* d65, double* obs_x, double* obs_y, double* obs_z, double* gauss_data) { for (int i = 0; i < DATA_ROW; i++) { d65[i] = d65_data[i][1]; obs_x[i] = obs_data[i][1]; obs_y[i] = obs_data[i][2]; obs_z[i] = obs_data[i][3]; for (int j = 0; j < 10; j++) { int aPos = DATA_ROW * j + i; gauss_data[aPos] = shift_data[i][j]; } } } /* */ int getRemain(void) { /* */ int remain = 0; /* */ for (int i = 1; i < BLOCKSIZE; i *= 2) { remain = BLOCKSIZE - i; } /* */ return remain; } /* */ template<int BLOCK_SIZE> __global__ void colorSim(double simNum,double *g_data,double *d65,double *obs_x,double *obs_y,double *obs_z,double *result,int remain) { /* CUDA */ int ix = threadIdx.x; int aPos = 0; /* */ __shared__ int sim_order[10]; /* */ __shared__ double sim_num; /* */ __shared__ double calc_data[BLOCK_SIZE][3]; /* */ __shared__ double g_max; g_max = 0; /* */ double gaussian = 0; /* () */ __shared__ double g_comp[BLOCK_SIZE]; /* 
*/ g_comp[ix] = 0; /* sim_order */ if (ix == 0) { sim_num = blockIdx.x + simNum; int count = 512; // for (int i = 0; i < 10; i++) { if (sim_num >= count) { sim_num -= count; sim_order[i] = 1; } else { sim_order[i] = 0; } count = count / 2; } /*printf("%d %d %d %d %d %d %d %d %d %d\n", sim_order[0], sim_order[1], sim_order[2], sim_order[3], sim_order[4], sim_order[5], sim_order[6], sim_order[7], sim_order[8], sim_order[9] );*/ } /* */ __syncthreads(); /* */ for (int i = 0; i < 10; i++) { aPos = i * BLOCK_SIZE + ix; if (sim_order[i] == 1) { gaussian += g_data[aPos]; g_comp[ix] += g_data[aPos]; } } /* */ __syncthreads(); /* */ if (ix == 0) { for (int i = 0; i < BLOCK_SIZE; i++) { if (g_max < g_comp[i]) { g_max = g_comp[i]; } } } /* */ __syncthreads(); /* g_max 10.99 */ if (g_max >= 1) { gaussian = gaussian / g_max * 0.99; } /* */ __syncthreads(); for (int i = 0; i < CALCNUM; i++) { /* */ calc_data[ix][0] = d65[ix] * obs_x[ix] * pow(gaussian, (0.01 * i)); calc_data[ix][1] = d65[ix] * obs_y[ix] * pow(gaussian, (0.01 * i)); calc_data[ix][2] = d65[ix] * obs_z[ix] * pow(gaussian, (0.01 * i)); /* */ __syncthreads(); /* () */ /* 0 */ if (remain != 0) { /* */ if (ix < remain) { calc_data[ix][0] += calc_data[BLOCK_SIZE - ix - 1][0]; calc_data[ix][1] += calc_data[BLOCK_SIZE - ix - 1][1]; calc_data[ix][2] += calc_data[BLOCK_SIZE - ix - 1][2]; } } /* */ if (BLOCK_SIZE >= 256) { if (ix < 128) { calc_data[ix][0] += calc_data[ix + 128][0]; calc_data[ix][1] += calc_data[ix + 128][1]; calc_data[ix][2] += calc_data[ix + 128][2]; }__syncthreads(); } if (BLOCK_SIZE >= 128) { if (ix < 64) { calc_data[ix][0] += calc_data[ix + 64][0]; calc_data[ix][1] += calc_data[ix + 64][1]; calc_data[ix][2] += calc_data[ix + 64][2]; }__syncthreads(); } if (BLOCK_SIZE >= 64) { if (ix < 32) { calc_data[ix][0] += calc_data[ix + 32][0]; calc_data[ix][1] += calc_data[ix + 32][1]; calc_data[ix][2] += calc_data[ix + 32][2]; } __syncthreads();} if (BLOCK_SIZE >= 32) { if (ix < 16) { calc_data[ix][0] += 
calc_data[ix + 16][0]; calc_data[ix][1] += calc_data[ix + 16][1]; calc_data[ix][2] += calc_data[ix + 16][2]; } __syncthreads(); } if (BLOCK_SIZE >= 16) { if (ix < 8) { calc_data[ix][0] += calc_data[ix + 8][0]; calc_data[ix][1] += calc_data[ix + 8][1]; calc_data[ix][2] += calc_data[ix + 8][2]; }__syncthreads(); } if (BLOCK_SIZE >= 8) { if (ix < 4) { calc_data[ix][0] += calc_data[ix + 4][0]; calc_data[ix][1] += calc_data[ix + 4][1]; calc_data[ix][2] += calc_data[ix + 4][2]; } __syncthreads(); } if (BLOCK_SIZE >= 4) { if (ix < 2) { calc_data[ix][0] += calc_data[ix + 2][0]; calc_data[ix][1] += calc_data[ix + 2][1]; calc_data[ix][2] += calc_data[ix + 2][2]; } __syncthreads(); } if (BLOCK_SIZE >= 2) { if (ix < 1) { calc_data[ix][0] += calc_data[ix + 1][0]; calc_data[ix][1] += calc_data[ix + 1][1]; calc_data[ix][2] += calc_data[ix + 1][2]; } __syncthreads(); } /*if (ix == 0) { for (int j = 1; j < BLOCK_SIZE; j++) { calc_data[ix][0] += calc_data[i][0]; calc_data[ix][1] += calc_data[i][1]; calc_data[ix][2] += calc_data[i][2]; } }*/ /* */ if (ix == 0) { /* aPos */ aPos = blockIdx.x * 3 * CALCNUM + i; //printf("%d %d\n", blockIdx.x,calc_data[ix]); result[aPos] = calc_data[0][0]; /* aPos */ aPos = blockIdx.x * 3 * CALCNUM + i + CALCNUM; //printf("%d %d\n", blockIdx.x,calc_data[ix]); result[aPos] = calc_data[0][1]; /* aPos */ aPos = blockIdx.x * 3 * CALCNUM + i + (2 * CALCNUM); //printf("%d %d\n", blockIdx.x,calc_data[ix]); result[aPos] = calc_data[0][2]; //printf("%.3lf %.3lf %.3lf\n", calc_data[0][0], calc_data[0][1], calc_data[0][2]); } /* */ __syncthreads(); } } int main(void) { /* D65 */ vector<vector<double> > d65_data(DATA_ROW, vector<double>(D65_COL, 0)); /* */ vector<vector<double> > obs_data(DATA_ROW, vector<double>(OBS_COL, 0)); /*10 */ vector<vector<double> > gauss_shift(DATA_ROW, vector<double>(10, 0)); /* */ int remain = getRemain(); /* */ double* d65, * obs_x, * obs_y, * obs_z, * gauss_data, * result, * fin_result; d65 = new double[DATA_ROW]; obs_x= new 
double[DATA_ROW]; obs_y = new double[DATA_ROW]; obs_z = new double[DATA_ROW]; gauss_data = new double[DATA_ROW * 10]; result = new double[3 * DATANUM * CALCNUM]; fin_result = new double[3 * SIMNUM * CALCNUM]; /* CUDA */ double* d_d65, * d_obs_x, * d_obs_y, * d_obs_z, * d_gauss_data, *d_result; char* d_sim_order; /* GPU */ hipMalloc((void**)&d_d65, DATA_ROW * sizeof(double)); hipMalloc((void**)&d_obs_x, DATA_ROW * sizeof(double)); hipMalloc((void**)&d_obs_y, DATA_ROW * sizeof(double)); hipMalloc((void**)&d_obs_z, DATA_ROW * sizeof(double)); hipMalloc((void**)&d_gauss_data, DATA_ROW * 10 * sizeof(double)); hipMalloc((void**)&d_result, 3 * DATANUM * CALCNUM * sizeof(double)); /* */ int f_result = getFileData(d65_data, obs_data); /* */ makeGaussShift(gauss_shift); /* vector1 */ cpyVecToArray(d65_data, obs_data, gauss_shift,d65,obs_x,obs_y,obs_z,gauss_data); /* CUDA */ hipMemcpy(d_d65, d65, DATA_ROW * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_obs_x, obs_x, DATA_ROW * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_obs_y, obs_y, DATA_ROW * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_obs_z, obs_z, DATA_ROW * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_gauss_data, gauss_data, DATA_ROW * 10 * sizeof(double), hipMemcpyHostToDevice); int count = 0; for (int i = 0; i < (SIMNUM - DATANUM); i += DATANUM) { colorSim<DATA_ROW> << <DATANUM, DATA_ROW >> > ((i+200), d_gauss_data, d_d65, d_obs_x, d_obs_y, d_obs_z, d_result, remain); hipDeviceSynchronize(); /* */ hipMemcpy(result, d_result, 3 * DATANUM * CALCNUM * sizeof(double), hipMemcpyDeviceToHost); for (int j = 0; j < (3 * DATANUM * CALCNUM); j++) { int aPos = (count * 3 * DATANUM * CALCNUM) + j; fin_result[aPos] = result[j]; } count++; } /* */ string fname = "sim_result.csv"; /* */ ofstream o_file(fname); /* */ o_file << fixed << setprecision(3); /* */ //for (int i = 0; i < CALCNUM; i++) { // for (int j = 0; j < SIMNUM; j++) { // for (int k = 0; k < 3; k++) { // int apos = i + (3 * j + k) * CALCNUM; 
// o_file << fin_result[apos] << ","; // } // } // o_file << endl << flush; //} //for (int i = 0; i < SIMNUM; i++) { // for (int j = 0; j < 3; j++) { // for (int k = 0; k < CALCNUM; k++) { // int apos = (3 * i * CALCNUM) + (j * CALCNUM) + k; // o_file << fin_result[apos] << ","; // } // o_file << endl << flush; // } //} for (int i = 0; i < CALCNUM; i++) { for (int j = 0; j < SIMNUM; j++) { int apos = i + (3 * j) * CALCNUM; double X = fin_result[apos]; double Y = fin_result[apos + CALCNUM]; double Z = fin_result[apos + (2 * CALCNUM)]; double x = X / (X + Y + Z); double y = Y / (X + Y + Z); double z = Z / (X + Y + Z); o_file << x << "," << y << "," << z << ","; } o_file << endl << flush; } /* */ hipFree(d_d65); hipFree(d_gauss_data); hipFree(d_obs_x); hipFree(d_obs_y); hipFree(d_obs_z); hipFree(d_result); /* */ delete[] d65; delete[] obs_x; delete[] obs_y; delete[] obs_z; delete[] gauss_data; delete[] result; delete[] fin_result; return 0; }
54bb64e91033612dcffc5647eebc7aa89a68e656.cu
#define _CRT_SECURE_NO_WARNINGS #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <iostream> #include <fstream> #include <string> #include <iomanip> #include <vector> #define D65_ROW 531 // D65の行数 #define D65_COL 2 // D65の列数 #define OBS_ROW 441 // 標準観測者の行数 #define OBS_COL 4 // 標準観測者の列数 #define DATA_ROW 391 // 計算で使用するデータの行数 (390 - 780 nm) #define DATA_MIN 390 // 使用する周波数の最小値 #define DATA_MAX 780 // 使用する周波数の最大値 #define PI 3.141592 // 円周率 #define BLOCKSIZE 371 // 1ブロック当たりのスレッド数 #define DATANUM 50 // 計算する数 #define CALCNUM 10000 // べき乗する数 #define SIMNUM 10000 // シミュレーションする回数 /* 出力ファイルパス */ //#define F_PATH "C:/Users/ryoin/source/repos/color_simulation_cuda/color_simulation_cuda" using namespace std; /* CUDAエラーチェック */ #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) /* ファイルからデータを読み込む関数 */ int getFileData(vector<vector<double> >& d65_data, vector<vector<double> >& obs_data) { /* ファイルポインタ */ FILE* fp_d65, * fp_obs; /* EOFを検出する変数 */ int ret; /* カウンター */ int count = 0; /* D65の読み込み */ /* ファイルオープン */ fp_d65 = fopen("./d65.csv", "r"); /* 正しく開けているかをチェック */ if (fp_d65 == NULL) { cout << "File open error" << endl; return -1; } /* ファイル読み込み */ for (int i = 0; i < D65_ROW; i++) { /* 1行ずつ読み込む */ ret = fscanf(fp_d65, "%lf, %lf", &(d65_data[count][0]), &(d65_data[count][1])); /* 終了判定 */ if (d65_data[count][0] == DATA_MAX) { count = 0; break; } /* カウンタの更新 */ if (d65_data[count][0] >= DATA_MIN) { count++; } /* エラーを検出した際の処理 */ if (ret == EOF) { cout << "error" << endl; return -1; } } fclose(fp_d65); /* 標準観測者の読み込み */ /* ファイルオープン */ fp_obs = fopen("./std_obs_10deg.csv", "r"); /* 正しく開けているかをチェック */ if (fp_obs == NULL) { cout << "File open error" << endl; 
return -1; } /* ファイル読み込み */ for (int i = 0; i < OBS_ROW; i++) { /* 1行ずつ読み込む */ ret = fscanf(fp_obs, "%lf, %lf, %lf, %lf", &(obs_data[i][0]), &(obs_data[i][1]), &(obs_data[i][2]), &(obs_data[i][3])); /* 終了判定 */ if (obs_data[count][0] == DATA_MAX) { count = 0; break; } /* カウンタの更新 */ if (obs_data[count][0] >= DATA_MIN) { count++; } /* エラーを検出した際の処理 */ if (ret == EOF) { cout << "error" << endl; return -1; } } fclose(fp_d65); return 0; } /* ガウシアンのシフトを計算する関数 */ void makeGaussShift(vector<vector<double> >& shift_data) { double mu = 0; // 計算で使用するミュー double sigma = 0; // 計算で使用するシグマ double d_max = 0; // 生成したガウシアンの中の最大値 double w_length = 0; // 振幅を0-1の間でランダムにするために使用する /* 乱数のシード生成 */ srand((unsigned int)time(NULL)); /* 波形は10パターン生成するので10回でループする */ for (int i = 0; i < 10; i++) { mu = (double)DATA_MIN + ((double)DATA_MAX - (double)DATA_MIN) / 10 * i; sigma = 20 + (80 * (double)rand() / RAND_MAX); /* データ数だけ計算する */ for (int j = 0; j < DATA_ROW; j++) { shift_data[j][i] = 1 / (sqrt(2 * PI) * sigma) * exp(-pow(((double)(DATA_MIN + j) - mu), 2) / (2 * sigma * sigma)); /* 最大値を変数に格納する(更新する) */ if (d_max < shift_data[j][i]) { d_max = shift_data[j][i]; } } /* 生成したガウシアンを正規化し、振幅を0-1の間でランダムにする */ w_length = (double)rand() / RAND_MAX; // 0-1の間で乱数生成 for (int j = 0; j < DATA_ROW; j++) { shift_data[j][i] = shift_data[j][i] / d_max * w_length; } /* 最大値初期化 */ d_max = 0; } } /* vector型から配列へデータをコピーする関数 */ void cpyVecToArray(vector<vector<double> >& d65_data, vector<vector<double> >& obs_data, vector<vector<double> >& shift_data, double* d65, double* obs_x, double* obs_y, double* obs_z, double* gauss_data) { for (int i = 0; i < DATA_ROW; i++) { d65[i] = d65_data[i][1]; obs_x[i] = obs_data[i][1]; obs_y[i] = obs_data[i][2]; obs_z[i] = obs_data[i][3]; for (int j = 0; j < 10; j++) { int aPos = DATA_ROW * j + i; gauss_data[aPos] = shift_data[i][j]; } } } /* 総和計算の時に使用する変数を計算 */ int getRemain(void) { /* 余り */ int remain = 0; /* 余り計算 */ for (int i = 1; i < BLOCKSIZE; i *= 2) { remain = BLOCKSIZE - i; } /* 余り出力 
*/ return remain; } /* 積分計算カーネル */ template<int BLOCK_SIZE> __global__ void colorSim(double simNum,double *g_data,double *d65,double *obs_x,double *obs_y,double *obs_z,double *result,int remain) { /* CUDAアクセス用変数 */ int ix = threadIdx.x; int aPos = 0; /* どのガウシアンを決めるための変数 */ __shared__ int sim_order[10]; /* ガウシアン組み合わせの番号 */ __shared__ double sim_num; /* 結果を格納するシェアードメモリ */ __shared__ double calc_data[BLOCK_SIZE][3]; /* 足し合わせたガウシアンの最大値 */ __shared__ double g_max; g_max = 0; /* 足し合わせたガウシアンを格納する */ double gaussian = 0; /* 足し合わせたガウシアンを格納(最大値比較用) */ __shared__ double g_comp[BLOCK_SIZE]; /* 比較用シェアードメモリ初期化 */ g_comp[ix] = 0; /* sim_orderヘ値を入れる */ if (ix == 0) { sim_num = blockIdx.x + simNum; int count = 512; // カウンタ for (int i = 0; i < 10; i++) { if (sim_num >= count) { sim_num -= count; sim_order[i] = 1; } else { sim_order[i] = 0; } count = count / 2; } /*printf("%d %d %d %d %d %d %d %d %d %d\n", sim_order[0], sim_order[1], sim_order[2], sim_order[3], sim_order[4], sim_order[5], sim_order[6], sim_order[7], sim_order[8], sim_order[9] );*/ } /* ブロック内のスレッド同期 */ __syncthreads(); /* ガウシアンを足し合わせる */ for (int i = 0; i < 10; i++) { aPos = i * BLOCK_SIZE + ix; if (sim_order[i] == 1) { gaussian += g_data[aPos]; g_comp[ix] += g_data[aPos]; } } /* ブロック内のスレッド同期 */ __syncthreads(); /* 足し合わせたガウシアンの最大値を求める */ if (ix == 0) { for (int i = 0; i < BLOCK_SIZE; i++) { if (g_max < g_comp[i]) { g_max = g_comp[i]; } } } /* ブロック内のスレッド同期 */ __syncthreads(); /* g_max が1以上の場合、最大値が0.99になるように正規化 */ if (g_max >= 1) { gaussian = gaussian / g_max * 0.99; } /* ブロック内のスレッド同期 */ __syncthreads(); for (int i = 0; i < CALCNUM; i++) { /* シェアードメモリにデータ格納 */ calc_data[ix][0] = d65[ix] * obs_x[ix] * pow(gaussian, (0.01 * i)); calc_data[ix][1] = d65[ix] * obs_y[ix] * pow(gaussian, (0.01 * i)); calc_data[ix][2] = d65[ix] * obs_z[ix] * pow(gaussian, (0.01 * i)); /* ブロック同期 */ __syncthreads(); /* ブロックごとにリダクション処理(総和計算) */ /* 余りが0出ない場合 */ if (remain != 0) { /* 余った要素のシェアードメモリを加算する */ if (ix < remain) { calc_data[ix][0] += 
calc_data[BLOCK_SIZE - ix - 1][0]; calc_data[ix][1] += calc_data[BLOCK_SIZE - ix - 1][1]; calc_data[ix][2] += calc_data[BLOCK_SIZE - ix - 1][2]; } } /* 総和計算する */ if (BLOCK_SIZE >= 256) { if (ix < 128) { calc_data[ix][0] += calc_data[ix + 128][0]; calc_data[ix][1] += calc_data[ix + 128][1]; calc_data[ix][2] += calc_data[ix + 128][2]; }__syncthreads(); } if (BLOCK_SIZE >= 128) { if (ix < 64) { calc_data[ix][0] += calc_data[ix + 64][0]; calc_data[ix][1] += calc_data[ix + 64][1]; calc_data[ix][2] += calc_data[ix + 64][2]; }__syncthreads(); } if (BLOCK_SIZE >= 64) { if (ix < 32) { calc_data[ix][0] += calc_data[ix + 32][0]; calc_data[ix][1] += calc_data[ix + 32][1]; calc_data[ix][2] += calc_data[ix + 32][2]; } __syncthreads();} if (BLOCK_SIZE >= 32) { if (ix < 16) { calc_data[ix][0] += calc_data[ix + 16][0]; calc_data[ix][1] += calc_data[ix + 16][1]; calc_data[ix][2] += calc_data[ix + 16][2]; } __syncthreads(); } if (BLOCK_SIZE >= 16) { if (ix < 8) { calc_data[ix][0] += calc_data[ix + 8][0]; calc_data[ix][1] += calc_data[ix + 8][1]; calc_data[ix][2] += calc_data[ix + 8][2]; }__syncthreads(); } if (BLOCK_SIZE >= 8) { if (ix < 4) { calc_data[ix][0] += calc_data[ix + 4][0]; calc_data[ix][1] += calc_data[ix + 4][1]; calc_data[ix][2] += calc_data[ix + 4][2]; } __syncthreads(); } if (BLOCK_SIZE >= 4) { if (ix < 2) { calc_data[ix][0] += calc_data[ix + 2][0]; calc_data[ix][1] += calc_data[ix + 2][1]; calc_data[ix][2] += calc_data[ix + 2][2]; } __syncthreads(); } if (BLOCK_SIZE >= 2) { if (ix < 1) { calc_data[ix][0] += calc_data[ix + 1][0]; calc_data[ix][1] += calc_data[ix + 1][1]; calc_data[ix][2] += calc_data[ix + 1][2]; } __syncthreads(); } /*if (ix == 0) { for (int j = 1; j < BLOCK_SIZE; j++) { calc_data[ix][0] += calc_data[i][0]; calc_data[ix][1] += calc_data[i][1]; calc_data[ix][2] += calc_data[i][2]; } }*/ /* 値出力 */ if (ix == 0) { /* aPos更新 */ aPos = blockIdx.x * 3 * CALCNUM + i; //printf("%d %d\n", blockIdx.x,calc_data[ix]); result[aPos] = calc_data[0][0]; /* aPos更新 */ 
aPos = blockIdx.x * 3 * CALCNUM + i + CALCNUM; //printf("%d %d\n", blockIdx.x,calc_data[ix]); result[aPos] = calc_data[0][1]; /* aPos更新 */ aPos = blockIdx.x * 3 * CALCNUM + i + (2 * CALCNUM); //printf("%d %d\n", blockIdx.x,calc_data[ix]); result[aPos] = calc_data[0][2]; //printf("%.3lf %.3lf %.3lf\n", calc_data[0][0], calc_data[0][1], calc_data[0][2]); } /* ブロック同期 */ __syncthreads(); } } int main(void) { /* D65のデータを格納する配列 */ vector<vector<double> > d65_data(DATA_ROW, vector<double>(D65_COL, 0)); /*標準観測者のデータを格納する配列 */ vector<vector<double> > obs_data(DATA_ROW, vector<double>(OBS_COL, 0)); /*ガウシアンを10個格納する配列 */ vector<vector<double> > gauss_shift(DATA_ROW, vector<double>(10, 0)); /* 余り計算 */ int remain = getRemain(); /* データを入れる1次元配列 */ double* d65, * obs_x, * obs_y, * obs_z, * gauss_data, * result, * fin_result; d65 = new double[DATA_ROW]; obs_x= new double[DATA_ROW]; obs_y = new double[DATA_ROW]; obs_z = new double[DATA_ROW]; gauss_data = new double[DATA_ROW * 10]; result = new double[3 * DATANUM * CALCNUM]; fin_result = new double[3 * SIMNUM * CALCNUM]; /* CUDA用の変数 */ double* d_d65, * d_obs_x, * d_obs_y, * d_obs_z, * d_gauss_data, *d_result; char* d_sim_order; /* GPUメモリ確保 */ cudaMalloc((void**)&d_d65, DATA_ROW * sizeof(double)); cudaMalloc((void**)&d_obs_x, DATA_ROW * sizeof(double)); cudaMalloc((void**)&d_obs_y, DATA_ROW * sizeof(double)); cudaMalloc((void**)&d_obs_z, DATA_ROW * sizeof(double)); cudaMalloc((void**)&d_gauss_data, DATA_ROW * 10 * sizeof(double)); cudaMalloc((void**)&d_result, 3 * DATANUM * CALCNUM * sizeof(double)); /* ファイル読み込み関数実行 */ int f_result = getFileData(d65_data, obs_data); /* ガウシアン計算 */ makeGaussShift(gauss_shift); /* vectorを1次元配列へ変換 */ cpyVecToArray(d65_data, obs_data, gauss_shift,d65,obs_x,obs_y,obs_z,gauss_data); /* CUDAへのメモリコピー */ cudaMemcpy(d_d65, d65, DATA_ROW * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_obs_x, obs_x, DATA_ROW * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_obs_y, obs_y, DATA_ROW * sizeof(double), 
cudaMemcpyHostToDevice); cudaMemcpy(d_obs_z, obs_z, DATA_ROW * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_gauss_data, gauss_data, DATA_ROW * 10 * sizeof(double), cudaMemcpyHostToDevice); int count = 0; for (int i = 0; i < (SIMNUM - DATANUM); i += DATANUM) { colorSim<DATA_ROW> << <DATANUM, DATA_ROW >> > ((i+200), d_gauss_data, d_d65, d_obs_x, d_obs_y, d_obs_z, d_result, remain); cudaDeviceSynchronize(); /* 結果のコピー */ cudaMemcpy(result, d_result, 3 * DATANUM * CALCNUM * sizeof(double), cudaMemcpyDeviceToHost); for (int j = 0; j < (3 * DATANUM * CALCNUM); j++) { int aPos = (count * 3 * DATANUM * CALCNUM) + j; fin_result[aPos] = result[j]; } count++; } /* 出力ファイル名 */ string fname = "sim_result.csv"; /* ファイル出力ストリーム */ ofstream o_file(fname); /* ファイルへの出力桁数指定 */ o_file << fixed << setprecision(3); /* ファイル書き込み */ //for (int i = 0; i < CALCNUM; i++) { // for (int j = 0; j < SIMNUM; j++) { // for (int k = 0; k < 3; k++) { // int apos = i + (3 * j + k) * CALCNUM; // o_file << fin_result[apos] << ","; // } // } // o_file << endl << flush; //} //for (int i = 0; i < SIMNUM; i++) { // for (int j = 0; j < 3; j++) { // for (int k = 0; k < CALCNUM; k++) { // int apos = (3 * i * CALCNUM) + (j * CALCNUM) + k; // o_file << fin_result[apos] << ","; // } // o_file << endl << flush; // } //} for (int i = 0; i < CALCNUM; i++) { for (int j = 0; j < SIMNUM; j++) { int apos = i + (3 * j) * CALCNUM; double X = fin_result[apos]; double Y = fin_result[apos + CALCNUM]; double Z = fin_result[apos + (2 * CALCNUM)]; double x = X / (X + Y + Z); double y = Y / (X + Y + Z); double z = Z / (X + Y + Z); o_file << x << "," << y << "," << z << ","; } o_file << endl << flush; } /* デバイスメモリ解放 */ cudaFree(d_d65); cudaFree(d_gauss_data); cudaFree(d_obs_x); cudaFree(d_obs_y); cudaFree(d_obs_z); cudaFree(d_result); /* ホストメモリ解放 */ delete[] d65; delete[] obs_x; delete[] obs_y; delete[] obs_z; delete[] gauss_data; delete[] result; delete[] fin_result; return 0; }
8297e84b12210bd34182a6aae5d7f87ff9a146b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "error.cuh" #include <math.h> #include <stdio.h> #ifdef USE_DP typedef double real; const real EPSILON = 1.0e-15; #else typedef float real; const real EPSILON = 1.0e-6f; #endif const int NUM_REPEATS = 10; const real a = 1.23; const real b = 2.34; const real c = 3.57; void __global__ add(const real *x, const real *y, real *z, const int N); void check(const real *z, const int N); int main(void) { const int N = 100000000; const int M = sizeof(real) * N; real *h_x = (real*) malloc(M); real *h_y = (real*) malloc(M); real *h_z = (real*) malloc(M); for (int n = 0; n < N; ++n) { h_x[n] = a; h_y[n] = b; } real *d_x, *d_y, *d_z; CHECK(hipMalloc((void **)&d_x, M)); CHECK(hipMalloc((void **)&d_y, M)); CHECK(hipMalloc((void **)&d_z, M)); CHECK(hipMemcpy(d_x, h_x, M, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_y, h_y, M, hipMemcpyHostToDevice)); const int block_size = 128; const int grid_size = (N + block_size - 1) / block_size; float t_sum = 0; float t2_sum = 0; for (int repeat = 0; repeat <= NUM_REPEATS; ++repeat) { hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); CHECK(hipEventRecord(start)); hipEventQuery(start); hipLaunchKernelGGL(( add), dim3(grid_size), dim3(block_size), 0, 0, d_x, d_y, d_z, N); CHECK(hipEventRecord(stop)); CHECK(hipEventSynchronize(stop)); float elapsed_time; CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Time = %g ms.\n", elapsed_time); if (repeat > 0) { t_sum += elapsed_time; t2_sum += elapsed_time * elapsed_time; } CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); } const float t_ave = t_sum / NUM_REPEATS; const float t_err = sqrt(t2_sum / NUM_REPEATS - t_ave * t_ave); printf("Time = %g +- %g ms.\n", t_ave, t_err); CHECK(hipMemcpy(h_z, d_z, M, hipMemcpyDeviceToHost)); check(h_z, N); free(h_x); free(h_y); free(h_z); CHECK(hipFree(d_x)); CHECK(hipFree(d_y)); CHECK(hipFree(d_z)); return 0; } void 
__global__ add(const real *x, const real *y, real *z, const int N) { const int n = blockDim.x * blockIdx.x + threadIdx.x; if (n < N) { z[n] = x[n] + y[n]; } } void check(const real *z, const int N) { bool has_error = false; for (int n = 0; n < N; ++n) { if (fabs(z[n] - c) > EPSILON) { has_error = true; } } printf("%s\n", has_error ? "Has errors" : "No errors"); }
8297e84b12210bd34182a6aae5d7f87ff9a146b1.cu
#include "error.cuh" #include <math.h> #include <stdio.h> #ifdef USE_DP typedef double real; const real EPSILON = 1.0e-15; #else typedef float real; const real EPSILON = 1.0e-6f; #endif const int NUM_REPEATS = 10; const real a = 1.23; const real b = 2.34; const real c = 3.57; void __global__ add(const real *x, const real *y, real *z, const int N); void check(const real *z, const int N); int main(void) { const int N = 100000000; const int M = sizeof(real) * N; real *h_x = (real*) malloc(M); real *h_y = (real*) malloc(M); real *h_z = (real*) malloc(M); for (int n = 0; n < N; ++n) { h_x[n] = a; h_y[n] = b; } real *d_x, *d_y, *d_z; CHECK(cudaMalloc((void **)&d_x, M)); CHECK(cudaMalloc((void **)&d_y, M)); CHECK(cudaMalloc((void **)&d_z, M)); CHECK(cudaMemcpy(d_x, h_x, M, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_y, h_y, M, cudaMemcpyHostToDevice)); const int block_size = 128; const int grid_size = (N + block_size - 1) / block_size; float t_sum = 0; float t2_sum = 0; for (int repeat = 0; repeat <= NUM_REPEATS; ++repeat) { cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); CHECK(cudaEventRecord(start)); cudaEventQuery(start); add<<<grid_size, block_size>>>(d_x, d_y, d_z, N); CHECK(cudaEventRecord(stop)); CHECK(cudaEventSynchronize(stop)); float elapsed_time; CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Time = %g ms.\n", elapsed_time); if (repeat > 0) { t_sum += elapsed_time; t2_sum += elapsed_time * elapsed_time; } CHECK(cudaEventDestroy(start)); CHECK(cudaEventDestroy(stop)); } const float t_ave = t_sum / NUM_REPEATS; const float t_err = sqrt(t2_sum / NUM_REPEATS - t_ave * t_ave); printf("Time = %g +- %g ms.\n", t_ave, t_err); CHECK(cudaMemcpy(h_z, d_z, M, cudaMemcpyDeviceToHost)); check(h_z, N); free(h_x); free(h_y); free(h_z); CHECK(cudaFree(d_x)); CHECK(cudaFree(d_y)); CHECK(cudaFree(d_z)); return 0; } void __global__ add(const real *x, const real *y, real *z, const int N) { const int n = blockDim.x * 
blockIdx.x + threadIdx.x; if (n < N) { z[n] = x[n] + y[n]; } } void check(const real *z, const int N) { bool has_error = false; for (int n = 0; n < N; ++n) { if (fabs(z[n] - c) > EPSILON) { has_error = true; } } printf("%s\n", has_error ? "Has errors" : "No errors"); }
3f1cd2dfc8bb7a034b670dfa0ddccc484bc76835.hip
// !!! This is a file automatically generated by hipify!!! /** * 2mm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin) * to work with Collective Mind, OpenME plugin interface and * Collective Knowledge Frameworks for automatic, machine-learning based * and collective tuning and data mining: http://cTuning.org * */ #ifndef WINDOWS #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <hip/hip_runtime.h> #include "polybench.h" #ifdef OPENME #include <openme.h> #endif #ifdef XOPENME #include <xopenme.h> #endif //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size. */ #ifndef NI # define NI 256 //2048 #endif #ifndef NJ # define NJ 256 //2048 #endif #ifndef NK # define NK 256 //2048 #endif #ifndef NL # define NL 256 //2048 #endif /* Thread block dimensions */ #ifndef DIM_TRHEAD_BLOCK_X #define DIM_THREAD_BLOCK_X 8 //32 #endif #ifndef DIM_THREAD_BLOCK_Y #define DIM_THREAD_BLOCK_Y 8 #endif /* Can switch DATA_TYPE between float and double */ # ifndef DATA_TYPE # define DATA_TYPE float # endif void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NI + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ; } } for (i = 0; i < NL; i++) { for (j = 0; j < NJ; j++) { C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL; } } for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK; } } } void compareResults(DATA_TYPE *E, DATA_TYPE *E_outputFromGpu) { int i,j,fail; fail = 0; for (i=0; i < NL; i++) { 
for (j=0; j < NI; j++) { if (percentDiff(E[i*NI + j], E_outputFromGpu[i*NI + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { /* Grigori Fursin added support for CK widgets */ int gpgpu_device_id=GPU_DEVICE; int devID = 0; hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (getenv("CK_COMPUTE_DEVICE_ID")!=NULL) gpgpu_device_id=atol(getenv("CK_COMPUTE_DEVICE_ID")); hipGetDeviceProperties(&deviceProp, gpgpu_device_id); if (deviceProp.computeMode == hipComputeModeProhibited) { printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); else printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); hipSetDevice( gpgpu_device_id ); } __global__ void mm2_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { int k; for (k = 0; k < NK; k++) { C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } __global__ void mm2_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NL)) { int k; for (k = 0; k < NJ; k++) { E[i * NL + j] += C[i * NJ + k] * D[k * NL + j]; } } } void mm2_cpu(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E) { int i, j, k; for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] = 0.0; for (k = 0; k < NK; ++k) { C[i*NJ + j] += A[i*NK + k] * B[k*NJ + j]; } } } for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { E[i*NL + j] = 0.0; for (k = 0; k < NJ; ++k) { E[i*NL + 
j] += C[i*NJ + k] * D[k*NL + j]; } } } } void mm2Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* E_outputFromGpu) { hipError_t error; double t_start, t_end; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; DATA_TYPE *D_gpu; DATA_TYPE *E_gpu; error=hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NJ * NL); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NL); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NK * NJ, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NJ * NL, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, 
line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NL, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid1((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) ); dim3 grid2((size_t)ceil( ((float)NL) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) ); // t_start = rtclock(); hipLaunchKernelGGL(( mm2_kernel1), dim3(grid1),dim3(block), 0, 0, A_gpu, B_gpu, C_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( mm2_kernel2), dim3(grid2),dim3(block), 0, 0, C_gpu, D_gpu, E_gpu); hipDeviceSynchronize(); // t_end = rtclock(); // fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); error=hipMemcpy(E_outputFromGpu, E_gpu, sizeof(DATA_TYPE) * NI * NL, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu); hipFree(D_gpu); hipFree(E_gpu); } int main(int argc, char** argv) { /* Prepare ctuning vars */ long ct_repeat=0; long ct_repeat_max=1; DATA_TYPE* C; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* D; DATA_TYPE* E; DATA_TYPE* E_outputFromGpu; #ifdef XOPENME xopenme_init(2,0); #endif #ifdef OPENME openme_init(NULL,NULL,NULL,0); openme_callback("PROGRAM_START", NULL); #endif /* Run kernel. 
*/ if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE)); E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); E_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); srand(1); init_array(A, B, C, D); GPU_argv_init(); #ifdef OPENME openme_callback("ACC_KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(0); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { mm2Cuda(A, B, C, D, E, E_outputFromGpu); } #ifdef XOPENME xopenme_clock_end(0); #endif #ifdef OPENME openme_callback("ACC_KERNEL_END", NULL); #endif /* srand(1); init_array(A, B, C, D); #ifdef OPENME openme_callback("KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(1); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { mm2_cpu(A, B, C, D, E); } #ifdef XOPENME xopenme_clock_end(1); #endif #ifdef OPENME openme_callback("KERNEL_END", NULL); #endif */ compareResults(E, E_outputFromGpu); free(C); free(A); free(B); free(D); free(E); free(E_outputFromGpu); #ifdef XOPENME xopenme_dump_state(); xopenme_finish(); #endif #ifdef OPENME openme_callback("PROGRAM_END", NULL); #endif return 0; }
3f1cd2dfc8bb7a034b670dfa0ddccc484bc76835.cu
/** * 2mm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin) * to work with Collective Mind, OpenME plugin interface and * Collective Knowledge Frameworks for automatic, machine-learning based * and collective tuning and data mining: http://cTuning.org * */ #ifndef WINDOWS #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <cuda.h> #include "polybench.h" #ifdef OPENME #include <openme.h> #endif #ifdef XOPENME #include <xopenme.h> #endif //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size. */ #ifndef NI # define NI 256 //2048 #endif #ifndef NJ # define NJ 256 //2048 #endif #ifndef NK # define NK 256 //2048 #endif #ifndef NL # define NL 256 //2048 #endif /* Thread block dimensions */ #ifndef DIM_TRHEAD_BLOCK_X #define DIM_THREAD_BLOCK_X 8 //32 #endif #ifndef DIM_THREAD_BLOCK_Y #define DIM_THREAD_BLOCK_Y 8 #endif /* Can switch DATA_TYPE between float and double */ # ifndef DATA_TYPE # define DATA_TYPE float # endif void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NI + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ; } } for (i = 0; i < NL; i++) { for (j = 0; j < NJ; j++) { C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL; } } for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK; } } } void compareResults(DATA_TYPE *E, DATA_TYPE *E_outputFromGpu) { int i,j,fail; fail = 0; for (i=0; i < NL; i++) { for (j=0; j < NI; j++) { if (percentDiff(E[i*NI + j], 
E_outputFromGpu[i*NI + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { /* Grigori Fursin added support for CK widgets */ int gpgpu_device_id=GPU_DEVICE; int devID = 0; cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (getenv("CK_COMPUTE_DEVICE_ID")!=NULL) gpgpu_device_id=atol(getenv("CK_COMPUTE_DEVICE_ID")); cudaGetDeviceProperties(&deviceProp, gpgpu_device_id); if (deviceProp.computeMode == cudaComputeModeProhibited) { printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); else printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); cudaSetDevice( gpgpu_device_id ); } __global__ void mm2_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { int k; for (k = 0; k < NK; k++) { C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } __global__ void mm2_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NL)) { int k; for (k = 0; k < NJ; k++) { E[i * NL + j] += C[i * NJ + k] * D[k * NL + j]; } } } void mm2_cpu(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E) { int i, j, k; for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] = 0.0; for (k = 0; k < NK; ++k) { C[i*NJ + j] += A[i*NK + k] * B[k*NJ + j]; } } } for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { E[i*NL + j] = 0.0; for (k = 0; k < NJ; ++k) { E[i*NL + j] += C[i*NJ + k] * D[k*NL + j]; } } } } void 
mm2Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* E_outputFromGpu) { cudaError_t error; double t_start, t_end; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; DATA_TYPE *D_gpu; DATA_TYPE *E_gpu; error=cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NJ * NL); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NL); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NJ * NL, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", 
error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid1((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) ); dim3 grid2((size_t)ceil( ((float)NL) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) ); // t_start = rtclock(); mm2_kernel1<<<grid1,block>>>(A_gpu, B_gpu, C_gpu); cudaThreadSynchronize(); mm2_kernel2<<<grid2,block>>>(C_gpu, D_gpu, E_gpu); cudaThreadSynchronize(); // t_end = rtclock(); // fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); error=cudaMemcpy(E_outputFromGpu, E_gpu, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu); cudaFree(D_gpu); cudaFree(E_gpu); } int main(int argc, char** argv) { /* Prepare ctuning vars */ long ct_repeat=0; long ct_repeat_max=1; DATA_TYPE* C; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* D; DATA_TYPE* E; DATA_TYPE* E_outputFromGpu; #ifdef XOPENME xopenme_init(2,0); #endif #ifdef OPENME openme_init(NULL,NULL,NULL,0); openme_callback("PROGRAM_START", NULL); #endif /* Run kernel. 
*/ if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE)); E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); E_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); srand(1); init_array(A, B, C, D); GPU_argv_init(); #ifdef OPENME openme_callback("ACC_KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(0); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { mm2Cuda(A, B, C, D, E, E_outputFromGpu); } #ifdef XOPENME xopenme_clock_end(0); #endif #ifdef OPENME openme_callback("ACC_KERNEL_END", NULL); #endif /* srand(1); init_array(A, B, C, D); #ifdef OPENME openme_callback("KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(1); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { mm2_cpu(A, B, C, D, E); } #ifdef XOPENME xopenme_clock_end(1); #endif #ifdef OPENME openme_callback("KERNEL_END", NULL); #endif */ compareResults(E, E_outputFromGpu); free(C); free(A); free(B); free(D); free(E); free(E_outputFromGpu); #ifdef XOPENME xopenme_dump_state(); xopenme_finish(); #endif #ifdef OPENME openme_callback("PROGRAM_END", NULL); #endif return 0; }
89b8f057267e62d2d708ceaa49b11300162171c3.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_complex.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // The block cuda file is just a wrapper for the kernels that will be launched in the work // function namespace gr { namespace howto { __global__ void apply_copy_kernel(const uint8_t* in, uint8_t* out, int batch_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int n = batch_size; if (i < n) { out[i] = in[i]; } } void apply_copy( const uint8_t* in, uint8_t* out, int grid_size, int block_size, hipStream_t stream) { int batch_size = block_size * grid_size; hipLaunchKernelGGL(( apply_copy_kernel), dim3(grid_size), dim3(block_size), 0, stream, in, out, batch_size); } void get_block_and_grid(int* minGrid, int* minBlock) { // https://developer.nvidia.com/blog/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/ hipOccupancyMaxPotentialBlockSize(minGrid, minBlock, apply_copy_kernel, 0, 0); } } // namespace howto } // namespace gr
89b8f057267e62d2d708ceaa49b11300162171c3.cu
#include <cuComplex.h> #include <cuda.h> #include <cuda_runtime.h> // The block cuda file is just a wrapper for the kernels that will be launched in the work // function namespace gr { namespace howto { __global__ void apply_copy_kernel(const uint8_t* in, uint8_t* out, int batch_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int n = batch_size; if (i < n) { out[i] = in[i]; } } void apply_copy( const uint8_t* in, uint8_t* out, int grid_size, int block_size, cudaStream_t stream) { int batch_size = block_size * grid_size; apply_copy_kernel<<<grid_size, block_size, 0, stream>>>(in, out, batch_size); } void get_block_and_grid(int* minGrid, int* minBlock) { // https://developer.nvidia.com/blog/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/ cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, apply_copy_kernel, 0, 0); } } // namespace howto } // namespace gr
7eabebdb7324353aba2f3b3ec9636e53bcce3c48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. 
// Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" const int THREADS_PER_BLOCK_DIM = 8; __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. int x = threadIdx.x + (blockIdx.x * THREADS_PER_BLOCK_DIM); int y = threadIdx.y + (blockIdx.y * THREADS_PER_BLOCK_DIM); // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // if ( x >= numCols || y >= numRows ) { return; } float blurredValue = 0.0f; int pixLoc = (y*numCols) + x; for (int filterX = -filterWidth/2; filterX <= filterWidth/2; ++filterX) { for (int filterY = -filterWidth/2; filterY <= filterWidth/2; ++filterY) { int imageWindowX = min(max(x + filterX, 0), numCols-1); int imageWindowY = min(max(y + filterY, 0), numRows-1); int filterXAddress = filterX + filterWidth/2; int filterYAddress = filterY + filterWidth/2; int imageLoc = (imageWindowY * numCols) + imageWindowX; int filterLoc = (filterYAddress * filterWidth) + filterXAddress; float imageValue = static_cast<float>(inputChannel[imageLoc]); float filterValue = filter[filterLoc]; blurredValue += imageValue * filterValue; } } outputChannel[pixLoc] = blurredValue; // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { int x = threadIdx.x + (blockIdx.x * THREADS_PER_BLOCK_DIM); int y = threadIdx.y + (blockIdx.y * THREADS_PER_BLOCK_DIM); // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: if ( x >= numCols || y >= numRows ) { return; } int pixLoc = (y * numCols) + x; uchar4 pixInfo = inputImageRGBA[pixLoc]; redChannel[pixLoc] = pixInfo.x; greenChannel[pixLoc] = pixInfo.y; blueChannel[pixLoc] = pixInfo.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } //TODO: note the following globals! 
unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float)*filterWidth*filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { // Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(THREADS_PER_BLOCK_DIM, THREADS_PER_BLOCK_DIM, 1); // Compute correct grid size (i.e., number of blocks per kernel launch) // from the image size and and block size. 
const dim3 gridSize((numCols + THREADS_PER_BLOCK_DIM - 1)/THREADS_PER_BLOCK_DIM, (numRows + THREADS_PER_BLOCK_DIM - 1)/THREADS_PER_BLOCK_DIM); // Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. 
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
7eabebdb7324353aba2f3b3ec9636e53bcce3c48.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. 
// // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. 
If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" const int THREADS_PER_BLOCK_DIM = 8; __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. int x = threadIdx.x + (blockIdx.x * THREADS_PER_BLOCK_DIM); int y = threadIdx.y + (blockIdx.y * THREADS_PER_BLOCK_DIM); // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // if ( x >= numCols || y >= numRows ) { return; } float blurredValue = 0.0f; int pixLoc = (y*numCols) + x; for (int filterX = -filterWidth/2; filterX <= filterWidth/2; ++filterX) { for (int filterY = -filterWidth/2; filterY <= filterWidth/2; ++filterY) { int imageWindowX = min(max(x + filterX, 0), numCols-1); int imageWindowY = min(max(y + filterY, 0), numRows-1); int filterXAddress = filterX + filterWidth/2; int filterYAddress = filterY + filterWidth/2; int imageLoc = (imageWindowY * numCols) + imageWindowX; int filterLoc = (filterYAddress * filterWidth) + filterXAddress; float imageValue = static_cast<float>(inputChannel[imageLoc]); float filterValue = filter[filterLoc]; blurredValue += imageValue * filterValue; } } outputChannel[pixLoc] = blurredValue; // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. 
Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { int x = threadIdx.x + (blockIdx.x * THREADS_PER_BLOCK_DIM); int y = threadIdx.y + (blockIdx.y * THREADS_PER_BLOCK_DIM); // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: if ( x >= numCols || y >= numRows ) { return; } int pixLoc = (y * numCols) + x; uchar4 pixInfo = inputImageRGBA[pixLoc]; redChannel[pixLoc] = pixInfo.x; greenChannel[pixLoc] = pixInfo.y; blueChannel[pixLoc] = pixInfo.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } //TODO: note the following globals! unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. 
cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float)*filterWidth*filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { // Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(THREADS_PER_BLOCK_DIM, THREADS_PER_BLOCK_DIM, 1); // Compute correct grid size (i.e., number of blocks per kernel launch) // from the image size and and block size. const dim3 gridSize((numCols + THREADS_PER_BLOCK_DIM - 1)/THREADS_PER_BLOCK_DIM, (numRows + THREADS_PER_BLOCK_DIM - 1)/THREADS_PER_BLOCK_DIM); // Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. 
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
65c4f5105b14d46869a691b60646b857ba34e564.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2019 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Asher Elmquist // ============================================================================= // // camera ray launch kernels // // ============================================================================= #include "chrono_sensor/optix/shaders/device_utils.h" extern "C" __global__ void __raygen__camera_pinhole() { const RaygenParameters* raygen = (RaygenParameters*)optixGetSbtDataPointer(); const CameraParameters& camera = raygen->specific.camera; const uint3 idx = optixGetLaunchIndex(); const uint3 screen = optixGetLaunchDimensions(); const unsigned int image_index = screen.x * idx.y + idx.x; float2 d = (make_float2(idx.x, idx.y) + make_float2(0.5, 0.5)) / make_float2(screen.x, screen.y) * 2.f - make_float2(1.f); d.y *= (float)(screen.y) / (float)(screen.x); // correct for the aspect ratio const float t_frac = 0; // 0-1 between start and end time of the camera (chosen here) const float t_traverse = raygen->t0 + t_frac * (raygen->t1 - raygen->t0); // simulation time when ray is sent float3 ray_origin = lerp(raygen->pos0, raygen->pos1, t_frac); float4 ray_quat = nlerp(raygen->rot0, raygen->rot1, t_frac); const float h_factor = camera.hFOV / CUDART_PI_F * 2.0; float3 forward; float3 left; float3 up; basis_from_quaternion(ray_quat, forward, left, up); float3 ray_direction = normalize(forward - d.x * left * h_factor + d.y * up * h_factor); PerRayData_camera prd = default_camera_prd(); prd.use_gi = 
camera.use_gi; if (camera.use_gi) { prd.rng = camera.rng_buffer[image_index]; } unsigned int opt1; unsigned int opt2; pointer_as_ints(&prd, opt1, opt2); unsigned int raytype = (unsigned int)CAMERA_RAY_TYPE; optixTrace(params.root, ray_origin, ray_direction, params.scene_epsilon, 1e16f, t_traverse, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype); // Gamma correct the output color into sRGB color space float recip_gamma = 1 / camera.gamma; camera.frame_buffer[image_index] = make_half4(pow(prd.color.x, recip_gamma), pow(prd.color.y, recip_gamma), pow(prd.color.z, recip_gamma), 1.f); if (camera.use_gi) { camera.albedo_buffer[image_index] = make_half4(prd.albedo.x, prd.albedo.y, prd.albedo.z, 0.f); float screen_n_x = -Dot(left, prd.normal); // screen space (x right) float screen_n_y = Dot(up, prd.normal); // screen space (y up) float screen_n_z = -Dot(forward, prd.normal); // screen space (-z forward) camera.normal_buffer[image_index] = make_half4(screen_n_x, screen_n_y, screen_n_z, 0.f); } } /// Camera ray generation program using an FOV lens model extern "C" __global__ void __raygen__camera_fov_lens() { const RaygenParameters* raygen = (RaygenParameters*)optixGetSbtDataPointer(); const CameraParameters& camera = raygen->specific.camera; const uint3 idx = optixGetLaunchIndex(); const uint3 screen = optixGetLaunchDimensions(); const unsigned int image_index = screen.x * idx.y + idx.x; float2 d = (make_float2(idx.x, idx.y) + make_float2(0.5, 0.5)) / make_float2(screen.x, screen.y) * 2.f - make_float2(1.f); d.y *= (float)(screen.y) / (float)(screen.x); // correct for the aspect ratio if (abs(d.x) > 1e-5 || abs(d.y) > 1e-5) { float r1 = sqrtf(d.x * d.x + d.y * d.y); float r2 = tanf(r1 * tanf(camera.hFOV / 2.0)) / tanf(camera.hFOV / 2.0); float scaled_extent = tanf(tanf(camera.hFOV / 2.0)) / tanf(camera.hFOV / 2.0); d.x = d.x * (r2 / r1) / scaled_extent; d.y = d.y * (r2 / r1) / scaled_extent; } const float t_frac = 0; // 0-1 between start and end 
time of the camera (chosen here) const float t_traverse = raygen->t0 + t_frac * (raygen->t1 - raygen->t0); // simulation time when ray is sent float3 ray_origin = lerp(raygen->pos0, raygen->pos1, t_frac); float4 ray_quat = nlerp(raygen->rot0, raygen->rot1, t_frac); const float h_factor = camera.hFOV / CUDART_PI_F * 2.0; float3 forward; float3 left; float3 up; basis_from_quaternion(ray_quat, forward, left, up); float3 ray_direction = normalize(forward - d.x * left * h_factor + d.y * up * h_factor); PerRayData_camera prd = default_camera_prd(); prd.use_gi = camera.use_gi; if (camera.use_gi) { prd.rng = camera.rng_buffer[image_index]; } unsigned int opt1; unsigned int opt2; pointer_as_ints(&prd, opt1, opt2); unsigned int raytype = (unsigned int)CAMERA_RAY_TYPE; optixTrace(params.root, ray_origin, ray_direction, params.scene_epsilon, 1e16f, t_traverse, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype); // Gamma correct the output color into sRGB color space float gamma = camera.gamma; camera.frame_buffer[image_index] = make_half4(pow(prd.color.x, 1.0f / gamma), pow(prd.color.y, 1.0f / gamma), pow(prd.color.z, 1.0f / gamma), 1.f); if (camera.use_gi) { camera.albedo_buffer[image_index] = make_half4(prd.albedo.x, prd.albedo.y, prd.albedo.z, 0.f); float screen_n_x = -Dot(left, prd.normal); // screen space (x right) float screen_n_y = Dot(up, prd.normal); // screen space (y up) float screen_n_z = -Dot(forward, prd.normal); // screen space (-z forward) camera.normal_buffer[image_index] = make_half4(screen_n_x, screen_n_y, screen_n_z, 0.f); } } extern "C" __global__ void __raygen__segmentation_pinhole() { const RaygenParameters* raygen = (RaygenParameters*)optixGetSbtDataPointer(); const SemanticCameraParameters& camera = raygen->specific.segmentation; const uint3 idx = optixGetLaunchIndex(); const uint3 screen = optixGetLaunchDimensions(); const unsigned int image_index = screen.x * idx.y + idx.x; float2 d = (make_float2(idx.x, idx.y) + 
make_float2(0.5, 0.5)) / make_float2(screen.x, screen.y) * 2.f - make_float2(1.f); d.y *= (float)(screen.y) / (float)(screen.x); // correct for the aspect ratio const float t_frac = 0; // 0-1 between start and end time of the camera (chosen here) const float t_traverse = raygen->t0 + t_frac * (raygen->t1 - raygen->t0); // simulation time when ray is sent float3 ray_origin = lerp(raygen->pos0, raygen->pos1, t_frac); float4 ray_quat = nlerp(raygen->rot0, raygen->rot1, t_frac); const float h_factor = camera.hFOV / CUDART_PI_F * 2.0; float3 forward; float3 left; float3 up; basis_from_quaternion(ray_quat, forward, left, up); float3 ray_direction = normalize(forward - d.x * left * h_factor + d.y * up * h_factor); PerRayData_semantic prd = default_semantic_prd(); unsigned int opt1; unsigned int opt2; pointer_as_ints(&prd, opt1, opt2); unsigned int raytype = (unsigned int)SEGMENTATION_RAY_TYPE; optixTrace(params.root, ray_origin, ray_direction, params.scene_epsilon, 1e16f, t_traverse, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype); camera.frame_buffer[image_index].x = prd.class_id; camera.frame_buffer[image_index].y = prd.instance_id; } /// Camera ray generation program using an FOV lens model extern "C" __global__ void __raygen__segmentation_fov_lens() { const RaygenParameters* raygen = (RaygenParameters*)optixGetSbtDataPointer(); const SemanticCameraParameters& camera = raygen->specific.segmentation; const uint3 idx = optixGetLaunchIndex(); const uint3 screen = optixGetLaunchDimensions(); const unsigned int image_index = screen.x * idx.y + idx.x; float2 d = (make_float2(idx.x, idx.y) + make_float2(0.5, 0.5)) / make_float2(screen.x, screen.y) * 2.f - make_float2(1.f); d.y *= (float)(screen.y) / (float)(screen.x); // correct for the aspect ratio if (abs(d.x) > 1e-5 || abs(d.y) > 1e-5) { float r1 = sqrtf(d.x * d.x + d.y * d.y); float r2 = tanf(r1 * tanf(camera.hFOV / 2.0)) / tanf(camera.hFOV / 2.0); float scaled_extent = tanf(tanf(camera.hFOV / 
2.0)) / tanf(camera.hFOV / 2.0); d.x = d.x * (r2 / r1) / scaled_extent; d.y = d.y * (r2 / r1) / scaled_extent; } const float t_frac = 0; // 0-1 between start and end time of the camera (chosen here) const float t_traverse = raygen->t0 + t_frac * (raygen->t1 - raygen->t0); // simulation time when ray is sent float3 ray_origin = lerp(raygen->pos0, raygen->pos1, t_frac); float4 ray_quat = nlerp(raygen->rot0, raygen->rot1, t_frac); const float h_factor = camera.hFOV / CUDART_PI_F * 2.0; float3 forward; float3 left; float3 up; basis_from_quaternion(ray_quat, forward, left, up); float3 ray_direction = normalize(forward - d.x * left * h_factor + d.y * up * h_factor); PerRayData_semantic prd = default_semantic_prd(); unsigned int opt1; unsigned int opt2; pointer_as_ints(&prd, opt1, opt2); unsigned int raytype = (unsigned int)SEGMENTATION_RAY_TYPE; optixTrace(params.root, ray_origin, ray_direction, params.scene_epsilon, 1e16f, t_traverse, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype); camera.frame_buffer[image_index].x = prd.class_id; camera.frame_buffer[image_index].y = prd.instance_id; }
65c4f5105b14d46869a691b60646b857ba34e564.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2019 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Asher Elmquist // ============================================================================= // // camera ray launch kernels // // ============================================================================= #include "chrono_sensor/optix/shaders/device_utils.h" extern "C" __global__ void __raygen__camera_pinhole() { const RaygenParameters* raygen = (RaygenParameters*)optixGetSbtDataPointer(); const CameraParameters& camera = raygen->specific.camera; const uint3 idx = optixGetLaunchIndex(); const uint3 screen = optixGetLaunchDimensions(); const unsigned int image_index = screen.x * idx.y + idx.x; float2 d = (make_float2(idx.x, idx.y) + make_float2(0.5, 0.5)) / make_float2(screen.x, screen.y) * 2.f - make_float2(1.f); d.y *= (float)(screen.y) / (float)(screen.x); // correct for the aspect ratio const float t_frac = 0; // 0-1 between start and end time of the camera (chosen here) const float t_traverse = raygen->t0 + t_frac * (raygen->t1 - raygen->t0); // simulation time when ray is sent float3 ray_origin = lerp(raygen->pos0, raygen->pos1, t_frac); float4 ray_quat = nlerp(raygen->rot0, raygen->rot1, t_frac); const float h_factor = camera.hFOV / CUDART_PI_F * 2.0; float3 forward; float3 left; float3 up; basis_from_quaternion(ray_quat, forward, left, up); float3 ray_direction = normalize(forward - d.x * left * h_factor + d.y * up * h_factor); PerRayData_camera prd = default_camera_prd(); prd.use_gi = camera.use_gi; if (camera.use_gi) { prd.rng = camera.rng_buffer[image_index]; } unsigned 
int opt1; unsigned int opt2; pointer_as_ints(&prd, opt1, opt2); unsigned int raytype = (unsigned int)CAMERA_RAY_TYPE; optixTrace(params.root, ray_origin, ray_direction, params.scene_epsilon, 1e16f, t_traverse, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype); // Gamma correct the output color into sRGB color space float recip_gamma = 1 / camera.gamma; camera.frame_buffer[image_index] = make_half4(pow(prd.color.x, recip_gamma), pow(prd.color.y, recip_gamma), pow(prd.color.z, recip_gamma), 1.f); if (camera.use_gi) { camera.albedo_buffer[image_index] = make_half4(prd.albedo.x, prd.albedo.y, prd.albedo.z, 0.f); float screen_n_x = -Dot(left, prd.normal); // screen space (x right) float screen_n_y = Dot(up, prd.normal); // screen space (y up) float screen_n_z = -Dot(forward, prd.normal); // screen space (-z forward) camera.normal_buffer[image_index] = make_half4(screen_n_x, screen_n_y, screen_n_z, 0.f); } } /// Camera ray generation program using an FOV lens model extern "C" __global__ void __raygen__camera_fov_lens() { const RaygenParameters* raygen = (RaygenParameters*)optixGetSbtDataPointer(); const CameraParameters& camera = raygen->specific.camera; const uint3 idx = optixGetLaunchIndex(); const uint3 screen = optixGetLaunchDimensions(); const unsigned int image_index = screen.x * idx.y + idx.x; float2 d = (make_float2(idx.x, idx.y) + make_float2(0.5, 0.5)) / make_float2(screen.x, screen.y) * 2.f - make_float2(1.f); d.y *= (float)(screen.y) / (float)(screen.x); // correct for the aspect ratio if (abs(d.x) > 1e-5 || abs(d.y) > 1e-5) { float r1 = sqrtf(d.x * d.x + d.y * d.y); float r2 = tanf(r1 * tanf(camera.hFOV / 2.0)) / tanf(camera.hFOV / 2.0); float scaled_extent = tanf(tanf(camera.hFOV / 2.0)) / tanf(camera.hFOV / 2.0); d.x = d.x * (r2 / r1) / scaled_extent; d.y = d.y * (r2 / r1) / scaled_extent; } const float t_frac = 0; // 0-1 between start and end time of the camera (chosen here) const float t_traverse = raygen->t0 + t_frac * 
(raygen->t1 - raygen->t0); // simulation time when ray is sent float3 ray_origin = lerp(raygen->pos0, raygen->pos1, t_frac); float4 ray_quat = nlerp(raygen->rot0, raygen->rot1, t_frac); const float h_factor = camera.hFOV / CUDART_PI_F * 2.0; float3 forward; float3 left; float3 up; basis_from_quaternion(ray_quat, forward, left, up); float3 ray_direction = normalize(forward - d.x * left * h_factor + d.y * up * h_factor); PerRayData_camera prd = default_camera_prd(); prd.use_gi = camera.use_gi; if (camera.use_gi) { prd.rng = camera.rng_buffer[image_index]; } unsigned int opt1; unsigned int opt2; pointer_as_ints(&prd, opt1, opt2); unsigned int raytype = (unsigned int)CAMERA_RAY_TYPE; optixTrace(params.root, ray_origin, ray_direction, params.scene_epsilon, 1e16f, t_traverse, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype); // Gamma correct the output color into sRGB color space float gamma = camera.gamma; camera.frame_buffer[image_index] = make_half4(pow(prd.color.x, 1.0f / gamma), pow(prd.color.y, 1.0f / gamma), pow(prd.color.z, 1.0f / gamma), 1.f); if (camera.use_gi) { camera.albedo_buffer[image_index] = make_half4(prd.albedo.x, prd.albedo.y, prd.albedo.z, 0.f); float screen_n_x = -Dot(left, prd.normal); // screen space (x right) float screen_n_y = Dot(up, prd.normal); // screen space (y up) float screen_n_z = -Dot(forward, prd.normal); // screen space (-z forward) camera.normal_buffer[image_index] = make_half4(screen_n_x, screen_n_y, screen_n_z, 0.f); } } extern "C" __global__ void __raygen__segmentation_pinhole() { const RaygenParameters* raygen = (RaygenParameters*)optixGetSbtDataPointer(); const SemanticCameraParameters& camera = raygen->specific.segmentation; const uint3 idx = optixGetLaunchIndex(); const uint3 screen = optixGetLaunchDimensions(); const unsigned int image_index = screen.x * idx.y + idx.x; float2 d = (make_float2(idx.x, idx.y) + make_float2(0.5, 0.5)) / make_float2(screen.x, screen.y) * 2.f - make_float2(1.f); d.y *= 
(float)(screen.y) / (float)(screen.x); // correct for the aspect ratio const float t_frac = 0; // 0-1 between start and end time of the camera (chosen here) const float t_traverse = raygen->t0 + t_frac * (raygen->t1 - raygen->t0); // simulation time when ray is sent float3 ray_origin = lerp(raygen->pos0, raygen->pos1, t_frac); float4 ray_quat = nlerp(raygen->rot0, raygen->rot1, t_frac); const float h_factor = camera.hFOV / CUDART_PI_F * 2.0; float3 forward; float3 left; float3 up; basis_from_quaternion(ray_quat, forward, left, up); float3 ray_direction = normalize(forward - d.x * left * h_factor + d.y * up * h_factor); PerRayData_semantic prd = default_semantic_prd(); unsigned int opt1; unsigned int opt2; pointer_as_ints(&prd, opt1, opt2); unsigned int raytype = (unsigned int)SEGMENTATION_RAY_TYPE; optixTrace(params.root, ray_origin, ray_direction, params.scene_epsilon, 1e16f, t_traverse, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype); camera.frame_buffer[image_index].x = prd.class_id; camera.frame_buffer[image_index].y = prd.instance_id; } /// Camera ray generation program using an FOV lens model extern "C" __global__ void __raygen__segmentation_fov_lens() { const RaygenParameters* raygen = (RaygenParameters*)optixGetSbtDataPointer(); const SemanticCameraParameters& camera = raygen->specific.segmentation; const uint3 idx = optixGetLaunchIndex(); const uint3 screen = optixGetLaunchDimensions(); const unsigned int image_index = screen.x * idx.y + idx.x; float2 d = (make_float2(idx.x, idx.y) + make_float2(0.5, 0.5)) / make_float2(screen.x, screen.y) * 2.f - make_float2(1.f); d.y *= (float)(screen.y) / (float)(screen.x); // correct for the aspect ratio if (abs(d.x) > 1e-5 || abs(d.y) > 1e-5) { float r1 = sqrtf(d.x * d.x + d.y * d.y); float r2 = tanf(r1 * tanf(camera.hFOV / 2.0)) / tanf(camera.hFOV / 2.0); float scaled_extent = tanf(tanf(camera.hFOV / 2.0)) / tanf(camera.hFOV / 2.0); d.x = d.x * (r2 / r1) / scaled_extent; d.y = d.y * (r2 / 
r1) / scaled_extent; } const float t_frac = 0; // 0-1 between start and end time of the camera (chosen here) const float t_traverse = raygen->t0 + t_frac * (raygen->t1 - raygen->t0); // simulation time when ray is sent float3 ray_origin = lerp(raygen->pos0, raygen->pos1, t_frac); float4 ray_quat = nlerp(raygen->rot0, raygen->rot1, t_frac); const float h_factor = camera.hFOV / CUDART_PI_F * 2.0; float3 forward; float3 left; float3 up; basis_from_quaternion(ray_quat, forward, left, up); float3 ray_direction = normalize(forward - d.x * left * h_factor + d.y * up * h_factor); PerRayData_semantic prd = default_semantic_prd(); unsigned int opt1; unsigned int opt2; pointer_as_ints(&prd, opt1, opt2); unsigned int raytype = (unsigned int)SEGMENTATION_RAY_TYPE; optixTrace(params.root, ray_origin, ray_direction, params.scene_epsilon, 1e16f, t_traverse, OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype); camera.frame_buffer[image_index].x = prd.class_id; camera.frame_buffer[image_index].y = prd.instance_id; }
6041b1b87e4aa4d08eeaef6ac5f3caa219f47e48.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <stdio.h> #include <time.h> #include <math.h> #define gpuCheck(f) { gpuCheckFunc((f), __FILE__, __LINE__); } inline void gpuCheckFunc(hipError_t err, const char *file, int line){ if (err != hipSuccess) { fprintf(stderr, "CUDA error (%s:%d): %s\n", file, line, hipGetErrorString(err)); exit(1); } } template <typename scalar_t> __device__ scalar_t reduce_add_block(scalar_t x, scalar_t *s_tmp) { // First reduce across the warp: for (int m = 1; m < 32; m <<= 1){ x += __shfl_xor_sync(0xffffffff, x, m); } // Now reduce across the block if (threadIdx.x % 32 == 0) { s_tmp[threadIdx.x / 32] = x; } __syncthreads(); if (threadIdx.x < blockDim.x / 32) { x = s_tmp[threadIdx.x]; } else { x = 0.0; } for (int m = 1; m < blockDim.x / 32; m <<= 1){ x += __shfl_xor_sync(0xffffffff, x, m); } return x; } __device__ void reduce_add_global(float x, float *s_tmp, float *g_out) { x = reduce_add_block(x, s_tmp); if (threadIdx.x == 0) { atomicAdd(g_out, x); } } #if __CUDA_ARCH__ >= 600 __device__ void reduce_add_global(double x, double *s_tmp, double *g_out) { x = reduce_add_block(x, s_tmp); if (threadIdx.x == 0) { atomicAdd(g_out, x); } } #else __device__ double atomicAddDouble(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } __device__ void reduce_add_global(double x, double *s_tmp, double *g_out) { x = reduce_add_block(x, s_tmp); if (threadIdx.x == 0) { atomicAddDouble(g_out, x); } } #endif // Saturates the GPU global memory bandwidth with very low utilization of the ALUs. 
// To get better efficiency we would need to fuse multiple layers together to // reduce the amount of GPU memory loads and stores. template <typename scalar_t> __global__ void cuda_butterfly_forward_slow_kernel( const scalar_t *data_in, const scalar_t *angles, scalar_t *data_out, int data_stride, int half_width ) { // Load the angle for this thread's switch, and compute the corresponding weights. scalar_t angle = angles[blockIdx.y]; scalar_t a = cos(angle); scalar_t b = sin(angle); // Load the input data from GPU global memory int data_idx_in = 2 * blockIdx.y * data_stride + threadIdx.x + blockDim.x * blockIdx.x; scalar_t x0 = data_in[data_idx_in]; scalar_t y0 = data_in[data_idx_in + data_stride]; // Compute the output data scalar_t x1 = a * x0 + b * y0; scalar_t y1 = -b * x0 + a * y0; // Write the output data to GPU global memory int data_idx_out = blockIdx.y * data_stride + threadIdx.x + blockDim.x * blockIdx.x; data_out[data_idx_out] = x1; data_out[data_idx_out + data_stride * half_width] = y1; } template <typename scalar_t> __global__ void cuda_butterfly_backward_slow_kernel( const scalar_t *data_in, const scalar_t *angles, const scalar_t *grad_in, scalar_t *grad_out, scalar_t *grad_angles_accum, int data_stride, int half_width ) { // Load the angle for this thread's switch, and compute the corresponding weights. 
scalar_t angle = angles[blockIdx.y]; scalar_t a = cos(angle); scalar_t b = sin(angle); // Load the input gradient int data_idx_in = blockIdx.y * data_stride + threadIdx.x + blockDim.x * blockIdx.x; scalar_t dx1 = grad_in[data_idx_in]; scalar_t dy1 = grad_in[data_idx_in + data_stride * half_width]; // Compute the output gradient for continuing backpropagation into earlier layers scalar_t dx0 = a * dx1 - b * dy1; scalar_t dy0 = b * dx1 + a * dy1; // Write the output gradient to GPU global memory int data_idx_out = 2 * blockIdx.y * data_stride + threadIdx.x + blockDim.x * blockIdx.x; grad_out[data_idx_out] = dx0; grad_out[data_idx_out + data_stride] = dy0; // Accumulate the gradient for the angles in the current layer __shared__ scalar_t tmp[32]; scalar_t x1 = data_in[data_idx_in]; scalar_t y1 = data_in[data_idx_in + data_stride * half_width]; scalar_t g = y1*dx1 - x1*dy1; reduce_add_global(g, tmp, &grad_angles_accum[blockIdx.y]); } void cuda_butterfly_forward_slow(at::Tensor data_in, at::Tensor angles, at::Tensor data_out) { int dimBlock = 256; dim3 dimGrid(data_in.size(1) / dimBlock, angles.size(0)); AT_DISPATCH_FLOATING_TYPES(data_in.type(), "test_cuda_double", ([&] { hipLaunchKernelGGL(( cuda_butterfly_forward_slow_kernel<scalar_t>), dim3(dimGrid), dim3(dimBlock), 0, 0, data_in.data<scalar_t>(), angles.data<scalar_t>(), data_out.data<scalar_t>(), data_in.size(1), data_in.size(0) / 2 ); gpuCheck( hipGetLastError() ) })); } void cuda_butterfly_backward_slow( at::Tensor data_in, at::Tensor angles, at::Tensor grad_in, at::Tensor grad_out, at::Tensor grad_angles_accum ) { int dimBlock = 256; dim3 dimGrid(data_in.size(1) / dimBlock, angles.size(0)); AT_DISPATCH_FLOATING_TYPES(data_in.type(), "test_cuda_double", ([&] { hipLaunchKernelGGL(( cuda_butterfly_backward_slow_kernel<scalar_t>), dim3(dimGrid), dim3(dimBlock), 0, 0, data_in.data<scalar_t>(), angles.data<scalar_t>(), grad_in.data<scalar_t>(), grad_out.data<scalar_t>(), grad_angles_accum.data<scalar_t>(), 
data_in.size(1), data_in.size(0) / 2 ); gpuCheck( hipGetLastError() ) })); }
6041b1b87e4aa4d08eeaef6ac5f3caa219f47e48.cu
#include <ATen/ATen.h> #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <stdio.h> #include <time.h> #include <math.h> #define gpuCheck(f) { gpuCheckFunc((f), __FILE__, __LINE__); } inline void gpuCheckFunc(cudaError_t err, const char *file, int line){ if (err != cudaSuccess) { fprintf(stderr, "CUDA error (%s:%d): %s\n", file, line, cudaGetErrorString(err)); exit(1); } } template <typename scalar_t> __device__ scalar_t reduce_add_block(scalar_t x, scalar_t *s_tmp) { // First reduce across the warp: for (int m = 1; m < 32; m <<= 1){ x += __shfl_xor_sync(0xffffffff, x, m); } // Now reduce across the block if (threadIdx.x % 32 == 0) { s_tmp[threadIdx.x / 32] = x; } __syncthreads(); if (threadIdx.x < blockDim.x / 32) { x = s_tmp[threadIdx.x]; } else { x = 0.0; } for (int m = 1; m < blockDim.x / 32; m <<= 1){ x += __shfl_xor_sync(0xffffffff, x, m); } return x; } __device__ void reduce_add_global(float x, float *s_tmp, float *g_out) { x = reduce_add_block(x, s_tmp); if (threadIdx.x == 0) { atomicAdd(g_out, x); } } #if __CUDA_ARCH__ >= 600 __device__ void reduce_add_global(double x, double *s_tmp, double *g_out) { x = reduce_add_block(x, s_tmp); if (threadIdx.x == 0) { atomicAdd(g_out, x); } } #else __device__ double atomicAddDouble(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } __device__ void reduce_add_global(double x, double *s_tmp, double *g_out) { x = reduce_add_block(x, s_tmp); if (threadIdx.x == 0) { atomicAddDouble(g_out, x); } } #endif // Saturates the GPU global memory bandwidth with very low utilization of the ALUs. 
// To get better efficiency we would need to fuse multiple layers together to // reduce the amount of GPU memory loads and stores. template <typename scalar_t> __global__ void cuda_butterfly_forward_slow_kernel( const scalar_t *data_in, const scalar_t *angles, scalar_t *data_out, int data_stride, int half_width ) { // Load the angle for this thread's switch, and compute the corresponding weights. scalar_t angle = angles[blockIdx.y]; scalar_t a = cos(angle); scalar_t b = sin(angle); // Load the input data from GPU global memory int data_idx_in = 2 * blockIdx.y * data_stride + threadIdx.x + blockDim.x * blockIdx.x; scalar_t x0 = data_in[data_idx_in]; scalar_t y0 = data_in[data_idx_in + data_stride]; // Compute the output data scalar_t x1 = a * x0 + b * y0; scalar_t y1 = -b * x0 + a * y0; // Write the output data to GPU global memory int data_idx_out = blockIdx.y * data_stride + threadIdx.x + blockDim.x * blockIdx.x; data_out[data_idx_out] = x1; data_out[data_idx_out + data_stride * half_width] = y1; } template <typename scalar_t> __global__ void cuda_butterfly_backward_slow_kernel( const scalar_t *data_in, const scalar_t *angles, const scalar_t *grad_in, scalar_t *grad_out, scalar_t *grad_angles_accum, int data_stride, int half_width ) { // Load the angle for this thread's switch, and compute the corresponding weights. 
scalar_t angle = angles[blockIdx.y]; scalar_t a = cos(angle); scalar_t b = sin(angle); // Load the input gradient int data_idx_in = blockIdx.y * data_stride + threadIdx.x + blockDim.x * blockIdx.x; scalar_t dx1 = grad_in[data_idx_in]; scalar_t dy1 = grad_in[data_idx_in + data_stride * half_width]; // Compute the output gradient for continuing backpropagation into earlier layers scalar_t dx0 = a * dx1 - b * dy1; scalar_t dy0 = b * dx1 + a * dy1; // Write the output gradient to GPU global memory int data_idx_out = 2 * blockIdx.y * data_stride + threadIdx.x + blockDim.x * blockIdx.x; grad_out[data_idx_out] = dx0; grad_out[data_idx_out + data_stride] = dy0; // Accumulate the gradient for the angles in the current layer __shared__ scalar_t tmp[32]; scalar_t x1 = data_in[data_idx_in]; scalar_t y1 = data_in[data_idx_in + data_stride * half_width]; scalar_t g = y1*dx1 - x1*dy1; reduce_add_global(g, tmp, &grad_angles_accum[blockIdx.y]); } void cuda_butterfly_forward_slow(at::Tensor data_in, at::Tensor angles, at::Tensor data_out) { int dimBlock = 256; dim3 dimGrid(data_in.size(1) / dimBlock, angles.size(0)); AT_DISPATCH_FLOATING_TYPES(data_in.type(), "test_cuda_double", ([&] { cuda_butterfly_forward_slow_kernel<scalar_t><<<dimGrid, dimBlock>>>( data_in.data<scalar_t>(), angles.data<scalar_t>(), data_out.data<scalar_t>(), data_in.size(1), data_in.size(0) / 2 ); gpuCheck( cudaGetLastError() ) })); } void cuda_butterfly_backward_slow( at::Tensor data_in, at::Tensor angles, at::Tensor grad_in, at::Tensor grad_out, at::Tensor grad_angles_accum ) { int dimBlock = 256; dim3 dimGrid(data_in.size(1) / dimBlock, angles.size(0)); AT_DISPATCH_FLOATING_TYPES(data_in.type(), "test_cuda_double", ([&] { cuda_butterfly_backward_slow_kernel<scalar_t><<<dimGrid, dimBlock>>>( data_in.data<scalar_t>(), angles.data<scalar_t>(), grad_in.data<scalar_t>(), grad_out.data<scalar_t>(), grad_angles_accum.data<scalar_t>(), data_in.size(1), data_in.size(0) / 2 ); gpuCheck( cudaGetLastError() ) })); }
fd8546559c243b315ae5ea2731a792e53ab0bc29.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <time.h> #include "cuStopwatch.cu" #define SHIFT 13 __global__ void transpose_1(const uint32_t* mat, uint32_t* mat_trans, uint32_t w, uint32_t h) { uint32_t xid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t yid = threadIdx.y + blockIdx.y * blockDim.y; if((xid < h)&&(yid < w)){ mat_trans[yid + w * xid] = mat[xid + w * yid]; } return; } __global__ void transpose_2(const uint32_t* mat, uint32_t* mat_trans, uint32_t w, uint32_t h) { uint32_t xid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t yid = threadIdx.y + blockIdx.y * blockDim.y; if((xid < h)&&(yid < w)){ mat_trans[xid + h * yid] = mat[yid + h * xid]; } return; } void randgen(uint32_t* arr, size_t count){ uint32_t state = time(NULL); for(uint32_t i = 0; i < count; i++){ state ^= state << 13; state ^= state >> 17; state ^= state << 5; arr[i] = state; } return; } int main() { // Allocate memory, filling in random data and transfer to device uint32_t *mat_host, *mat_dev, *mat_res_dev; const uint32_t mat_size = 1 << (SHIFT * 2); const uint32_t mat_side = 1 << SHIFT; hipHostMalloc((void**)&mat_host, mat_size*sizeof(uint32_t), hipHostMallocDefault); hipMalloc((void**)&mat_dev, mat_size*sizeof(uint32_t)); hipMalloc((void**)&mat_res_dev, mat_size*sizeof(uint32_t)); printf("Copying data to device\n"); randgen(mat_host, mat_size); hipMemcpy(mat_dev, mat_host, mat_size*sizeof(uint32_t), hipMemcpyHostToDevice); hipHostFree(mat_host); // Performing matrix transposition on a 2^13 * 2^13 matrix dim3 blocksize(32, 32); dim3 gridsize(mat_side / 32, mat_side / 32); printf("First method\n"); cuStopwatch sw1; sw1.start(); hipLaunchKernelGGL(( transpose_1), dim3(gridsize), dim3(blocksize), 0, 0, mat_dev, mat_res_dev, mat_side, mat_side); printf("%.4fms\n", sw1.stop()); printf("\nSecond method\n"); cuStopwatch sw2; sw2.start(); hipLaunchKernelGGL(( transpose_2), dim3(gridsize), dim3(blocksize), 
0, 0, mat_dev, mat_res_dev, mat_side, mat_side); printf("%.4fms\n", sw2.stop()); // Free memory hipFree(mat_dev); hipFree(mat_res_dev); return 0; }
fd8546559c243b315ae5ea2731a792e53ab0bc29.cu
#include <stdio.h> #include <cuda_runtime.h> #include <stdint.h> #include <time.h> #include "cuStopwatch.cu" #define SHIFT 13 __global__ void transpose_1(const uint32_t* mat, uint32_t* mat_trans, uint32_t w, uint32_t h) { uint32_t xid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t yid = threadIdx.y + blockIdx.y * blockDim.y; if((xid < h)&&(yid < w)){ mat_trans[yid + w * xid] = mat[xid + w * yid]; } return; } __global__ void transpose_2(const uint32_t* mat, uint32_t* mat_trans, uint32_t w, uint32_t h) { uint32_t xid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t yid = threadIdx.y + blockIdx.y * blockDim.y; if((xid < h)&&(yid < w)){ mat_trans[xid + h * yid] = mat[yid + h * xid]; } return; } void randgen(uint32_t* arr, size_t count){ uint32_t state = time(NULL); for(uint32_t i = 0; i < count; i++){ state ^= state << 13; state ^= state >> 17; state ^= state << 5; arr[i] = state; } return; } int main() { // Allocate memory, filling in random data and transfer to device uint32_t *mat_host, *mat_dev, *mat_res_dev; const uint32_t mat_size = 1 << (SHIFT * 2); const uint32_t mat_side = 1 << SHIFT; cudaHostAlloc((void**)&mat_host, mat_size*sizeof(uint32_t), cudaHostAllocDefault); cudaMalloc((void**)&mat_dev, mat_size*sizeof(uint32_t)); cudaMalloc((void**)&mat_res_dev, mat_size*sizeof(uint32_t)); printf("Copying data to device\n"); randgen(mat_host, mat_size); cudaMemcpy(mat_dev, mat_host, mat_size*sizeof(uint32_t), cudaMemcpyHostToDevice); cudaFreeHost(mat_host); // Performing matrix transposition on a 2^13 * 2^13 matrix dim3 blocksize(32, 32); dim3 gridsize(mat_side / 32, mat_side / 32); printf("First method\n"); cuStopwatch sw1; sw1.start(); transpose_1<<<gridsize, blocksize>>>(mat_dev, mat_res_dev, mat_side, mat_side); printf("%.4fms\n", sw1.stop()); printf("\nSecond method\n"); cuStopwatch sw2; sw2.start(); transpose_2<<<gridsize, blocksize>>>(mat_dev, mat_res_dev, mat_side, mat_side); printf("%.4fms\n", sw2.stop()); // Free memory cudaFree(mat_dev); 
cudaFree(mat_res_dev); return 0; }
c564a44175bb332f32a8cca649e727fe4f5bc65f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zmergeqmr.cu, normal z -> d, Mon Jun 25 18:24:25 2018 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from qmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_dqmr_1_kernel( int num_rows, int num_cols, double rho, double psi, double *y, double *z, double *v, double *w ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double ytmp = y[ i+j*num_rows ] / rho; y[ i+j*num_rows ] = ytmp; v[ i+j*num_rows ] = ytmp; double ztmp = z[ i+j*num_rows ] / psi; z[ i+j*num_rows ] = ztmp; w[ i+j*num_rows ] = ztmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: v = y / rho y = y / rho w = wt / psi z = z / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] rho double scalar @param[in] psi double scalar @param[in,out] y magmaDouble_ptr vector @param[in,out] z magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_1( magma_int_t num_rows, magma_int_t num_cols, double rho, double psi, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr v, magmaDouble_ptr w, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dqmr_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, rho, psi, y, z, v, w ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_2_kernel( int num_rows, int num_cols, double pde, double rde, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr p, magmaDouble_ptr q ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ p[ i+j*num_rows ] = y[ i+j*num_rows ] - pde * p[ i+j*num_rows ]; q[ i+j*num_rows ] = z[ i+j*num_rows ] - rde * q[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: p = y - pde * p q = z - rde * q Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] pde double scalar @param[in] rde double scalar @param[in] y magmaDouble_ptr vector @param[in] z magmaDouble_ptr vector @param[in,out] p magmaDouble_ptr vector @param[in,out] q magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_2( magma_int_t num_rows, magma_int_t num_cols, double pde, double rde, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr p, magmaDouble_ptr q, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dqmr_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, pde, rde, y, z, p, q ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_3_kernel( int num_rows, int num_cols, double beta, double *pt, double *v, double *y ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ]; v[ i+j*num_rows ] = tmp; y[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: v = pt - beta * v y = v Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] pt magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] y magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_3( magma_int_t num_rows, magma_int_t num_cols, double beta, magmaDouble_ptr pt, magmaDouble_ptr v, magmaDouble_ptr y, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dqmr_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, pt, v, y ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_4_kernel( int num_rows, int num_cols, double eta, double *p, double *pt, double *d, double *s, double *x, double *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmpd = eta * p[ i+j*num_rows ]; d[ i+j*num_rows ] = tmpd; x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd; double tmps = eta * pt[ i+j*num_rows ]; s[ i+j*num_rows ] = tmps; r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps; } } } /** Purpose ------- Mergels multiple operations into one kernel: d = eta * p; s = eta * pt; x = x + d; r = r - s; Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] eta double scalar @param[in] p magmaDouble_ptr vector @param[in] pt magmaDouble_ptr vector @param[in,out] d magmaDouble_ptr vector @param[in,out] s magmaDouble_ptr vector @param[in,out] x magmaDouble_ptr vector @param[in,out] r magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_4( magma_int_t num_rows, magma_int_t num_cols, double eta, magmaDouble_ptr p, magmaDouble_ptr pt, magmaDouble_ptr d, magmaDouble_ptr s, magmaDouble_ptr x, magmaDouble_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dqmr_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, p, pt, d, s, x, r ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_5_kernel( int num_rows, int num_cols, double eta, double pds, double *p, double *pt, double *d, double *s, double *x, double *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmpd = eta * p[ i+j*num_rows ] + pds * d[ i+j*num_rows ]; d[ i+j*num_rows ] = tmpd; x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd; double tmps = eta * pt[ i+j*num_rows ] + pds * s[ i+j*num_rows ]; s[ i+j*num_rows ] = tmps; r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps; } } } /** Purpose ------- Mergels multiple operations into one kernel: d = eta * p + pds * d; s = eta * pt + pds * s; x = x + d; r = r - s; Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] eta double scalar @param[in] pds double scalar @param[in] p magmaDouble_ptr vector @param[in] pt magmaDouble_ptr vector @param[in,out] d magmaDouble_ptr vector @param[in,out] s magmaDouble_ptr vector @param[in,out] x magmaDouble_ptr vector @param[in,out] r magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_5( magma_int_t num_rows, magma_int_t num_cols, double eta, double pds, magmaDouble_ptr p, magmaDouble_ptr pt, magmaDouble_ptr d, magmaDouble_ptr s, magmaDouble_ptr x, magmaDouble_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dqmr_5_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, pds, p, pt, d, s, x, r ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_6_kernel( int num_rows, int num_cols, double beta, double rho, double psi, double *y, double *z, double *v, double *w, double *wt ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double wttmp = wt[ i+j*num_rows ] - MAGMA_D_CONJ( beta ) * w[ i+j*num_rows ]; wt[ i+j*num_rows ] = wttmp; double ztmp = wttmp / psi; z[ i+j*num_rows ] = ztmp; w[ i+j*num_rows ] = ztmp; double ytmp = y[ i+j*num_rows ] / rho; y[ i+j*num_rows ] = ytmp; v[ i+j*num_rows ] = ytmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: wt = wt - conj(beta) * w v = y / rho y = y / rho w = wt / psi z = wt / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] rho double scalar @param[in] psi double scalar @param[in,out] y magmaDouble_ptr vector @param[in,out] z magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in,out] wt magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_6( magma_int_t num_rows, magma_int_t num_cols, double beta, double rho, double psi, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr v, magmaDouble_ptr w, magmaDouble_ptr wt, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dqmr_6_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, rho, psi, y, z, v, w, wt ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_7_kernel( int num_rows, int num_cols, double beta, double *pt, double *v, double *vt ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ]; vt[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: vt = pt - beta * v Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] pt magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] vt magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_7( magma_int_t num_rows, magma_int_t num_cols, double beta, magmaDouble_ptr pt, magmaDouble_ptr v, magmaDouble_ptr vt, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dqmr_7_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, pt, v, vt ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_8_kernel( int num_rows, int num_cols, double rho, double psi, double *vt, double *wt, double *y, double *z, double *v, double *w ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ y[ i+j*num_rows ] = y[ i+j*num_rows ] / rho; v[ i+j*num_rows ] = vt[ i+j*num_rows ] / rho; z[ i+j*num_rows ] = z[ i+j*num_rows ] / psi; w[ i+j*num_rows ] = wt[ i+j*num_rows ] / psi; } } } /** Purpose ------- Mergels multiple operations into one kernel: v = y / rho y = y / rho w = wt / psi z = z / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] rho double scalar @param[in] psi double scalar @param[in] vt magmaDouble_ptr vector @param[in] wt magmaDouble_ptr vector @param[in,out] y magmaDouble_ptr vector @param[in,out] z magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_8( magma_int_t num_rows, magma_int_t num_cols, double rho, double psi, magmaDouble_ptr vt, magmaDouble_ptr wt, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr v, magmaDouble_ptr w, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dqmr_8_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, rho, psi, vt, wt, y, z, v, w ); return MAGMA_SUCCESS; }
c564a44175bb332f32a8cca649e727fe4f5bc65f.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zmergeqmr.cu, normal z -> d, Mon Jun 25 18:24:25 2018 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from qmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_dqmr_1_kernel( int num_rows, int num_cols, double rho, double psi, double *y, double *z, double *v, double *w ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double ytmp = y[ i+j*num_rows ] / rho; y[ i+j*num_rows ] = ytmp; v[ i+j*num_rows ] = ytmp; double ztmp = z[ i+j*num_rows ] / psi; z[ i+j*num_rows ] = ztmp; w[ i+j*num_rows ] = ztmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: v = y / rho y = y / rho w = wt / psi z = z / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] rho double scalar @param[in] psi double scalar @param[in,out] y magmaDouble_ptr vector @param[in,out] z magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_1( magma_int_t num_rows, magma_int_t num_cols, double rho, double psi, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr v, magmaDouble_ptr w, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dqmr_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, rho, psi, y, z, v, w ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_2_kernel( int num_rows, int num_cols, double pde, double rde, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr p, magmaDouble_ptr q ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ p[ i+j*num_rows ] = y[ i+j*num_rows ] - pde * p[ i+j*num_rows ]; q[ i+j*num_rows ] = z[ i+j*num_rows ] - rde * q[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: p = y - pde * p q = z - rde * q Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] pde double scalar @param[in] rde double scalar @param[in] y magmaDouble_ptr vector @param[in] z magmaDouble_ptr vector @param[in,out] p magmaDouble_ptr vector @param[in,out] q magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_2( magma_int_t num_rows, magma_int_t num_cols, double pde, double rde, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr p, magmaDouble_ptr q, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dqmr_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, pde, rde, y, z, p, q ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_3_kernel( int num_rows, int num_cols, double beta, double *pt, double *v, double *y ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ]; v[ i+j*num_rows ] = tmp; y[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: v = pt - beta * v y = v Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] pt magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] y magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_3( magma_int_t num_rows, magma_int_t num_cols, double beta, magmaDouble_ptr pt, magmaDouble_ptr v, magmaDouble_ptr y, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dqmr_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, pt, v, y ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_4_kernel( int num_rows, int num_cols, double eta, double *p, double *pt, double *d, double *s, double *x, double *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmpd = eta * p[ i+j*num_rows ]; d[ i+j*num_rows ] = tmpd; x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd; double tmps = eta * pt[ i+j*num_rows ]; s[ i+j*num_rows ] = tmps; r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps; } } } /** Purpose ------- Mergels multiple operations into one kernel: d = eta * p; s = eta * pt; x = x + d; r = r - s; Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] eta double scalar @param[in] p magmaDouble_ptr vector @param[in] pt magmaDouble_ptr vector @param[in,out] d magmaDouble_ptr vector @param[in,out] s magmaDouble_ptr vector @param[in,out] x magmaDouble_ptr vector @param[in,out] r magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_4( magma_int_t num_rows, magma_int_t num_cols, double eta, magmaDouble_ptr p, magmaDouble_ptr pt, magmaDouble_ptr d, magmaDouble_ptr s, magmaDouble_ptr x, magmaDouble_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dqmr_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, p, pt, d, s, x, r ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_5_kernel( int num_rows, int num_cols, double eta, double pds, double *p, double *pt, double *d, double *s, double *x, double *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmpd = eta * p[ i+j*num_rows ] + pds * d[ i+j*num_rows ]; d[ i+j*num_rows ] = tmpd; x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd; double tmps = eta * pt[ i+j*num_rows ] + pds * s[ i+j*num_rows ]; s[ i+j*num_rows ] = tmps; r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps; } } } /** Purpose ------- Mergels multiple operations into one kernel: d = eta * p + pds * d; s = eta * pt + pds * s; x = x + d; r = r - s; Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] eta double scalar @param[in] pds double scalar @param[in] p magmaDouble_ptr vector @param[in] pt magmaDouble_ptr vector @param[in,out] d magmaDouble_ptr vector @param[in,out] s magmaDouble_ptr vector @param[in,out] x magmaDouble_ptr vector @param[in,out] r magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_5( magma_int_t num_rows, magma_int_t num_cols, double eta, double pds, magmaDouble_ptr p, magmaDouble_ptr pt, magmaDouble_ptr d, magmaDouble_ptr s, magmaDouble_ptr x, magmaDouble_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dqmr_5_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, pds, p, pt, d, s, x, r ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_6_kernel( int num_rows, int num_cols, double beta, double rho, double psi, double *y, double *z, double *v, double *w, double *wt ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double wttmp = wt[ i+j*num_rows ] - MAGMA_D_CONJ( beta ) * w[ i+j*num_rows ]; wt[ i+j*num_rows ] = wttmp; double ztmp = wttmp / psi; z[ i+j*num_rows ] = ztmp; w[ i+j*num_rows ] = ztmp; double ytmp = y[ i+j*num_rows ] / rho; y[ i+j*num_rows ] = ytmp; v[ i+j*num_rows ] = ytmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: wt = wt - conj(beta) * w v = y / rho y = y / rho w = wt / psi z = wt / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] rho double scalar @param[in] psi double scalar @param[in,out] y magmaDouble_ptr vector @param[in,out] z magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in,out] wt magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_6( magma_int_t num_rows, magma_int_t num_cols, double beta, double rho, double psi, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr v, magmaDouble_ptr w, magmaDouble_ptr wt, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dqmr_6_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, rho, psi, y, z, v, w, wt ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_7_kernel( int num_rows, int num_cols, double beta, double *pt, double *v, double *vt ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ]; vt[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: vt = pt - beta * v Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] pt magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] vt magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_7( magma_int_t num_rows, magma_int_t num_cols, double beta, magmaDouble_ptr pt, magmaDouble_ptr v, magmaDouble_ptr vt, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dqmr_7_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, pt, v, vt ); return MAGMA_SUCCESS; } __global__ void magma_dqmr_8_kernel( int num_rows, int num_cols, double rho, double psi, double *vt, double *wt, double *y, double *z, double *v, double *w ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ y[ i+j*num_rows ] = y[ i+j*num_rows ] / rho; v[ i+j*num_rows ] = vt[ i+j*num_rows ] / rho; z[ i+j*num_rows ] = z[ i+j*num_rows ] / psi; w[ i+j*num_rows ] = wt[ i+j*num_rows ] / psi; } } } /** Purpose ------- Mergels multiple operations into one kernel: v = y / rho y = y / rho w = wt / psi z = z / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] rho double scalar @param[in] psi double scalar @param[in] vt magmaDouble_ptr vector @param[in] wt magmaDouble_ptr vector @param[in,out] y magmaDouble_ptr vector @param[in,out] z magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dqmr_8( magma_int_t num_rows, magma_int_t num_cols, double rho, double psi, magmaDouble_ptr vt, magmaDouble_ptr wt, magmaDouble_ptr y, magmaDouble_ptr z, magmaDouble_ptr v, magmaDouble_ptr w, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dqmr_8_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, rho, psi, vt, wt, y, z, v, w ); return MAGMA_SUCCESS; }
c1b2cfef1ecbfd73532479be47a9e1777a228e4a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Shuffle intrinsics CUDA Sample // This sample demonstrates the use of the shuffle intrinsic // First, a simple example of a prefix sum using the shuffle to // perform a scan operation is provided. // Secondly, a more involved example of computing an integral image // using the shuffle intrinsic is provided, where the shuffle // scan operation and shuffle xor operations are used #include <stdio.h> // CUDA Runtime #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <device_launch_parameters.h> // Utilities and system includes #include <helper_cuda.h> #include <helper_functions.h> //#include "shfl_integral_image.cuh" // Scan using shfl - takes log2(n) steps // This function demonstrates basic use of the shuffle intrinsic, __shfl_up, // to perform a scan operation across a block. // First, it performs a scan (prefix sum in this case) inside a warp // Then to continue the scan operation across the block, // each warp's sum is placed into shared memory. A single warp // then performs a shuffle scan on that shared memory. The results // are then uniformly added to each warp's threads. // This pyramid type approach is continued by placing each block's // final sum in global memory and prefix summing that via another kernel call, then // uniformly adding across the input data via the uniform_add<<<>>> kernel. 
__global__ void shfl_scan_test(int *data, int width, int *partial_sums = NULL) { // block extern __shared__ int sums[]; // id int id = ((blockIdx.x * blockDim.x) + threadIdx.x); // ID int lane_id = id % warpSize; // blockID int warp_id = threadIdx.x / warpSize; // Below is the basic structure of using a shfl instruction // for a scan. // Record "value" as a variable - we accumulate it along the way int value = data[id]; // Now accumulate in log steps up the chain // compute sums, with another thread's value who is // distance delta away (i). Note // those threads where the thread 'i' away would have // been out of bounds of the warp are unaffected. This // creates the scan sum. #pragma unroll // for (int i = 1; i <= width; i *= 2) { // __shfl_up(, [1, 2, 4, ...,32], 32) int n = __shfl_up(value, i, width); // if (lane_id >= i) value += n; } // value now holds the scan value for the individual thread // next sum the largest values for each warp // write the sum of the warp to smem // (31) if (threadIdx.x % warpSize == warpSize - 1) { sums[warp_id] = value; } __syncthreads(); // // scan sum the warp sums // the same shfl scan operation, but performed on warp sums // // 0 // // if (warp_id == 0 && lane_id < (blockDim.x / warpSize)) { int warp_sum = sums[lane_id]; for (int i = 1; i <= width; i *= 2) { int n = __shfl_up(warp_sum, i, width); if (lane_id >= i) warp_sum += n; } sums[lane_id] = warp_sum; } __syncthreads(); // perform a uniform add across warps in the block // read neighboring warp's sum and add it to threads value // int blockSum = 0; // scan if (warp_id > 0) { blockSum = sums[warp_id - 1]; } // value += blockSum; // data[id] = value; // last thread has sum, write write out the block's sum // if (partial_sums != NULL && threadIdx.x == blockDim.x - 1) { partial_sums[blockIdx.x] = value; } } // Uniform add: add partial sums array __global__ void uniform_add(int *data, int *partial_sums, int len) { __shared__ int buf; int id = ((blockIdx.x * blockDim.x) + 
threadIdx.x); if (id > len) return; if (threadIdx.x == 0) { buf = partial_sums[blockIdx.x]; } __syncthreads(); data[id] += buf; } static unsigned int iDivUp(unsigned int dividend, unsigned int divisor) { return ((dividend % divisor) == 0) ? (dividend / divisor) : (dividend / divisor + 1); } // This function verifies the shuffle scan result, for the simple // prefix sum case. bool CPUverify(int *h_data, int *h_result, int n_elements) { // cpu verify for (int i = 0; i < n_elements - 1; i++) { h_data[i + 1] = h_data[i] + h_data[i + 1]; } int diff = 0; for (int i = 0; i < n_elements; i++) { diff += h_data[i] - h_result[i]; } printf("CPU verify result diff (GPUvsCPU) = %d\n", diff); bool bTestResult = false; if (diff == 0) bTestResult = true; StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); for (int j = 0; j < 100; j++) for (int i = 0; i < n_elements - 1; i++) { h_data[i + 1] = h_data[i] + h_data[i + 1]; } sdkStopTimer(&hTimer); double cput = sdkGetTimerValue(&hTimer); printf("CPU sum (naive) took %f ms\n", cput / 100); return bTestResult; } // this verifies the row scan result for synthetic data of all 1's unsigned int verifyDataRowSums(unsigned int *h_image, int w, int h) { unsigned int diff = 0; for (int j = 0; j < h; j++) { for (int i = 0; i < w; i++) { int gold = i + 1; diff += abs((int)gold - (int)h_image[j*w + i]); } } return diff; } bool shuffle_simple_test(int argc, char **argv) { // int *h_data, *h_partial_sums, *h_result; // int *d_data, *d_partial_sums; // const int n_elements = 8192; // (bytes) int sz = sizeof(int)*n_elements; int cuda_device = 0; // GPU checkCudaErrors(hipHostMalloc((void **)&h_data, sizeof(int)*n_elements)); checkCudaErrors(hipHostMalloc((void **)&h_result, sizeof(int)*n_elements)); // for (int i = 0; i < n_elements; i++) { h_data[i] = 1; } // block int blockSize = 1024; // gridblock = N/blockSize int gridSize = n_elements / blockSize; // blockwarp int nWarps = blockSize / 32; // 
(bytes) = warp int shmem_sz = nWarps * sizeof(int); // = gridblock int n_partialSums = n_elements / blockSize; // (bytes) int partial_sz = n_partialSums*sizeof(int); printf("Scan summation for %d elements, %d partial sums\n", n_elements, n_elements / blockSize); // int p_blockSize = min(n_partialSums, blockSize); int p_gridSize = iDivUp(n_partialSums, p_blockSize); printf("Partial summing %d elements with %d blocks of size %d\n", n_partialSums, p_gridSize, p_blockSize); // hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); float et = 0; float inc = 0; // checkCudaErrors(hipMalloc((void **)&d_data, sz)); checkCudaErrors(hipMalloc((void **)&d_partial_sums, partial_sz)); checkCudaErrors(hipMemset(d_partial_sums, 0, partial_sz)); checkCudaErrors(hipHostMalloc((void **)&h_partial_sums, partial_sz)); checkCudaErrors(hipMemcpy(d_data, h_data, sz, hipMemcpyHostToDevice)); checkCudaErrors(hipEventRecord(start, 0)); shfl_scan_test << <gridSize, blockSize, shmem_sz >> >(d_data, 32, d_partial_sums); shfl_scan_test << <p_gridSize, p_blockSize, shmem_sz >> >(d_partial_sums, 32); uniform_add << <gridSize - 1, blockSize >> >(d_data + blockSize, d_partial_sums, n_elements); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&inc, start, stop)); et += inc; checkCudaErrors(hipMemcpy(h_result, d_data, sz, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_partial_sums, d_partial_sums, partial_sz, hipMemcpyDeviceToHost)); printf("Test Sum: %d\n", h_partial_sums[n_partialSums - 1]); printf("Time (ms): %f\n", et); printf("%d elements scanned in %f ms -> %f MegaElements/s\n", n_elements, et, n_elements / (et / 1000.0f) / 1000000.0f); bool bTestResult = CPUverify(h_data, h_result, n_elements); checkCudaErrors(hipHostFree(h_data)); checkCudaErrors(hipHostFree(h_result)); checkCudaErrors(hipHostFree(h_partial_sums)); checkCudaErrors(hipFree(d_data)); 
checkCudaErrors(hipFree(d_partial_sums)); return bTestResult; } int main(int argc, char *argv[]) { // Initialization. The shuffle intrinsic is not available on SM < 3.0 // so waive the test if the hardware is not present. int cuda_device = 0; printf("Starting shfl_scan\n"); // use command-line specified CUDA device, otherwise use device with highest Gflops/s cuda_device = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDevice(&cuda_device)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device)); printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // __shfl intrinsic needs SM 3.0 or higher if (deviceProp.major < 3) { printf("> __shfl() intrinsic requires device SM 3.0+\n"); printf("> Waiving test.\n"); exit(EXIT_WAIVED); } bool bTestResult = true; bool simpleTest = shuffle_simple_test(argc, argv); //bool intTest = shuffle_integral_image_test(); //bTestResult = simpleTest & intTest; bTestResult = simpleTest; exit((bTestResult) ? EXIT_SUCCESS : EXIT_FAILURE); }
c1b2cfef1ecbfd73532479be47a9e1777a228e4a.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Shuffle intrinsics CUDA Sample // This sample demonstrates the use of the shuffle intrinsic // First, a simple example of a prefix sum using the shuffle to // perform a scan operation is provided. // Secondly, a more involved example of computing an integral image // using the shuffle intrinsic is provided, where the shuffle // scan operation and shuffle xor operations are used #include <stdio.h> // CUDA Runtime #include <cuda_runtime.h> #include <device_functions.h> #include <device_launch_parameters.h> // Utilities and system includes #include <helper_cuda.h> #include <helper_functions.h> //#include "shfl_integral_image.cuh" // Scan using shfl - takes log2(n) steps // This function demonstrates basic use of the shuffle intrinsic, __shfl_up, // to perform a scan operation across a block. // First, it performs a scan (prefix sum in this case) inside a warp // Then to continue the scan operation across the block, // each warp's sum is placed into shared memory. A single warp // then performs a shuffle scan on that shared memory. The results // are then uniformly added to each warp's threads. // This pyramid type approach is continued by placing each block's // final sum in global memory and prefix summing that via another kernel call, then // uniformly adding across the input data via the uniform_add<<<>>> kernel. 
__global__ void shfl_scan_test(int *data, int width, int *partial_sums = NULL) { // 申请共享内存,block之间的共享内存不可相互访问 extern __shared__ int sums[]; // 获取全局id int id = ((blockIdx.x * blockDim.x) + threadIdx.x); // 线程束内线程ID int lane_id = id % warpSize; // block内的局部线程束ID int warp_id = threadIdx.x / warpSize; // Below is the basic structure of using a shfl instruction // for a scan. // Record "value" as a variable - we accumulate it along the way int value = data[id]; // Now accumulate in log steps up the chain // compute sums, with another thread's value who is // distance delta away (i). Note // those threads where the thread 'i' away would have // been out of bounds of the warp are unaffected. This // creates the scan sum. #pragma unroll // 步长越来越长 for (int i = 1; i <= width; i *= 2) { // __shfl_up(变量, [1, 2, 4, ...,32], 32) int n = __shfl_up(value, i, width); // 没对齐的部分不用相加 if (lane_id >= i) value += n; } // value now holds the scan value for the individual thread // next sum the largest values for each warp // write the sum of the warp to smem // 每个线程束最后一个线程(31)将本线程束的归约值写入共享内存 if (threadIdx.x % warpSize == warpSize - 1) { sums[warp_id] = value; } __syncthreads(); // // scan sum the warp sums // the same shfl scan operation, but performed on warp sums // // 只有第0个线程工作 // 且数据落入线程内的线程才工作 // 最后归约结果写回共享内存中 if (warp_id == 0 && lane_id < (blockDim.x / warpSize)) { int warp_sum = sums[lane_id]; for (int i = 1; i <= width; i *= 2) { int n = __shfl_up(warp_sum, i, width); if (lane_id >= i) warp_sum += n; } sums[lane_id] = warp_sum; } __syncthreads(); // perform a uniform add across warps in the block // read neighboring warp's sum and add it to threads value // int blockSum = 0; // 非第一个线程束的每个线程获得该线程束scan到的值(读的值串一个值) if (warp_id > 0) { blockSum = sums[warp_id - 1]; } // 本线程持有值加上 value += blockSum; // 写出自己的值 data[id] = value; // last thread has sum, write write out the block's sum // 表明是第一次调用内核,需要写入部分核数组,供下次调用使用 if (partial_sums != NULL && threadIdx.x == blockDim.x - 1) { 
partial_sums[blockIdx.x] = value; } } // Uniform add: add partial sums array __global__ void uniform_add(int *data, int *partial_sums, int len) { __shared__ int buf; int id = ((blockIdx.x * blockDim.x) + threadIdx.x); if (id > len) return; if (threadIdx.x == 0) { buf = partial_sums[blockIdx.x]; } __syncthreads(); data[id] += buf; } static unsigned int iDivUp(unsigned int dividend, unsigned int divisor) { return ((dividend % divisor) == 0) ? (dividend / divisor) : (dividend / divisor + 1); } // This function verifies the shuffle scan result, for the simple // prefix sum case. bool CPUverify(int *h_data, int *h_result, int n_elements) { // cpu verify for (int i = 0; i < n_elements - 1; i++) { h_data[i + 1] = h_data[i] + h_data[i + 1]; } int diff = 0; for (int i = 0; i < n_elements; i++) { diff += h_data[i] - h_result[i]; } printf("CPU verify result diff (GPUvsCPU) = %d\n", diff); bool bTestResult = false; if (diff == 0) bTestResult = true; StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); for (int j = 0; j < 100; j++) for (int i = 0; i < n_elements - 1; i++) { h_data[i + 1] = h_data[i] + h_data[i + 1]; } sdkStopTimer(&hTimer); double cput = sdkGetTimerValue(&hTimer); printf("CPU sum (naive) took %f ms\n", cput / 100); return bTestResult; } // this verifies the row scan result for synthetic data of all 1's unsigned int verifyDataRowSums(unsigned int *h_image, int w, int h) { unsigned int diff = 0; for (int j = 0; j < h; j++) { for (int i = 0; i < w; i++) { int gold = i + 1; diff += abs((int)gold - (int)h_image[j*w + i]); } } return diff; } bool shuffle_simple_test(int argc, char **argv) { // 主机输入数组、部分和、结果 int *h_data, *h_partial_sums, *h_result; // 设备输入数组、部分和 int *d_data, *d_partial_sums; // 归约元素 const int n_elements = 8192; // 归约元素的长度(bytes) int sz = sizeof(int)*n_elements; int cuda_device = 0; // GPU分配内存,长度相同 checkCudaErrors(cudaMallocHost((void **)&h_data, sizeof(int)*n_elements)); 
checkCudaErrors(cudaMallocHost((void **)&h_result, sizeof(int)*n_elements)); // 初始化数据 for (int i = 0; i < n_elements; i++) { h_data[i] = 1; } // block内线程数 int blockSize = 1024; // grid内block数 = N/blockSize int gridSize = n_elements / blockSize; // 一个block中warp的数 int nWarps = blockSize / 32; // 共享内存长度(bytes) = warp数 int shmem_sz = nWarps * sizeof(int); // 部分和数组长度 = grid内block数 int n_partialSums = n_elements / blockSize; // 部分和数组长度(bytes) int partial_sz = n_partialSums*sizeof(int); printf("Scan summation for %d elements, %d partial sums\n", n_elements, n_elements / blockSize); // 第二次调用内核时的线程块数 int p_blockSize = min(n_partialSums, blockSize); int p_gridSize = iDivUp(n_partialSums, p_blockSize); printf("Partial summing %d elements with %d blocks of size %d\n", n_partialSums, p_gridSize, p_blockSize); // 初始化计时器 cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); float et = 0; float inc = 0; // 申请设备内存 checkCudaErrors(cudaMalloc((void **)&d_data, sz)); checkCudaErrors(cudaMalloc((void **)&d_partial_sums, partial_sz)); checkCudaErrors(cudaMemset(d_partial_sums, 0, partial_sz)); checkCudaErrors(cudaMallocHost((void **)&h_partial_sums, partial_sz)); checkCudaErrors(cudaMemcpy(d_data, h_data, sz, cudaMemcpyHostToDevice)); checkCudaErrors(cudaEventRecord(start, 0)); shfl_scan_test << <gridSize, blockSize, shmem_sz >> >(d_data, 32, d_partial_sums); shfl_scan_test << <p_gridSize, p_blockSize, shmem_sz >> >(d_partial_sums, 32); uniform_add << <gridSize - 1, blockSize >> >(d_data + blockSize, d_partial_sums, n_elements); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&inc, start, stop)); et += inc; checkCudaErrors(cudaMemcpy(h_result, d_data, sz, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_partial_sums, d_partial_sums, partial_sz, cudaMemcpyDeviceToHost)); printf("Test Sum: %d\n", h_partial_sums[n_partialSums - 1]); printf("Time (ms): 
%f\n", et); printf("%d elements scanned in %f ms -> %f MegaElements/s\n", n_elements, et, n_elements / (et / 1000.0f) / 1000000.0f); bool bTestResult = CPUverify(h_data, h_result, n_elements); checkCudaErrors(cudaFreeHost(h_data)); checkCudaErrors(cudaFreeHost(h_result)); checkCudaErrors(cudaFreeHost(h_partial_sums)); checkCudaErrors(cudaFree(d_data)); checkCudaErrors(cudaFree(d_partial_sums)); return bTestResult; } int main(int argc, char *argv[]) { // Initialization. The shuffle intrinsic is not available on SM < 3.0 // so waive the test if the hardware is not present. int cuda_device = 0; printf("Starting shfl_scan\n"); // use command-line specified CUDA device, otherwise use device with highest Gflops/s cuda_device = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDevice(&cuda_device)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device)); printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // __shfl intrinsic needs SM 3.0 or higher if (deviceProp.major < 3) { printf("> __shfl() intrinsic requires device SM 3.0+\n"); printf("> Waiving test.\n"); exit(EXIT_WAIVED); } bool bTestResult = true; bool simpleTest = shuffle_simple_test(argc, argv); //bool intTest = shuffle_integral_image_test(); //bTestResult = simpleTest & intTest; bTestResult = simpleTest; exit((bTestResult) ? EXIT_SUCCESS : EXIT_FAILURE); }
3de762ae1ba2f9832959f46842e1aea4db9b86dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/TensorAdvancedIndexing.h> #include <type_traits> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/core/Array.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/ExpandUtils.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/KernelUtils.cuh> #include <c10/util/MaybeOwned.h> #include <THH/THHTensorInfo.cuh> #include <THH/THHThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <thrust/scan.h> namespace at { namespace native { static constexpr int launch_bound2 = 4; static constexpr int launch_size_nd = 128; template<int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, launch_bound2) __global__ void index_elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( index_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename func_t> void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) { int num_indices = index_size.size(); AT_ASSERT(num_indices == index_stride.size()); AT_ASSERT(num_indices == iter.ntensors() - 2); if (iter.numel() == 0) { return; } if 
(!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { gpu_index_kernel(sub_iter, index_size, index_stride, f); } return; } auto sizes = at::detail::Array<int64_t, 25>(0); auto strides = at::detail::Array<int64_t, 25>(0); auto index_ptrs = at::detail::Array<char*, 25>(nullptr); for (int i = 0; i < num_indices; i++) { sizes[i] = index_size[i]; strides[i] = index_stride[i]; index_ptrs[i] = (char*)iter.data_ptr(i + 2); } char* out_ptr = (char*)iter.data_ptr(0); char* in_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<3>(iter); launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) { auto offsets = offset_calc.get(idx); char* out_data = out_ptr + offsets[0]; char* in_data = in_ptr + offsets[1]; int64_t offset = 0; #pragma unroll for (int i = 0; i < num_indices; i++) { int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]); CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds"); if (index < 0) { index += sizes[i]; } offset += index * strides[i]; } f(out_data, in_data, offset); }); } // The kernels are templated on an opaque, self-aligned type of the correct // size to avoid redundant kernels for different types of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; template <typename scalar_t> void index_fill_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, scalar_t fill_val) { if (0 == iter.numel()) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds"); if (idx < 0) { idx += self_dim_size; } self_data[idx * self_dim_stride] = fill_val; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_copy_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2)); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); auto* __restrict__ source_data = 
reinterpret_cast<scalar_t*>(source_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds"); self_data[idx * self_dim_stride] = *source_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)out_data = *(scalar_t*)(in_data + offset); }); } template <typename scalar_t> void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)(out_data + offset) = *(scalar_t*)in_data; }); } static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_kernel_impl<dtype>(iter, index_size, index_stride); }); } static void index_fill_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_fill_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; auto fill_val = source.to<scalar_t>(); auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val); index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque); }); } static void index_copy_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { // See note [Writing Nondeterministic Operations] // Nondeterministic when index contains duplicate entries // this kernel will not be 
called when torch.use_deterministic_algorithms(True) AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_copy_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride); }); } static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) { TORCH_CHECK(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_put_kernel_impl<dtype>(iter, index_size, index_stride); }); } static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) { NoNamesGuard guard; TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool, "masked_select: expected BoolTensor or ByteTensor for mask"); TORCH_CHECK(self.scalar_type() == result.scalar_type(), "masked_select(): self and result must have the same scalar type"); auto mask_temp = (mask.dim() == 0) ? c10::MaybeOwned<Tensor>::owned(mask.unsqueeze(0)) : c10::MaybeOwned<Tensor>::borrowed(mask); auto self_temp = (self.dim() == 0) ? c10::MaybeOwned<Tensor>::owned(self.unsqueeze(0)) : c10::MaybeOwned<Tensor>::borrowed(self); // Cannot reassign to mask_temp and self_temp here! if they are // owning and expand_outplace returns a borrow, the returned borrow // would dangle. 
auto mask_self_expanded = expand_outplace(*mask_temp, *self_temp); at::native::index_out(result, *std::get<1>(mask_self_expanded), c10::List<c10::optional<at::Tensor>>({*std::get<0>(std::move(mask_self_expanded))})); return result; } Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) { namedinference::compute_broadcast_outnames(self, mask); Tensor result = at::empty({0}, self.options()); return masked_select_out_cuda_impl(result, self, mask); } Tensor & masked_select_out_cuda(const Tensor & self, const Tensor & mask, Tensor & result) { namedinference::compute_broadcast_outnames(self, mask); return masked_select_out_cuda_impl(result, self, mask); } template <typename scalar_t, typename index_t, typename func_t> void cuda_take_put_kernel( TensorIterator& iter, const Tensor& indexed, const func_t& f) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f); } return; } const auto numel = indexed.numel(); const bool is_contiguous = indexed.is_contiguous(); char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2>(iter); using uindex_t = std::make_unsigned_t<index_t>; // OffsetCalculator needs the sizes and strides reveresed const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend()); const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend()); const auto* indexed_strides_data = indexed_strides.data(); const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(), indexed_sizes.data(), &indexed_strides_data); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]); const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); 
CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds"); index_t offset = static_cast<index_t>(idx); if (offset < 0) { offset += numel; } if (!is_contiguous) { offset = offset_indexed.get(offset)[0]; } f(iterated, offset); }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void put_kernel(TensorIterator& iter, const Tensor& output, const bool accumulate) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] { // Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd` AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long, "put_cuda_index", [&] { auto* __restrict__ indexed_ptr = output.template data<scalar_t>(); if (accumulate) { const auto numel = output.numel(); cuda_take_put_kernel<scalar_t, index_t>(iter, output, [numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated); }); } else { cuda_take_put_kernel<scalar_t, index_t>(iter, output, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { indexed_ptr[offset] = iterated; }); } }); }); } void take_kernel( TensorIterator& iter, const Tensor& input) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] { // Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? 
ScalarType::Int : ScalarType::Long, "take_cuda_index", [&] { const auto* __restrict__ indexed_ptr = input.template data<scalar_t>(); cuda_take_put_kernel<scalar_t, index_t>(iter, input, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { iterated = indexed_ptr[offset]; }); }); }); } namespace { template <typename mask_t> void masked_scatter_cuda_impl(Tensor& self, const Tensor& mask, const Tensor& source){ auto srcSize = source.numel(); // Determine our output size auto totalElements = mask.sum().item<int64_t>(); // The number of `1` elements present in the mask must be <= the // number of elements available in `src` TORCH_CHECK(totalElements <= srcSize, "source nElements must be == mask `1` elements"); auto mask_cont = mask.contiguous(); // Use a prefix sum to determine the output locations of the masked elements auto maskPrefixSum = at::empty_like(mask, mask.options().dtype(kLong)); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); thrust::device_ptr<mask_t> maskData(mask_cont.data_ptr<mask_t>()); thrust::device_ptr<int64_t> maskPrefixSumData( maskPrefixSum.data_ptr<int64_t>()); // Reference for using static_cast on `init_value`: // https://github.com/NVIDIA/thrust/issues/1379 thrust::exclusive_scan( thrust::hip::par(allocator).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()), maskData, maskData + mask_cont.numel(), maskPrefixSumData, static_cast<int64_t>(0)); // We are getting elements from `src` based on an offset from // `maskPrefixSum`, so that should be made contiguous too auto source_contig = source.contiguous(); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_borrowed_output(self) .add_borrowed_input(self) .add_borrowed_input(mask_cont) .add_borrowed_input(maskPrefixSum) .build(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half, self.scalar_type(), "masked_scatter_", [&]() { auto source_ptr = 
source_contig.data_ptr<scalar_t>(); gpu_kernel( iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t { if (mask) { return source_ptr[maskPrefixSum]; } return a; }); hipGetLastError(); }); } } // anonymous namespace Tensor & masked_scatter__cuda(Tensor& self, const Tensor& mask, const Tensor& source) { at::assert_no_internal_overlap(self); TORCH_CHECK( self.scalar_type() == source.scalar_type(), "masked_scatter: expected self and source to have same dtypes but got", self.scalar_type(), " and ", source.scalar_type()); TensorArg self_arg{self, "self", 1}; TensorArg mask_arg{mask, "mask", 2}; TensorArg source_arg{source, "source", 3}; checkAllSameGPU(__func__, {self_arg, mask_arg, source_arg}); c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_scatter_"); if (b_mask->dtype() == ScalarType::Byte) { TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated," \ "please use a mask with dtype torch.bool instead."); } auto mask_dtype = b_mask->scalar_type(); if (mask_dtype == ScalarType::Bool) { masked_scatter_cuda_impl<bool>(self, *b_mask, source); } else { masked_scatter_cuda_impl<uint8_t>(self, *b_mask, source); } return self; } REGISTER_DISPATCH(index_stub, &index_kernel); REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel); REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel); REGISTER_DISPATCH(index_put_stub, &index_put_kernel); REGISTER_DISPATCH(put_stub, &put_kernel); REGISTER_DISPATCH(take_stub, &take_kernel); }} // namespace at::native
3de762ae1ba2f9832959f46842e1aea4db9b86dd.cu
#include <ATen/native/TensorAdvancedIndexing.h> #include <type_traits> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/core/Array.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/ExpandUtils.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/KernelUtils.cuh> #include <c10/util/MaybeOwned.h> #include <THC/THCTensorInfo.cuh> #include <THC/THCThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <thrust/scan.h> namespace at { namespace native { static constexpr int launch_bound2 = 4; static constexpr int launch_size_nd = 128; template<int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, launch_bound2) __global__ void index_elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::cuda::getCurrentCUDAStream(); index_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename func_t> void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) { int num_indices = index_size.size(); AT_ASSERT(num_indices == index_stride.size()); AT_ASSERT(num_indices == iter.ntensors() - 2); if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { gpu_index_kernel(sub_iter, index_size, index_stride, f); } 
return; } auto sizes = at::detail::Array<int64_t, 25>(0); auto strides = at::detail::Array<int64_t, 25>(0); auto index_ptrs = at::detail::Array<char*, 25>(nullptr); for (int i = 0; i < num_indices; i++) { sizes[i] = index_size[i]; strides[i] = index_stride[i]; index_ptrs[i] = (char*)iter.data_ptr(i + 2); } char* out_ptr = (char*)iter.data_ptr(0); char* in_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<3>(iter); launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) { auto offsets = offset_calc.get(idx); char* out_data = out_ptr + offsets[0]; char* in_data = in_ptr + offsets[1]; int64_t offset = 0; #pragma unroll for (int i = 0; i < num_indices; i++) { int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]); CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds"); if (index < 0) { index += sizes[i]; } offset += index * strides[i]; } f(out_data, in_data, offset); }); } // The kernels are templated on an opaque, self-aligned type of the correct // size to avoid redundant kernels for different types of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; template <typename scalar_t> void index_fill_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, scalar_t fill_val) { if (0 == iter.numel()) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds"); if (idx < 0) { idx += self_dim_size; } self_data[idx * self_dim_stride] = fill_val; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_copy_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2)); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); auto* __restrict__ source_data = 
reinterpret_cast<scalar_t*>(source_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds"); self_data[idx * self_dim_stride] = *source_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)out_data = *(scalar_t*)(in_data + offset); }); } template <typename scalar_t> void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)(out_data + offset) = *(scalar_t*)in_data; }); } static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_kernel_impl<dtype>(iter, index_size, index_stride); }); } static void index_fill_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_fill_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; auto fill_val = source.to<scalar_t>(); auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val); index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque); }); } static void index_copy_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { // See note [Writing Nondeterministic Operations] // Nondeterministic when index contains duplicate entries // this kernel will not be 
called when torch.use_deterministic_algorithms(True) AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_copy_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride); }); } static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) { TORCH_CHECK(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_put_kernel_impl<dtype>(iter, index_size, index_stride); }); } static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) { NoNamesGuard guard; TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool, "masked_select: expected BoolTensor or ByteTensor for mask"); TORCH_CHECK(self.scalar_type() == result.scalar_type(), "masked_select(): self and result must have the same scalar type"); auto mask_temp = (mask.dim() == 0) ? c10::MaybeOwned<Tensor>::owned(mask.unsqueeze(0)) : c10::MaybeOwned<Tensor>::borrowed(mask); auto self_temp = (self.dim() == 0) ? c10::MaybeOwned<Tensor>::owned(self.unsqueeze(0)) : c10::MaybeOwned<Tensor>::borrowed(self); // Cannot reassign to mask_temp and self_temp here! if they are // owning and expand_outplace returns a borrow, the returned borrow // would dangle. 
auto mask_self_expanded = expand_outplace(*mask_temp, *self_temp); at::native::index_out(result, *std::get<1>(mask_self_expanded), c10::List<c10::optional<at::Tensor>>({*std::get<0>(std::move(mask_self_expanded))})); return result; } Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) { namedinference::compute_broadcast_outnames(self, mask); Tensor result = at::empty({0}, self.options()); return masked_select_out_cuda_impl(result, self, mask); } Tensor & masked_select_out_cuda(const Tensor & self, const Tensor & mask, Tensor & result) { namedinference::compute_broadcast_outnames(self, mask); return masked_select_out_cuda_impl(result, self, mask); } template <typename scalar_t, typename index_t, typename func_t> void cuda_take_put_kernel( TensorIterator& iter, const Tensor& indexed, const func_t& f) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f); } return; } const auto numel = indexed.numel(); const bool is_contiguous = indexed.is_contiguous(); char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2>(iter); using uindex_t = std::make_unsigned_t<index_t>; // OffsetCalculator needs the sizes and strides reveresed const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend()); const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend()); const auto* indexed_strides_data = indexed_strides.data(); const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(), indexed_sizes.data(), &indexed_strides_data); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]); const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); 
CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds"); index_t offset = static_cast<index_t>(idx); if (offset < 0) { offset += numel; } if (!is_contiguous) { offset = offset_indexed.get(offset)[0]; } f(iterated, offset); }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void put_kernel(TensorIterator& iter, const Tensor& output, const bool accumulate) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] { // Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd` AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long, "put_cuda_index", [&] { auto* __restrict__ indexed_ptr = output.template data<scalar_t>(); if (accumulate) { const auto numel = output.numel(); cuda_take_put_kernel<scalar_t, index_t>(iter, output, [numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated); }); } else { cuda_take_put_kernel<scalar_t, index_t>(iter, output, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { indexed_ptr[offset] = iterated; }); } }); }); } void take_kernel( TensorIterator& iter, const Tensor& input) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] { // Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? 
ScalarType::Int : ScalarType::Long, "take_cuda_index", [&] { const auto* __restrict__ indexed_ptr = input.template data<scalar_t>(); cuda_take_put_kernel<scalar_t, index_t>(iter, input, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { iterated = indexed_ptr[offset]; }); }); }); } namespace { template <typename mask_t> void masked_scatter_cuda_impl(Tensor& self, const Tensor& mask, const Tensor& source){ auto srcSize = source.numel(); // Determine our output size auto totalElements = mask.sum().item<int64_t>(); // The number of `1` elements present in the mask must be <= the // number of elements available in `src` TORCH_CHECK(totalElements <= srcSize, "source nElements must be == mask `1` elements"); auto mask_cont = mask.contiguous(); // Use a prefix sum to determine the output locations of the masked elements auto maskPrefixSum = at::empty_like(mask, mask.options().dtype(kLong)); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); thrust::device_ptr<mask_t> maskData(mask_cont.data_ptr<mask_t>()); thrust::device_ptr<int64_t> maskPrefixSumData( maskPrefixSum.data_ptr<int64_t>()); // Reference for using static_cast on `init_value`: // https://github.com/NVIDIA/thrust/issues/1379 thrust::exclusive_scan( thrust::cuda::par(allocator).on(c10::cuda::getCurrentCUDAStream()), maskData, maskData + mask_cont.numel(), maskPrefixSumData, static_cast<int64_t>(0)); // We are getting elements from `src` based on an offset from // `maskPrefixSum`, so that should be made contiguous too auto source_contig = source.contiguous(); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_borrowed_output(self) .add_borrowed_input(self) .add_borrowed_input(mask_cont) .add_borrowed_input(maskPrefixSum) .build(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half, self.scalar_type(), "masked_scatter_", [&]() { auto source_ptr = 
source_contig.data_ptr<scalar_t>(); gpu_kernel( iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t { if (mask) { return source_ptr[maskPrefixSum]; } return a; }); cudaGetLastError(); }); } } // anonymous namespace Tensor & masked_scatter__cuda(Tensor& self, const Tensor& mask, const Tensor& source) { at::assert_no_internal_overlap(self); TORCH_CHECK( self.scalar_type() == source.scalar_type(), "masked_scatter: expected self and source to have same dtypes but got", self.scalar_type(), " and ", source.scalar_type()); TensorArg self_arg{self, "self", 1}; TensorArg mask_arg{mask, "mask", 2}; TensorArg source_arg{source, "source", 3}; checkAllSameGPU(__func__, {self_arg, mask_arg, source_arg}); c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_scatter_"); if (b_mask->dtype() == ScalarType::Byte) { TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated," \ "please use a mask with dtype torch.bool instead."); } auto mask_dtype = b_mask->scalar_type(); if (mask_dtype == ScalarType::Bool) { masked_scatter_cuda_impl<bool>(self, *b_mask, source); } else { masked_scatter_cuda_impl<uint8_t>(self, *b_mask, source); } return self; } REGISTER_DISPATCH(index_stub, &index_kernel); REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel); REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel); REGISTER_DISPATCH(index_put_stub, &index_put_kernel); REGISTER_DISPATCH(put_stub, &put_kernel); REGISTER_DISPATCH(take_stub, &take_kernel); }} // namespace at::native
31a69356543d6571c0946866dc09dd3f2de4599e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<cuda.h> #include <time.h> #define Mask_size 3 //filter size #define Width 1024 // image width #define Height 1024 // image height #define N (Width*Height) //---------------kernel------------------- __global__ void ConvolutionKernel (int *I_input, int *Mask1,int *Mask2,int *I_output1,int *I_output2) { /* Thread Row Index */ int Row = blockIdx.y * blockDim.y + threadIdx.y; /* Thread column Index */ int Col = blockIdx.x * blockDim.x + threadIdx.x; float value1 = 0; float value2 = 0; int Index = Row*Width+Col; //output Image index /* convolution */ for(int i=0; i<Mask_size; i++) { for(int j=0; j<Mask_size; j++) { int R_start = i + Row - 1; int C_start = j + Col - 1; if((C_start>= 0 && C_start < Width) && (R_start>= 0 && R_start < Height)) { value1 += Mask1[i * Mask_size + j] * I_input[R_start* Width + C_start]; value2 += Mask2[i * Mask_size + j] * I_input[R_start* Width + C_start]; } } } if((Row < Height) && (Col < Width)) { I_output1[Index] = value1; // convolved image I_output2[Index] = value2; } } //----------------------------main----------------------------------- int main(void) { //------------------------------------------------------------------- int *Image, *Output1,*Output2; int *mask1, *mask2; int SIZE= Width*Height*sizeof(int); int Row,Col; Image= (int *)malloc(SIZE); Output1= (int *)malloc(SIZE); Output2= (int *)malloc(SIZE); mask1= (int *)malloc(Mask_size*Mask_size*sizeof(int)); mask2= (int *)malloc(Mask_size*Mask_size*sizeof(int)); //------------------------------------------------------------------- int *d_image, *d_mask1,*d_mask2,*d_output1, *d_output2; /* pointer to device memory for input image, mask and output */ //----------------------------------------------------------- for(Row=0;Row<Width;Row++) for(Col=0;Col<Height;Col++) { Image[Row*Width+Col]=1; Output1[Row*Width+Col]=0; Output2[Row*Width+Col]=0; } 
//----------------------------------------------------------- for(Row=0;Row<Mask_size;Row++) for(Col=0;Col<Mask_size;Col++) { mask1[Row*Mask_size+Col]=1; mask2[Row*Mask_size+Col]=2; } //------------------------------------------------------ /* Device Memory Allocation */ hipMalloc(&d_image, (Width*Height)* sizeof(int)); hipMalloc(&d_output1, (Width*Height)* sizeof(int)); hipMalloc(&d_output2, (Width*Height)* sizeof(int)); hipMalloc(&d_mask1, (Mask_size*Mask_size)* sizeof(int)); hipMalloc(&d_mask2, (Mask_size*Mask_size)* sizeof(int)); //--------------------------------------------------------- hipEvent_t start, stop; // Cuda API to measure time for Cuda Kernel Execution. hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //-------------------------------------------------------- /*Copying Input Image to GPU Memory */ hipMemcpy(d_image, Image, (Width*Height)* sizeof(int), hipMemcpyHostToDevice); /*Copying Mask to GPU Memory */ hipMemcpy(d_mask1, mask1, (Mask_size*Mask_size)* sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_mask2, mask2, (Mask_size*Mask_size)* sizeof(int), hipMemcpyHostToDevice); /* Two Dimesional blocks with two dimensional threads */ dim3 grid(((Width)/Mask_size),((Height)/Mask_size)); /*Number of threads per block is 3x3=9 */ dim3 block(Mask_size,Mask_size); //--------------------------------------------- printf ("GPU Executing Convolution Kernel...\n") ; printf("\n"); //-------------------------------------------- /*Kernel Launch configuration*/ hipLaunchKernelGGL(( ConvolutionKernel) , dim3(grid), dim3(block) , 0, 0, d_image, d_mask1,d_mask2,d_output1, d_output2); /*copying output Image to Host Memory*/ hipMemcpy(Output1, d_output1, (Width*Height)* sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(Output2, d_output2, (Width*Height)* sizeof(int), hipMemcpyDeviceToHost); //------------------------------------------- hipEventRecord(stop); hipEventSynchronize(stop); // Blocks CPU execution until Device Kernel finishes its job. 
float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("GPU Execution Time for Convolution Kernel: %fn\n", milliseconds); //GPU Execution Time. printf("Effective Bandwidth (GB/s): %fn\n", N*4*2/milliseconds/1e6); //N*4 is the total number of Bytes transferred and (1+1)=2 is for read Input Image and write Output Image. printf("\n"); //------------------------------------------ free(Image); free(Output1); free(Output2); free(mask1); free(mask2); hipFree(d_image); hipFree(d_mask1); hipFree(d_mask2); hipFree(d_output1); hipFree(d_output2); return 0; }
31a69356543d6571c0946866dc09dd3f2de4599e.cu
#include<stdio.h> #include<stdlib.h> #include<cuda.h> #include <time.h> #define Mask_size 3 //filter size #define Width 1024 // image width #define Height 1024 // image height #define N (Width*Height) //---------------kernel------------------- __global__ void ConvolutionKernel (int *I_input, int *Mask1,int *Mask2,int *I_output1,int *I_output2) { /* Thread Row Index */ int Row = blockIdx.y * blockDim.y + threadIdx.y; /* Thread column Index */ int Col = blockIdx.x * blockDim.x + threadIdx.x; float value1 = 0; float value2 = 0; int Index = Row*Width+Col; //output Image index /* convolution */ for(int i=0; i<Mask_size; i++) { for(int j=0; j<Mask_size; j++) { int R_start = i + Row - 1; int C_start = j + Col - 1; if((C_start>= 0 && C_start < Width) && (R_start>= 0 && R_start < Height)) { value1 += Mask1[i * Mask_size + j] * I_input[R_start* Width + C_start]; value2 += Mask2[i * Mask_size + j] * I_input[R_start* Width + C_start]; } } } if((Row < Height) && (Col < Width)) { I_output1[Index] = value1; // convolved image I_output2[Index] = value2; } } //----------------------------main----------------------------------- int main(void) { //------------------------------------------------------------------- int *Image, *Output1,*Output2; int *mask1, *mask2; int SIZE= Width*Height*sizeof(int); int Row,Col; Image= (int *)malloc(SIZE); Output1= (int *)malloc(SIZE); Output2= (int *)malloc(SIZE); mask1= (int *)malloc(Mask_size*Mask_size*sizeof(int)); mask2= (int *)malloc(Mask_size*Mask_size*sizeof(int)); //------------------------------------------------------------------- int *d_image, *d_mask1,*d_mask2,*d_output1, *d_output2; /* pointer to device memory for input image, mask and output */ //----------------------------------------------------------- for(Row=0;Row<Width;Row++) for(Col=0;Col<Height;Col++) { Image[Row*Width+Col]=1; Output1[Row*Width+Col]=0; Output2[Row*Width+Col]=0; } //----------------------------------------------------------- for(Row=0;Row<Mask_size;Row++) 
for(Col=0;Col<Mask_size;Col++) { mask1[Row*Mask_size+Col]=1; mask2[Row*Mask_size+Col]=2; } //------------------------------------------------------ /* Device Memory Allocation */ cudaMalloc(&d_image, (Width*Height)* sizeof(int)); cudaMalloc(&d_output1, (Width*Height)* sizeof(int)); cudaMalloc(&d_output2, (Width*Height)* sizeof(int)); cudaMalloc(&d_mask1, (Mask_size*Mask_size)* sizeof(int)); cudaMalloc(&d_mask2, (Mask_size*Mask_size)* sizeof(int)); //--------------------------------------------------------- cudaEvent_t start, stop; // Cuda API to measure time for Cuda Kernel Execution. cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); //-------------------------------------------------------- /*Copying Input Image to GPU Memory */ cudaMemcpy(d_image, Image, (Width*Height)* sizeof(int), cudaMemcpyHostToDevice); /*Copying Mask to GPU Memory */ cudaMemcpy(d_mask1, mask1, (Mask_size*Mask_size)* sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_mask2, mask2, (Mask_size*Mask_size)* sizeof(int), cudaMemcpyHostToDevice); /* Two Dimesional blocks with two dimensional threads */ dim3 grid(((Width)/Mask_size),((Height)/Mask_size)); /*Number of threads per block is 3x3=9 */ dim3 block(Mask_size,Mask_size); //--------------------------------------------- printf ("GPU Executing Convolution Kernel...\n") ; printf("\n"); //-------------------------------------------- /*Kernel Launch configuration*/ ConvolutionKernel <<<grid, block >>>(d_image, d_mask1,d_mask2,d_output1, d_output2); /*copying output Image to Host Memory*/ cudaMemcpy(Output1, d_output1, (Width*Height)* sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(Output2, d_output2, (Width*Height)* sizeof(int), cudaMemcpyDeviceToHost); //------------------------------------------- cudaEventRecord(stop); cudaEventSynchronize(stop); // Blocks CPU execution until Device Kernel finishes its job. 
float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("GPU Execution Time for Convolution Kernel: %fn\n", milliseconds); //GPU Execution Time. printf("Effective Bandwidth (GB/s): %fn\n", N*4*2/milliseconds/1e6); //N*4 is the total number of Bytes transferred and (1+1)=2 is for read Input Image and write Output Image. printf("\n"); //------------------------------------------ free(Image); free(Output1); free(Output2); free(mask1); free(mask2); cudaFree(d_image); cudaFree(d_mask1); cudaFree(d_mask2); cudaFree(d_output1); cudaFree(d_output2); return 0; }
11f435ed4f2f11f8d2a1fe9e1571350245d7b3ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void firstParallel() { printf("This is running in parallel.\n"); } int main() { hipLaunchKernelGGL(( firstParallel), dim3(5), dim3(5), 0, 0, ); hipDeviceSynchronize(); }
11f435ed4f2f11f8d2a1fe9e1571350245d7b3ba.cu
#include <stdio.h> __global__ void firstParallel() { printf("This is running in parallel.\n"); } int main() { firstParallel<<<5, 5>>>(); cudaDeviceSynchronize(); }
5c27a946911d6f1857f5a534b355e607ba4b2f7f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "N3LDG_cuda.h" #include <array> #include <boost/format.hpp> #include <cstdlib> #include <cstddef> #include <vector> #include <algorithm> #include <cmath> #include <cstdio> #include <rocblas.h> #include "Printf_cuda.cuh" #include "Printf_cuda.cu" #include "Memory_cuda.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "cnmem.h" #include <string> #include <utility> #include <cstring> #include <cstdint> #include <chrono> #include <thread> #include <numeric> #include <memory> #include "profiler.h" namespace n3ldg_cuda { using namespace std; using boost::format; #if USE_FLOAT #define cuda_sqrt(x) sqrtf(x) #define cuda_pow(x, y) powf(x, y) #define cuda_tanh(x) tanhf(x) #define cuda_exp(x) __expf(x) #define cuda_log(x) logf(x) #else #define cuda_sqrt(x) sqrt(x) #define cuda_pow(x, y) pow(x, y) #define cuda_tanh(x) tanh(x) #define cuda_exp(x) exp(x) #define cuda_log(x) log(x) #endif #define KERNEL_LOG #ifdef KERNEL_LOG #define KernelPrintLine(format, ...)\ {\ cuPrintf("block:x=%d,y=%d thread:x=%d,y=%d "#format"\n", blockIdx.x,\ blockIdx.y, threadIdx.x, threadIdx.y,__VA_ARGS__);\ } #else #define KernelPrintLine(format, ...) 
#endif constexpr int TPB = 1024; constexpr int BLOCK_COUNT = 56; void CallCuda(hipError_t status) { if (status != hipSuccess) { cout << "cuda error:" << hipGetErrorString(status) << endl; abort(); } } #if TEST_CUDA void CheckCudaError() { hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if (error != hipSuccess) { std::cout << "cuda error:" << hipGetErrorName(error) << std::endl; std::cout << "cuda error:" << hipGetErrorString(error) << std::endl; abort(); } } #else #define CheckCudaError() #endif void CallCnmem(cnmemStatus_t status) { assert(status == CNMEM_STATUS_SUCCESS); } void CallCublas(hipblasStatus_t status) { assert(status == HIPBLAS_STATUS_SUCCESS); } void CallCurand(hiprandStatus_t status) { assert(status == HIPRAND_STATUS_SUCCESS); } hipblasHandle_t& GetCublasHandle() { static hipblasHandle_t handle; static bool init; if (!init) { init = true; CallCublas(hipblasCreate(&handle)); } return handle; } hipError_t MyCudaMemcpy(void *dest, const void *src, size_t count, hipMemcpyKind kind) { hipError_t e; e = hipMemcpy(dest, src, count, kind); CallCuda(e); return e; } void NumberPointerArray::init(dtype **host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype*))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*), hipMemcpyHostToDevice)); this->len = len; } NumberPointerArray::~NumberPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } int NextTwoIntegerPowerNumber(int number) { int result = 1; while (number > result) { result <<= 1; } return result; } void NumberPointerPointerArray::init(dtype ***host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype**))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*), hipMemcpyHostToDevice)); this->len = len; } 
NumberPointerPointerArray::~NumberPointerPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void NumberArray::init(int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype))); this->len = len; } void NumberArray::init(dtype *host_arr, int len) { init(len); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype), hipMemcpyHostToDevice)); } NumberArray::~NumberArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceInt::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceInt::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(int), hipMemcpyDeviceToHost)); } void DeviceInt::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, &v, sizeof(int), hipMemcpyHostToDevice)); } DeviceInt::~DeviceInt() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceNumber::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceNumber::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(dtype), hipMemcpyDeviceToHost)); } DeviceNumber::~DeviceNumber() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void IntPointerArray::init(int **host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int*))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int*), hipMemcpyHostToDevice)); this->len = len; } IntPointerArray::~IntPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void IntArray::init(int *host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } 
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int), hipMemcpyHostToDevice)); this->len = len; } void IntArray::init(int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int))); this->len = len; } IntArray::~IntArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void BoolArray::init(bool *host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(bool))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool), hipMemcpyHostToDevice)); this->len = len; } void BoolArray::copyFromHost(bool *host_arr) { CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool), hipMemcpyHostToDevice)); } void BoolArray::copyToHost(bool *host_arr) { CallCuda(MyCudaMemcpy(host_arr, value, len * sizeof(bool), hipMemcpyDeviceToHost)); } BoolArray::~BoolArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor1D::init(int dim) { initOnDevice(dim); #if TEST_CUDA v = new dtype[dim]; zero(); #endif } void Tensor1D::initOnMemoryAndDevice(int dim) { initOnDevice(dim); v = new dtype[dim]; zero(); } void Tensor1D::initOnDevice(int dim) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, dim * sizeof(dtype))); this->dim = dim; } Tensor1D::Tensor1D(const Tensor1D &t) { dim = t.dim; memcpy(v, t.v, dim *sizeof(dtype)); CallCuda(MyCudaMemcpy(value, t.value, dim * sizeof(dtype), hipMemcpyDeviceToDevice)); } Tensor1D::~Tensor1D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor1D::copyFromHostToDevice() { assert(v != NULL); assert(value != NULL); CallCuda(MyCudaMemcpy(value, v, dim * sizeof(dtype), hipMemcpyHostToDevice)); } void Tensor1D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, dim * sizeof(dtype), hipMemcpyDeviceToHost)); } 
void Tensor2D::initOnMemoryAndDevice(int row, int col) { initOnDevice(row, col); v = new dtype[row * col]; zero(); } void Tensor2D::init(int row, int col) { initOnDevice(row, col); #if TEST_CUDA v = new dtype[row * col]; zero(); #endif } void Tensor2D::initOnDevice(int row, int col) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, row * col * sizeof(dtype))); this->row = row; this->col = col; this->size = row * col; } Tensor2D::Tensor2D(const Tensor2D &t) { row = t.row; col = t.col; memcpy(v, t.v, sizeof(dtype) * row * col); CallCuda(MyCudaMemcpy(value, t.value, sizeof(dtype) * row * col, hipMemcpyDeviceToDevice)); } Tensor2D::~Tensor2D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor2D::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, v, size * sizeof(dtype), hipMemcpyHostToDevice)); } void Tensor2D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, size * sizeof(dtype), hipMemcpyDeviceToHost)); } void Assert(bool v) { #if TEST_CUDA if (!v) { abort(); } #endif } __device__ void DeviceAtomicAdd(dtype* address, dtype value) { float old = value; float new_old; do { new_old = atomicExch(address, 0.0); new_old += old; } while ((old = atomicExch(address, new_old))!=0.0); }; __device__ dtype cuda_dtanh(dtype y) { return 1.0f - y * y; } __device__ dtype cuda_sigmoid(dtype x) { return 1.0f / (1.0f + cuda_exp(-x)); } __device__ dtype cuda_dsigmoid(dtype y) { return y * (1.0f - y); } __device__ dtype cuda_relu(dtype x) { return x > 0.0f ? x : 0.0f; } __device__ dtype cuda_drelu(dtype x) { return x > 0.0f ? 1 : 0.0f; } __device__ dtype cuda_leaky_relu(dtype x) { return x > 0.0f ? x : -0.1f * x; } __device__ dtype cuda_dleaky_relu(dtype x) { return x > 0.0f ? 1.0f : -0.1f; } const dtype SELU_LAMBDA = 1.0507009873554804934193349852946; const dtype SELU_ALPHA = 1.6732632423543772848170429916717; __device__ dtype cuda_selu(dtype x) { return x <= 0.0f ? 
SELU_LAMBDA * SELU_ALPHA * (cuda_exp(x) - 1.0f) : SELU_LAMBDA * x; } __device__ dtype cuda_dselu(dtype x, dtype y) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA + y : SELU_LAMBDA; } void Random(dtype *v, int len, dtype bound) { dtype *mem = (dtype*)malloc(len * sizeof(dtype)); assert(mem != NULL); dtype min = -bound, max = bound; for (int i = 0; i < len; i++) { mem[i] = (dtype(rand()) / RAND_MAX) * (max - min) + min; } CallCuda(MyCudaMemcpy(v, mem, len * sizeof(dtype), hipMemcpyHostToDevice)); free(mem); } __device__ int DeviceDefaultIndex() { return blockIdx.x * blockDim.x + threadIdx.x; } __device__ int DeviceDefaultStep() { return gridDim.x * blockDim.x; } __device__ dtype DeviceAbs(dtype d) { return d > 0 ? d : -d; } int DefaultBlockCount(int len) { int block_count = (len - 1 + TPB) / TPB; return ::min(block_count, BLOCK_COUNT); } int DefaultBlockCountWithoutLimit(int len) { return (len - 1 + TPB) / TPB; } __global__ void KernelZero(dtype *v, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) { return; } v[index] = 0; } void Zero(dtype *v, int len) { int block_count = (len - 1 + TPB) / TPB; hipLaunchKernelGGL(( KernelZero), dim3(block_count), dim3(TPB), 0, 0, v, len); CheckCudaError(); } __global__ void PrintPointers(void **p, int len) { for (int i = 0; i < len; ++i) { printf("%p\n", p[i]); } } __global__ void KernelPrintNums(const dtype* p, int len) { for (int i = 0; i < len; ++i) { printf("%f\n", p[i]); } } void PrintNums(const dtype* p, int len) { hipLaunchKernelGGL(( KernelPrintNums), dim3(1), dim3(1), 0, 0, p, len); hipDeviceSynchronize(); CheckCudaError(); } __global__ void KernelPrintInts(const int* p, int len) { for (int i = 0; i < len; ++i) { printf("%d\n", p[i]); } } void PrintInts(const int* p, int len) { hipLaunchKernelGGL(( KernelPrintInts), dim3(1), dim3(1), 0, 0, p, len); hipDeviceSynchronize(); CheckCudaError(); } void InitCuda(int device_id) { std::cout << "device_id:" << device_id << std::endl; 
CallCuda(hipSetDeviceFlags(hipDeviceMapHost)); #if DEVICE_MEMORY == 0 cnmemDevice_t device; device.size = 10000000000; device.device = device_id; cnmemInit(1, &device, CNMEM_FLAGS_DEFAULT); #else CallCuda(hipSetDevice(device_id)); #endif CallCuda(hipDeviceSetCacheConfig(hipFuncCachePreferL1)); CallCuda(cudaPrintfInit()); } void EndCuda() { cudaPrintfEnd(); Profiler::Ins().Print(); } __global__ void KernelCopyFromOneVectorToMultiVectors(const dtype *src, dtype **dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i / len; int len_i = i % len; dest[count_i][len_i] = src[i]; } } void CopyFromOneVectorToMultiVals(const dtype *src, std::vector<dtype*> &vals, int count, int len) { NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); int block_count = (len * count - 1 + TPB) / TPB; block_count = ::min(block_count, BLOCK_COUNT); hipLaunchKernelGGL(( KernelCopyFromOneVectorToMultiVectors), dim3(block_count), dim3(TPB), 0, 0, src, val_arr.value, count, len); CheckCudaError(); } void CopyFromHostToDevice(const std::vector<dtype*> &src, std::vector<dtype*> &dest, int count, int dim) { dtype *long_src = (dtype*)malloc(count * dim * sizeof(dtype)); if (long_src == NULL) { std::cout << "out of memory!" 
<< std::endl; abort(); } for (int i = 0; i < count; ++i) { memcpy(long_src + i * dim, src.at(i), dim * sizeof(dtype)); } dtype *long_dest = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&long_dest, count * dim * sizeof(dtype*))); CallCuda(hipMemcpy(long_dest, long_src, count * dim * sizeof(dtype*), hipMemcpyHostToDevice)); CopyFromOneVectorToMultiVals(long_dest, dest, count, dim); free(long_src); CallCuda(MemoryPool::Ins().Free(long_dest)); } __global__ void KernelCopyFromMultiVectorsToOneVector(const dtype **src, dtype *dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i / len; int len_i = i % len; dest[i] = src[count_i][len_i]; } } void CopyFromMultiVectorsToOneVector(const std::vector<dtype*> &src, dtype *dest, int count, int len) { NumberPointerArray src_arr; src_arr.init((dtype**)src.data(), src.size()); int block_count = DefaultBlockCount(len * count); hipLaunchKernelGGL(( KernelCopyFromMultiVectorsToOneVector), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)src_arr.value, dest, count, len); CheckCudaError(); } void CopyFromDeviceToHost(const std::vector<dtype*> &src, std::vector<dtype*> &dest, int count, int dim) { dtype *long_src = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&long_src, count * dim * sizeof(dtype*))); CopyFromMultiVectorsToOneVector(src, long_src, count, dim); dtype *long_dest = (dtype*)malloc(count * dim * sizeof(dtype)); if (long_dest == NULL) { std::cout << "out of memory!" 
<< std::endl; abort(); } CallCuda(hipMemcpy(long_dest, long_src, count * dim * sizeof(dtype), hipMemcpyDeviceToHost)); for (int i = 0; i < count; ++i) { memcpy(dest.at(i), long_dest + i * dim, dim * sizeof(dtype)); } CallCuda(MemoryPool::Ins().Free(long_src)); free(long_dest); } __global__ void KernelActivated(ActivatedEnum activated, const dtype *src, dtype**dest, dtype* dest2, int count, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = blockDim.x * gridDim.x; for (int i = index; i < len * count; i += step) { int count_i = i / len; int len_i = i % len; dtype result; if (activated == ActivatedEnum::TANH) { result = cuda_tanh(src[i]); } else if (activated == ActivatedEnum::SIGMOID) { result = cuda_sigmoid(src[i]); } else if (activated == ActivatedEnum::RELU) { result = cuda_relu(src[i]); } else if (activated == ActivatedEnum::LEAKY_RELU) { result = cuda_leaky_relu(src[i]); } else if (activated == ActivatedEnum::SELU) { result = cuda_selu(src[i]); } else { printf("KernelActivated error\n"); return; } dest[count_i][len_i] = result; dest2[i] = result; } } void Activated(ActivatedEnum activated, const dtype *src, const std::vector<dtype*>& dest, dtype *dest2, int len) { int count = dest.size(); NumberPointerArray dest_arr; dest_arr.init((dtype**)dest.data(), dest.size()); int block_count = ::min((len * count - 1 + TPB) / TPB, BLOCK_COUNT); hipLaunchKernelGGL(( KernelActivated), dim3(block_count), dim3(TPB), 0, 0, activated, src, dest_arr.value, dest2, count, len); CheckCudaError(); } __global__ void KernelTanhForward(ActivatedEnum activated, const dtype** xs, int count, int dim, dtype**ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (activated == ActivatedEnum::TANH) { ys[count_i][dim_i] = cuda_tanh(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { ys[count_i][dim_i] = cuda_sigmoid(xs[count_i][dim_i]); } 
else { printf("error\n"); } } } void TanhForward(ActivatedEnum activated, const std::vector<dtype*> &xs, int count, int dim, std::vector<dtype*> &ys) { NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelTanhForward), dim3(block_count), dim3(TPB), 0, 0, activated, (const dtype**)x_arr.value, count, dim, y_arr.value); CheckCudaError(); } __global__ void KernelTanhBackward(ActivatedEnum activated, const dtype **losses, const dtype **vals, int count, int dim, dtype** in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype v; if (activated == ActivatedEnum::TANH) { v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i] * vals[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i]) * vals[count_i][dim_i]; } atomicAdd(in_losses[count_i] + dim_i, v); } } void TanhBackward(ActivatedEnum activated, const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, std::vector<dtype*> &in_losses) { NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelTanhBackward), dim3(block_count), dim3(TPB), 0, 0, activated ,(const dtype**)loss_arr.value, (const dtype**)val_arr.value, count, dim, in_loss_arr.value); CheckCudaError(); } __global__ void KernelDropoutForward(const dtype** xs, int count, int dim, bool is_training, const dtype* drop_mask, dtype drop_factor, dtype**ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int 
count_i = i / dim; int dim_i = i % dim; if (is_training) { if (drop_mask[i] < drop_factor) { ys[count_i][dim_i] = 0.0f; } else { ys[count_i][dim_i] = xs[count_i][dim_i]; } } else { ys[count_i][dim_i] = (1 - drop_factor) * xs[count_i][dim_i]; } } } void DropoutForward(const std::vector<dtype*> &xs, int count, int dim, bool is_training, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &ys) { if (drop_factor < 0 || drop_factor >= 1.0f) { std::cerr << "drop value is " << drop_factor << std::endl; abort(); } NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelDropoutForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)x_arr.value, count, dim, is_training, drop_mask, drop_factor, y_arr.value); CheckCudaError(); } __global__ void KernelDropoutBackward(const dtype **losses, const dtype **vals, int count, int dim, bool is_training, const dtype* drop_mask, dtype drop_factor, dtype** in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (is_training) { if (drop_mask[i] >= drop_factor) { atomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]); } } else { atomicAdd(in_losses[count_i] + dim_i, (1 - drop_factor) * losses[count_i][dim_i]); } } } void DropoutBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, bool is_training, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &in_losses) { if (drop_factor < 0 || drop_factor >= 1) { std::cerr << "drop value is " << drop_factor << std::endl; abort(); } NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = 
DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelDropoutBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)loss_arr.value, (const dtype**)val_arr.value, count, dim, is_training, drop_mask, drop_factor, in_loss_arr.value); CheckCudaError(); } __global__ void KernelCopyForUniNodeForward(const dtype** xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; int x_total_len = count * x_len; int b_total_len = count * b_len; for (int i = index; i < x_total_len + b_total_len; i += step) { if (i < x_total_len) { int count_i = i / x_len; int len_i = i % x_len; xs_dest[i] = xs[count_i][len_i]; } else if (use_b) { int b_i = i - x_total_len; int len_i = b_i % b_len; b_dest[b_i] = b[len_i]; } } } void CopyForUniNodeForward(const std::vector<dtype*> &xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { NumberPointerArray x_arr; x_arr.init((dtype**)xs.data(), xs.size()); int len = x_len + b_len; int block_count = ::min((count * len - 1 + TPB) / TPB, 56); hipLaunchKernelGGL(( KernelCopyForUniNodeForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)x_arr.value, (const dtype*)b, xs_dest, b_dest, count, x_len, b_len, use_b); CheckCudaError(); } __global__ void KernelCopyForBiNodeForward(const dtype **x1s, const dtype **x2s, const dtype *b, dtype *x1s_dest, dtype *x2s_dest, dtype *b_dest, int count, int x1_len, int x2_len, int b_len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int x1_total_len = count * x1_len; int x2_total_len = count * x2_len; int b_total_len = count * b_len; int total_len = x1_total_len + x2_total_len + b_total_len; for (int i = index; i < total_len; i += step) { if (i < x2_total_len) { int len_i = i % x2_len; int count_i = i / x2_len; x2s_dest[i] = x2s[count_i][len_i]; } else if (i >= x2_total_len && i < x1_total_len + x2_total_len) { int 
len_i = (i - x2_total_len) % x1_len; int count_i = (i - x2_total_len) / x1_len; x1s_dest[i - x2_total_len] = x1s[count_i][len_i]; } else { int b_i = (i - x1_total_len - x2_total_len); int len_i = b_i % b_len; b_dest[b_i] = b[len_i]; } } } void CopyForBiNodeForward(const std::vector<dtype*>& x1s, const std::vector<dtype *>& x2s, const dtype *b, dtype *x1s_dest, dtype *x2s_dest, dtype *b_dest, int count, int x1_len, int x2_len, int b_len) { int len = x1_len + x2_len + b_len; int block_count = DefaultBlockCount(count * len); NumberPointerArray x1_arr, x2_arr; x1_arr.init((dtype**)x1s.data(), x1s.size()); x2_arr.init((dtype**)x2s.data(), x2s.size()); hipLaunchKernelGGL(( KernelCopyForBiNodeForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)x1_arr.value, (const dtype**)x2_arr.value, b, x1s_dest, x2s_dest, b_dest, count, x1_len, x2_len, b_len); CheckCudaError(); } void MatrixMultiplyMatrix(dtype *W, dtype *x, dtype *y, int row, int col, int count, bool useb, bool should_x_transpose, bool should_W_transpose) { hipblasHandle_t &handle = GetCublasHandle(); dtype alpha = 1; dtype beta = useb? 1 : 0; hipblasOperation_t x_op = should_x_transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; int ldx = should_x_transpose ? count : col; hipblasOperation_t W_op = should_W_transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; int ldw = should_W_transpose ? 
col : row; #if USE_FLOAT CallCublas(hipblasSgemm(handle, W_op, x_op, row, count, col, &alpha, W, ldw, x, ldx, &beta, y, row)); #else CallCublas(hipblasDgemm(handle, W_op, x_op, row, count, col, &alpha, W, ldw, x, ldx, &beta, y, row)); #endif } __global__ void KernelVerify(dtype *host, dtype *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { dtype loss = host[index] - device[index]; if (DeviceAbs(loss) > 0.0001) { *success = false; printf("KernelVerify %s: host:%f device:%f loss:%f index:%d\n", message, host[index], device[index], loss, index); KernelPrintLine("KernelVerify: host:%f device:%f loss:%f", host[index], device[index], loss); } } } bool Verify(dtype *host, dtype *device, int len, const char* message) { NumberArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), hipMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); hipDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } __global__ void KernelVerify(bool *host, bool *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } 
// Host-side exact comparison of bool buffers; mirrors Verify(dtype*, ...).
bool Verify(bool *host, bool *device, int len, const char* message) {
    BoolArray arr;
    arr.init(host, len);
    int block_count = (len + TPB - 1) / TPB;
    char *m = NULL;
    CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char)));
    CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice));
    bool success = true;
    bool *dev_success = NULL;
    // NOTE(review): 8 bools allocated, only one used (same as the dtype overload).
    CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool)));
    CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success);
    CheckCudaError();
    CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), hipMemcpyDeviceToHost));
    MemoryPool::Ins().Free(dev_success);
    MemoryPool::Ins().Free(m);
    hipDeviceSynchronize();
    cudaPrintfDisplay(stdout, true);
    return success;
}

// Device-side exact equality check for int buffers.
__global__ void KernelVerify(int *host, int *device, int len, const char *message, bool *success) {
    int index = DeviceDefaultIndex();
    if (index < len) {
        if (host[index] != device[index]) {
            *success = false;
            printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]);
            KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]);
        }
    }
}

// Host-side exact comparison of int buffers; mirrors the overloads above
// (here dev_success is allocated with sizeof(bool), not 8 bools).
bool Verify(int *host, int *device, int len, const char* message) {
    IntArray arr;
    arr.init(host, len);
    int block_count = (len + TPB - 1) / TPB;
    char *m = NULL;
    CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char)));
    CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice));
    bool success = true;
    bool *dev_success = NULL;
    CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, sizeof(bool)));
    CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success);
    CheckCudaError();
    CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), hipMemcpyDeviceToHost));
    MemoryPool::Ins().Free(dev_success);
    MemoryPool::Ins().Free(m);
    hipDeviceSynchronize();
    cudaPrintfDisplay(stdout, true);
    return success;
}

// Largest power-of-two exponent managed by the buddy allocator's free lists.
constexpr int MAX_BLOCK_POWER = 100;

// Singleton accessor for the device memory pool. Lazily builds the pool with
// one free list per block-size power (0..MAX_BLOCK_POWER).
// NOTE(review): the lazy init is not synchronized — confirm it is only called
// from a single thread before first use.
MemoryPool& MemoryPool::Ins() {
    static MemoryPool *p;
    if (p == NULL) {
        p = new MemoryPool;
        p->free_blocks_.resize(MAX_BLOCK_POWER + 1);
        p->busy_blocks_.reserve(10000);
    }
    return *p;
}

// Inserts `memory_block` into the free list of power `i`, aborting if the
// block's size is not exactly 2^i. `busy_blocks` is accepted but unused here.
void appendFreeBlock(const MemoryBlock &memory_block,
        vector<map<void*, MemoryBlock>> &free_blocks, int i,
        const unordered_map<void*, MemoryBlock> &busy_blocks) {
    if (memory_block.size != (1 << i)) {
        cerr << boost::format("incorrect block size %1%, but i is %2%") % memory_block.size % i << endl;
        abort();
    }
    free_blocks.at(i).insert(make_pair(memory_block.p, memory_block));
}

// Buddy-style allocation: rounds `size` up to the next power of two, then
// either reuses a free block of that size, splits a larger free block down,
// or falls back to hipMalloc when no free block exists.
hipError_t MemoryPool::Malloc(void **p, int size) {
    assert(*p == NULL);
    Profiler &profiler = Profiler::Ins();
    profiler.BeginEvent("Malloc");
#if DEVICE_MEMORY == 0
    CallCnmem(cnmemMalloc(p, size, NULL));
    profiler.EndEvent();
    return hipSuccess;
#elif DEVICE_MEMORY == 1
    hipError_t r = hipMalloc(p, size);
    profiler.EndEvent();
    return r;
#else
    // fit_size = smallest power of two >= size; n = its exponent.
    int fit_size = 1;
    int n = 0;
    while (fit_size < size) {
        fit_size <<= 1;
        ++n;
    }
    hipError_t status = hipErrorMemoryAllocation;
    int loop = 0;
    while (status != hipSuccess) {
        //cout << "n:" << n << endl;
        if (free_blocks_.at(n).empty()) {
            //cout << "free_blocks_.at(n).empty()" << endl;
            // Find the smallest larger power with a free block available.
            int higher_power = n + 1;
            //cout << "higher_power:" << higher_power << endl;
            while (higher_power <= MAX_BLOCK_POWER && free_blocks_.at(higher_power).empty()) {
                ++higher_power;
            }
            //cout << "higher_power:" << higher_power << endl;
            if (higher_power > MAX_BLOCK_POWER) {
                // No free block anywhere: allocate fresh device memory.
                // NOTE(review): this retry loop spins forever if hipMalloc
                // keeps failing (true out-of-memory) — confirm intended.
                while (status != hipSuccess) {
                    status = hipMalloc(p, fit_size);
                }
                CallCuda(status);
                MemoryBlock block(*p, fit_size);
                busy_blocks_.insert(std::make_pair(*p, block));
                //cout << "malloc successfully" << endl;
            } else {
                // Split the larger free block into two buddies of half size.
                //cout << "higher_power:" << higher_power << endl;
                auto &v = free_blocks_.at(higher_power);
                MemoryBlock
&to_split = v.rbegin()->second;
                int half_size = to_split.size >> 1;
                void *half_address = static_cast<void*>(static_cast<char*>(to_split.p) + half_size);
                MemoryBlock low_block(to_split.p, half_size, to_split.buddy),
                            high_block(half_address, half_size, to_split.p);
                v.erase(v.rbegin()->first);
                appendFreeBlock(low_block, free_blocks_, higher_power - 1, busy_blocks_);
                appendFreeBlock(high_block, free_blocks_, higher_power - 1, busy_blocks_);
            }
        } else {
            // A free block of exactly the right size exists: take it.
            status = hipSuccess;
            int this_size = free_blocks_.at(n).size();
            MemoryBlock &block = free_blocks_.at(n).rbegin()->second;
            *p = block.p;
            busy_blocks_.insert(std::make_pair(block.p, block));
            free_blocks_.at(n).erase(free_blocks_.at(n).rbegin()->first);
        }
        ++loop;
    }
    profiler.EndEvent();
    return status;
#endif
}

// Orders two equally-sized blocks by address and returns (lower, higher)
// pointers into the caller's arguments. Aborts on size mismatch or identical
// addresses.
std::pair<const MemoryBlock *, const MemoryBlock *> lowerAndhigherBlocks(const MemoryBlock &a,
        const MemoryBlock &b) {
    if (a.size != b.size) {
        cerr << "a.size is not equal to b.size" << endl;
        abort();
    }
    int distance = static_cast<char*>(a.p) - static_cast<char*>(b.p);
    if (distance == 0) {
        cerr << "block a and b has the same address" << endl;
        abort();
    }
    const MemoryBlock &low = distance > 0 ? b : a;
    const MemoryBlock &high = distance > 0 ? a : b;
    return std::make_pair(&low, &high);
}

// True when a and b are buddy halves of the same parent block: equal size,
// the higher one records the lower as its buddy, and they are adjacent.
bool isBuddies(const MemoryBlock &a, const MemoryBlock &b) {
    if (a.size != b.size) {
        return false;
    }
    auto pair = lowerAndhigherBlocks(a, b);
    return pair.second->buddy == pair.first->p &&
        ((char*)pair.second->p - (char*)pair.first->p) == a.size;
}

// Merges two buddy blocks back into their parent (double-sized) block,
// aborting if they are not actually buddies.
MemoryBlock mergeBlocks(const MemoryBlock &a, const MemoryBlock &b) {
    if (a.size != b.size) {
        cerr << "sizes of memory blocks to merge not equal" << endl;
        abort();
    }
    auto pair = lowerAndhigherBlocks(a, b);
    if ((char*)pair.second->p - (char*)pair.first->p != a.size ||
            (a.p != b.buddy && a.buddy != b.p)) {
        cerr << "a and b are not buddies" << endl;
        cerr << boost::format("a:%1%\nb:%2%") % a.toString() % b.toString() << endl;
        abort();
    }
    MemoryBlock block(pair.first->p, pair.first->size << 1, pair.first->buddy);
    return block;
}

// Returns `block` to the free lists, repeatedly coalescing with its buddy
// (when the buddy is also free) and moving up one power each merge.
void returnFreeBlock(const MemoryBlock &block, vector<map<void*, MemoryBlock>> &free_blocks,
        int power, const unordered_map<void*, MemoryBlock> &busy_blocks) {
    Profiler &profiler = Profiler::Ins();
    profiler.BeginEvent("returnFreeBlock");
    MemoryBlock current_block = block;
    for (int i = power; i <= MAX_BLOCK_POWER; ++i) {
        map<void*, MemoryBlock> &v = free_blocks.at(i);
        // Candidate buddy address: either our recorded buddy (we are the high
        // half) or the block immediately after us (we are the low half).
        void *free_p = (char*)current_block.p - (char*)current_block.buddy == current_block.size ?
            current_block.buddy : (void*)((char*)current_block.p + current_block.size);
        auto it = v.find(free_p);
        if (it == v.end() || (it->second.p != current_block.buddy &&
                    it->second.buddy != current_block.p)) {
            // Buddy not free (or not a true buddy): park the block here.
            appendFreeBlock(current_block, free_blocks, i, busy_blocks);
            break;
        } else {
            MemoryBlock merged_block = mergeBlocks(it->second, current_block);
            current_block = merged_block;
            v.erase(it);
        }
    }
    profiler.EndEvent();
}

// Returns `p` to the pool (buddy-allocator path), or delegates to
// cnmem/hipFree depending on the DEVICE_MEMORY compile switch.
// NOTE(review): the DEVICE_MEMORY == 0 branch falls off the end without a
// return value — confirm that configuration is actually used.
hipError_t MemoryPool::Free(void *p) {
    Profiler &profiler = Profiler::Ins();
    profiler.BeginEvent("Free");
#if DEVICE_MEMORY == 0
    CallCnmem(cnmemFree(p, NULL));
    profiler.EndEvent();
#elif DEVICE_MEMORY == 1
    hipError_t r = hipFree(p);
    profiler.EndEvent();
    return r;
#else
    auto it = busy_blocks_.find(p);
    if (it == busy_blocks_.end()) {
        cerr << "cannot find busy block " << p << endl;
        abort();
    }
    // Recover the exponent n of the (power-of-two) block size.
    int size = it->second.size;
    int n = 0;
    while (size > 1) {
        size >>= 1;
        ++n;
    }
    if (it->second.size != (1 << n)) {
        cerr << boost::format("size:%1% n:%2%") % it->second.size % n << endl;
        abort();
    }
    auto block = it->second;
    busy_blocks_.erase(it);
    returnFreeBlock(block, free_blocks_, n, busy_blocks_);
    it = busy_blocks_.find(p);
    if (it != busy_blocks_.end()) {
        cerr << "can find erased block " << p << endl;
        abort();
    }
    profiler.EndEvent();
    if (busy_blocks_.find(p) != busy_blocks_.end()) {
        cerr << boost::format("Malloc - find freed p in busy blocks") << endl;
    }
    return hipSuccess;
#endif
}

// Ends a profiler event only after all outstanding device work completes, so
// the measured span includes kernel execution.
void Profiler::EndCudaEvent() {
    hipDeviceSynchronize();
    EndEvent();
}

// Computes lty = ly * activation'(...) elementwise for a batch of `count`
// vectors of width `dim`, where the derivative used depends on `activated`.
__global__ void KernelCalculateLtyForUniBackward(ActivatedEnum activated,
        const dtype *const*ly, const dtype *ty, const dtype *y, dtype *lty,
        int count, int dim) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    int len = count * dim;
    for (int i = index; i < len; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        dtype yi = y[i];
        dtype lyv = ly[count_i][dim_i];
        if (activated == ActivatedEnum::TANH) {
            lty[i] = lyv * cuda_dtanh(yi);
        } else if (activated == ActivatedEnum::SIGMOID) {
            lty[i] = lyv *
cuda_dsigmoid(yi);
        } else if (activated == ActivatedEnum::RELU) {
            lty[i] = lyv * cuda_drelu(ty[i]);
        } else if (activated == ActivatedEnum::LEAKY_RELU) {
            lty[i] = lyv * cuda_dleaky_relu(ty[i]);
        } else if (activated == ActivatedEnum::SELU) {
            lty[i] = lyv * cuda_dselu(ty[i], yi);
        } else {
            printf("KernelCalculateLtyForUniBackward error\n");
        }
    }
}

// Host wrapper: gathers the per-sample loss pointers, launches
// KernelCalculateLtyForUniBackward, and synchronizes the device afterwards.
void CalculateLtyForUniBackward(ActivatedEnum activated, const std::vector<dtype*> &ly,
        const dtype *ty, const dtype *y, dtype *lty, int count, int dim) {
    NumberPointerArray ly_arr;
    ly_arr.init((dtype**)ly.data(), ly.size());
    int block_count = ::min(BLOCK_COUNT, (count * dim + TPB - 1) / TPB);
    hipLaunchKernelGGL(( KernelCalculateLtyForUniBackward), dim3(block_count), dim3(TPB), 0, 0, activated, ly_arr.value, ty, y, lty, count, dim);
    CheckCudaError();
    hipDeviceSynchronize();
}

// Two-stage backward: blocks with blockIdx.x < out_dim reduce lty over the
// batch (shared-memory tree reduction, then cross-block accumulation through
// block_sums / global_block_count) into the bias gradient b; the remaining
// blocks scatter lx into the per-sample input losses.
__global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward(
        const dtype *lty, const dtype *lx, dtype *b, dtype **losses, int count,
        int out_dim, int in_dim, dtype *block_sums, int *global_block_count,
        bool use_b) {
    __shared__ volatile dtype shared_arr[TPB];
    int count_i = blockIdx.y * blockDim.x + threadIdx.x;
    int dim_i = blockIdx.x;
    if (dim_i < out_dim) {
        if (use_b) {
            if (threadIdx.x == 0 && blockIdx.y == 0) {
                global_block_count[dim_i] = 0;
            }
            int lty_index = count_i * out_dim + dim_i;
            shared_arr[threadIdx.x] = count_i < count ? lty[lty_index] : 0.0f;
            __syncthreads();
            // Tree reduction over the block's TPB partial values.
            for (int i = (TPB >> 1); i > 0; i>>=1) {
                if (threadIdx.x < i) {
                    shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i];
                }
                __syncthreads();
            }
            if (threadIdx.x == 0) {
                block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0];
                // Last block to finish for this dim sums all block partials.
                if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) {
                    dtype sum = 0.0;
                    for (int i = 0; i < gridDim.y; ++i) {
                        sum += block_sums[gridDim.y * blockIdx.x + i];
                    }
                    DeviceAtomicAdd(b + dim_i, sum);
                }
            }
        }
    } else {
        if (count_i < count) {
            dim_i -= out_dim;
            int lx_index = dim_i + count_i * in_dim;
            DeviceAtomicAdd(losses[count_i] + dim_i, lx[lx_index]);
        }
    }
}

// Host wrapper: grid is (out_dim + in_dim) x ceil(count / TPB) blocks; the
// first out_dim block columns accumulate the bias gradient, the rest route
// lx into the input losses.
void AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(const dtype *lty,
        const dtype *lx, dtype *b, std::vector<dtype*> &losses, int count,
        int out_dim, int in_dim, bool use_b) {
    int block_y = (count - 1 + TPB) / TPB;
    dim3 block_dim(out_dim + in_dim, block_y, 1);
    NumberPointerArray loss_arr;
    loss_arr.init(losses.data(), count);
    Tensor1D block_sums;
    block_sums.init(block_y * out_dim);
    IntArray global_block_count_arr;
    global_block_count_arr.init(out_dim);
    hipLaunchKernelGGL(( KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward), dim3(block_dim), dim3(TPB), 0, 0, lty, lx, b, loss_arr.value, count, out_dim, in_dim, block_sums.value, global_block_count_arr.value, use_b);
    CheckCudaError();
}

// Bi-input variant of the kernel above: bias gradient from lty, then lx1
// scattered into losses1 and lx2 into losses2, selected by block column.
__global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward(
        const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b,
        dtype **losses1, dtype **losses2, int count, int out_dim, int in_dim1,
        int in_dim2, dtype *block_sums, int *global_block_count) {
    __shared__ volatile dtype shared_arr[TPB];
    int count_i = blockIdx.y * blockDim.x + threadIdx.x;
    int dim_i = blockIdx.x;
    if (dim_i < out_dim) {
        if (threadIdx.x == 0 && blockIdx.y == 0) {
            global_block_count[dim_i] = 0;
        }
        //int lty_index = dim_i * count + count_i;
        int lty_index = dim_i + count_i * out_dim;
        shared_arr[threadIdx.x] = count_i < count ? lty[lty_index] : 0.0f;
        __syncthreads();
        for (int i = (TPB >> 1); i > 0; i>>=1) {
            if (threadIdx.x < i) {
                shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i];
            }
            __syncthreads();
        }
        if (threadIdx.x == 0) {
            block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0];
            if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) {
                dtype sum = 0.0;
                for (int i = 0; i < gridDim.y; ++i) {
                    sum += block_sums[gridDim.y * blockIdx.x + i];
                }
                DeviceAtomicAdd(b + dim_i, sum);
            }
        }
    } else if (dim_i < out_dim + in_dim1) {
        if (count_i < count) {
            dim_i -= out_dim;
            int lx_index = dim_i + count_i * in_dim1;
            DeviceAtomicAdd(losses1[count_i] + dim_i, lx1[lx_index]);
        }
    } else {
        if (count_i < count) {
            dim_i -= (out_dim + in_dim1);
            int lx_index = dim_i + count_i * in_dim2;
            DeviceAtomicAdd(losses2[count_i] + dim_i, lx2[lx_index]);
        }
    }
}

// Host wrapper for the bi-input backward kernel above.
void AddLtyToParamBiasAndAddLxToInputLossesForBiBackward(const dtype *lty,
        const dtype *lx1, const dtype *lx2, dtype *b, std::vector<dtype*> &losses1,
        std::vector<dtype*> &losses2, int count, int out_dim, int in_dim1,
        int in_dim2) {
    int block_y = (count - 1 + TPB) / TPB;
    dim3 block_dim(out_dim + in_dim1 + in_dim2, block_y, 1);
    NumberPointerArray loss1_arr;
    loss1_arr.init(losses1.data(), count);
    NumberPointerArray loss2_arr;
    loss2_arr.init(losses2.data(), count);
    Tensor1D block_sums;
    block_sums.init(block_y * out_dim);
    IntArray global_block_count_arr;
    global_block_count_arr.init(out_dim);
    hipLaunchKernelGGL(( KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward), dim3(block_dim), dim3(TPB), 0, 0, lty, lx1, lx2, b, loss1_arr.value, loss2_arr.value, count, out_dim, in_dim1, in_dim2, block_sums.value, global_block_count_arr.value);
    CheckCudaError();
}

// Upper bound on the number of persistent RNG states kept on the device.
constexpr int MAX_BATCH_COUNT = 1000000;

// Seeds one hiprand state per index (seed 0, sequence = index) via a
// grid-stride loop over MAX_BATCH_COUNT states.
__global__ void KernelInitCurandStates(hiprandState_t *states) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int step = gridDim.x * blockDim.x;
    for (int i = index; i < MAX_BATCH_COUNT; i += step) {
        hiprand_init(0, i, 0, &states[i]);
    }
}

hiprandState_t
*GetCurandStates() {
    // Lazily allocates and seeds the device-wide hiprand state array.
    static hiprandState_t *states;
    if (states == NULL) {
        MemoryPool &pool = MemoryPool::Ins();
        CallCuda(pool.Malloc((void**)&states, sizeof(hiprandState_t) * MAX_BATCH_COUNT));
        hipLaunchKernelGGL(( KernelInitCurandStates), dim3(BLOCK_COUNT), dim3(TPB), 0, 0, states);
        CheckCudaError();
    }
    return states;
}

// Lazily-created hiprand generator, fixed seed 0 for reproducibility.
hiprandGenerator_t &GetGenerator() {
    static hiprandGenerator_t gen;
    static bool init;
    if (!init) {
        CallCurand(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
        CallCurand(hiprandSetPseudoRandomGeneratorSeed(gen, 0));
        init = true;
    }
    return gen;
}

// Fills `mask` with count*dim uniform random numbers.
// NOTE(review): drop_factor is currently unused here — thresholding against
// it presumably happens elsewhere; confirm.
void CalculateDropoutMask(dtype drop_factor, int count, int dim, dtype* mask) {
    hiprandGenerator_t &gen = GetGenerator();
    CallCurand(hiprandGenerateUniform(gen, mask, count * dim));
}

// Concatenation forward: for each (sample, output position), locates the
// contributing input vector by walking the per-input widths in in_dims and
// copies its element into the output.
__global__ void KernelConcatForward(dtype **ins, int *in_dims, dtype **outs,
        int count, int in_count, int out_dim) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < out_dim * count; i += step) {
        int out_dim_i = i % out_dim;
        int count_i = i / out_dim;
        int in_dim_sum = 0;
        int last_in_dim_sum;
        int offset_j = 0;
        for (int j = 0; j < in_count; ++j) {
            last_in_dim_sum = in_dim_sum;
            in_dim_sum += in_dims[j];
            offset_j = j;
            if (out_dim_i < in_dim_sum) {
                break;
            }
        }
        int in_dim_i = out_dim_i - last_in_dim_sum;
        dtype v = ins[count_i * in_count + offset_j][in_dim_i];
        outs[count_i][out_dim_i] = v;
    }
}

// Host wrapper for KernelConcatForward.
void ConcatForward(const std::vector<dtype*> &in_vals, const std::vector<int> &in_dims,
        std::vector<dtype*> &vals, int count, int in_count, int out_dim) {
    int len = count * out_dim;
    int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
    NumberPointerArray in_val_arr, val_arr;
    in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
    val_arr.init((dtype**)vals.data(), vals.size());
    IntArray in_dim_arr;
    in_dim_arr.init((int*)in_dims.data(), in_dims.size());
    hipLaunchKernelGGL(( KernelConcatForward), dim3(block_count), dim3(TPB), 0, 0, in_val_arr.value, in_dim_arr.value, val_arr.value, count, in_count, out_dim);
    CheckCudaError();
}

// Concatenation backward: routes each output-loss element back to the input
// it came from (same width walk as the forward kernel), accumulating
// atomically because several launches may target the same input buffers.
__global__ void KernelConcatBackward(dtype** in_losses, int *in_dims, dtype **out_losses,
        int count, int in_count, int out_dim) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < out_dim * count; i += step) {
        int out_dim_i = i % out_dim;
        int count_i = i / out_dim;
        int in_dim_sum = 0;
        int last_in_dim_sum;
        int offset_j = 0;
        for (int j = 0; j < in_count; ++j) {
            last_in_dim_sum = in_dim_sum;
            in_dim_sum += in_dims[j];
            offset_j = j;
            if (out_dim_i < in_dim_sum) {
                break;
            }
        }
        int in_dim_i = out_dim_i - last_in_dim_sum;
        DeviceAtomicAdd(in_losses[count_i * in_count + offset_j] + in_dim_i,
                out_losses[count_i][out_dim_i]);
    }
}

// Host wrapper for KernelConcatBackward.
void ConcatBackward(const std::vector<dtype*> &in_losses, const std::vector<int> &in_dims,
        std::vector<dtype*> &losses, int count, int in_count, int out_dim) {
    int len = count * out_dim;
    int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
    NumberPointerArray in_loss_arr, loss_arr;
    in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
    loss_arr.init((dtype**)losses.data(), losses.size());
    IntArray in_dim_arr;
    in_dim_arr.init((int*)in_dims.data(), in_dims.size());
    hipLaunchKernelGGL(( KernelConcatBackward), dim3(block_count), dim3(TPB), 0, 0, in_loss_arr.value, in_dim_arr.value, loss_arr.value, count, in_count, out_dim);
    CheckCudaError();
}

// Fills a device dtype buffer with `value` (element-wise, unlike byte-wise
// hipMemset).
__global__ void KernelMemset(dtype *p, int len, dtype value) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < len; i+= step) {
        p[i] = value;
    }
}

// Host wrapper for the dtype fill kernel.
void Memset(dtype *p, int len, dtype value) {
    int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
    hipLaunchKernelGGL(( KernelMemset), dim3(block_count), dim3(TPB), 0, 0, p, len, value);
    CheckCudaError();
}

// Fills a device bool buffer with `value`.
__global__ void KernelMemset(bool *p, int len, bool value) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < len; i+= step) {
        p[i] = value;
    }
}

// Host wrapper for the bool fill kernel.
void Memset(bool *p, int len, bool value) {
    int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
    hipLaunchKernelGGL(( KernelMemset), dim3(block_count), dim3(TPB), 0, 0, p, len, value);
    CheckCudaError();
}

// Raw device allocation helper (bypasses the MemoryPool); aborts on failure
// via CallCuda.
void *Malloc(int size) {
    void *p;
    CallCuda(hipMalloc(&p, size));
    return p;
}

// Fills `count` device vectors of width `dim` (addressed through a pointer
// array) with `value`.
__global__ void KernelBatchMemset(dtype **p, int count, int dim, dtype value) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < dim * count ; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        p[count_i][dim_i] = value;
    }
}

// Host wrapper for KernelBatchMemset.
void BatchMemset(const std::vector<dtype*> &vec, int count, int dim, dtype value) {
    int block_count = (count * dim -1 + TPB) / TPB;
    block_count = ::min(block_count, BLOCK_COUNT);
    NumberPointerArray vec_arr;
    vec_arr.init((dtype**)vec.data(), vec.size());
    hipLaunchKernelGGL(( KernelBatchMemset), dim3(block_count), dim3(TPB), 0, 0, vec_arr.value, count, dim, value);
    CheckCudaError();
}

// Embedding lookup: copies row xids[count_i] of `vocabulary` into each output
// vector; a negative id yields a zero vector.
__global__ void KernelLookupForward(const int *xids, const dtype *vocabulary,
        int count, int dim, dtype **vals) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count * dim; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        int xid = xids[count_i];
        if (xid >= 0) {
            int voc_i = xid * dim + dim_i;
            vals[count_i][dim_i] = vocabulary[voc_i];
        } else {
            vals[count_i][dim_i] = 0.0f;
        }
    }
}

// Host wrapper for KernelLookupForward.
void LookupForward(const std::vector<int> &xids, const dtype *vocabulary,
        int count, int dim, std::vector<dtype*> &vals) {
    int block_count = ::min(BLOCK_COUNT, (count * dim - 1 + TPB) / TPB);
    IntArray xid_arr;
    xid_arr.init((int*)xids.data(), xids.size());
    NumberPointerArray val_arr;
    val_arr.init((dtype**)vals.data(), vals.size());
    hipLaunchKernelGGL(( KernelLookupForward), dim3(block_count), dim3(TPB), 0, 0, xid_arr.value, vocabulary, count, dim, const_cast<dtype**>(val_arr.value));
    CheckCudaError();
}

// Embedding backward: accumulates per-sample losses into the embedding
// gradient rows (only for unknown_id rows unless fine_tune is set) and marks
// touched rows in `indexers`.
__global__ void KernelLookupBackward(const int *xids, int unknown_id,
        bool fine_tune, const dtype** losses, int count, int dim, dtype *grad,
        bool
*indexers) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count * dim; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        int xid = xids[count_i];
        if (xid == unknown_id || fine_tune) {
            assert(xid >= 0);
            // One thread per row (dim_i == 0) flags the row as updated.
            if (dim_i == 0) {
                indexers[xid] = true;
            }
            DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]);
        }
    }
}

// Host wrapper for KernelLookupBackward.
// NOTE(review): xids is copied to the device twice (pl_arr, then xid_arr
// initialized from pl_arr.value) — looks redundant; confirm IntArray::init
// semantics before simplifying.
void LookupBackward(const std::vector<int> &xids, int unknown_id, bool fine_tune,
        const std::vector<dtype*> &losses, int count, int dim, dtype *grad,
        bool *indexers) {
    int block_count = ::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT);
    IntArray pl_arr;
    pl_arr.init((int*)xids.data(), xids.size());
    IntArray xid_arr;
    xid_arr.init((int*)pl_arr.value, xids.size());
    NumberPointerArray loss_arr;
    loss_arr.init((dtype**)losses.data(), losses.size());
    hipLaunchKernelGGL(( KernelLookupBackward), dim3(block_count), dim3(TPB), 0, 0, const_cast<const int *>(xid_arr.value), unknown_id, fine_tune, const_cast<const dtype**>(loss_arr.value), count, dim, grad, indexers);
    CheckCudaError();
}

// Max/min pooling over a variable number of input vectors per sample.
// Block = (dim_i, batch_i); dynamic shared memory holds values and the
// argmax/argmin indices side by side. Records the winning input index per
// (sample, dim) in hit_inputs for the backward pass.
__global__ void KernelPoolForward(PoolingEnum pooling, dtype **ins, int *in_counts,
        int max_in_count, dtype **outs, int count, int dim, int* hit_inputs) {
    __shared__ volatile extern dtype pool_shared_arr[];
    volatile dtype* shared_indexers = pool_shared_arr + blockDim.x;
    int batch_i = blockIdx.y;
    int in_count = in_counts[batch_i];
    int in_count_i = threadIdx.x;
    int dim_i = blockIdx.x;
    if (in_count_i < in_count) {
        pool_shared_arr[threadIdx.x] = ins[batch_i * max_in_count + in_count_i][dim_i];
    } else {
        // Padding lanes get the identity for the comparison direction.
        pool_shared_arr[threadIdx.x] = pooling == PoolingEnum::MAX ? -INFINITY : INFINITY;
    }
    shared_indexers[threadIdx.x] = threadIdx.x;
    __syncthreads();
    for (int i = (blockDim.x >> 1); i > 0;i >>=1) {
        if (threadIdx.x < i) {
            int plus_i = threadIdx.x + i;
            if (pooling == PoolingEnum::MAX) {
                if (pool_shared_arr[threadIdx.x] < pool_shared_arr[plus_i]) {
                    pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i];
                    shared_indexers[threadIdx.x] = shared_indexers[plus_i];
                }
            } else {
                if (pool_shared_arr[threadIdx.x] > pool_shared_arr[plus_i]) {
                    pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i];
                    shared_indexers[threadIdx.x] = shared_indexers[plus_i];
                }
            }
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        hit_inputs[batch_i * dim + dim_i] = shared_indexers[0];
        outs[batch_i][dim_i] = pool_shared_arr[0];
    }
}

// Host wrapper: thread count is the smallest power of two (>= 8) covering the
// largest input count; shared memory holds 2 arrays of that size.
void PoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals,
        std::vector<dtype*> &vals, int count, const std::vector<int> &in_counts,
        int dim, int *hit_inputs) {
    int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
    int thread_count = 8;
    while (max_in_count > thread_count) {
        thread_count <<= 1;
    }
    dim3 block_dim(dim, count, 1);
    NumberPointerArray in_val_arr;
    in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
    NumberPointerArray val_arr;
    val_arr.init((dtype**)vals.data(), vals.size());
    IntArray in_count_arr;
    in_count_arr.init((int*)in_counts.data(), in_counts.size());
    hipLaunchKernelGGL(( KernelPoolForward), dim3(block_dim), dim3(thread_count), thread_count * 2 * sizeof(dtype), 0, pooling, in_val_arr.value, in_count_arr.value, max_in_count, val_arr.value, count, dim, hit_inputs);
    CheckCudaError();
}

// Max/min pool backward: each output-loss element is routed to the input that
// produced it (hit_inputs from the forward pass).
__global__ void KernelPoolBackward(const dtype ** losses, const int *hit_inputs,
        int max_in_count, int count, int dim, dtype **in_losses) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < dim * count; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        int input_i = hit_inputs[i];
        dtype loss = losses[count_i][dim_i];
        DeviceAtomicAdd(in_losses[count_i * max_in_count + input_i] + dim_i, loss);
    }
}

// Host wrapper for KernelPoolBackward.
void PoolBackward(const std::vector<dtype*> &losses, std::vector<dtype*> &in_losses,
        const std::vector<int> &in_counts, const int *hit_inputs,
        int count, int dim) {
    NumberPointerArray loss_arr, in_loss_arr;
    loss_arr.init((dtype**)losses.data(), losses.size());
    in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
    int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
    int block_count = (count * dim - 1 + TPB) / TPB;
    block_count = ::min(block_count, BLOCK_COUNT);
    hipLaunchKernelGGL(( KernelPoolBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)loss_arr.value, hit_inputs, max_in_count, count, dim, in_loss_arr.value);
    CheckCudaError();
}

// Sum/avg pooling forward: shared-memory tree sum over each sample's inputs;
// AVG divides the sum by the sample's input count at the end.
__global__ void KernelSumPoolForward(PoolingEnum pooling, const dtype **in_vals,
        int count, int dim, const int *in_counts, int max_in_count,
        dtype **vals) {
    __shared__ volatile extern dtype pool_shared_arr[];
    int batch_i = blockIdx.y;
    int in_count = in_counts[batch_i];
    int in_count_i = threadIdx.x;
    int dim_i = blockIdx.x;
    if (in_count_i < in_count) {
        pool_shared_arr[threadIdx.x] = in_vals[batch_i * max_in_count + in_count_i][dim_i];
    } else {
        pool_shared_arr[threadIdx.x] = 0.0f;
    }
    __syncthreads();
    for (int i = (blockDim.x >> 1); i > 0;i >>=1) {
        if (threadIdx.x < i) {
            int plus_i = threadIdx.x + i;
            pool_shared_arr[threadIdx.x] += pool_shared_arr[plus_i];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        vals[batch_i][dim_i] = pooling == PoolingEnum::SUM ?
pool_shared_arr[0] : pool_shared_arr[0] / in_counts[batch_i];
    }
}

// Host wrapper for KernelSumPoolForward: one block per (dim, sample) pair.
void SumPoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals,
        int count, int dim, const std::vector<int> &in_counts,
        std::vector<dtype*> &vals) {
    int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
    int thread_count = 8;
    while (max_in_count > thread_count) {
        thread_count <<= 1;
    }
    dim3 block_dim(dim, count, 1);
    NumberPointerArray in_val_arr;
    in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
    IntArray in_count_arr;
    in_count_arr.init((int*)in_counts.data(), in_counts.size());
    NumberPointerArray val_arr;
    val_arr.init((dtype**)vals.data(), vals.size());
    hipLaunchKernelGGL(( KernelSumPoolForward), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, pooling, (const dtype**)in_val_arr.value, count, dim, (const int*)in_count_arr.value, max_in_count, val_arr.value);
    CheckCudaError();
}

// Sum/avg pooling backward: every valid input of a sample receives the
// sample's loss (divided by the input count for AVG). Grid is
// (sample, input index); threads cover dims.
__global__ void KernelSumBackward(PoolingEnum pooling, const dtype **losses,
        const int *in_counts, int max_in_count, int count, int dim,
        dtype **in_losses) {
    int global_in_count_i = blockIdx.x * max_in_count + blockIdx.y;
    if (blockIdx.y < in_counts[blockIdx.x] && threadIdx.x < dim) {
        atomicAdd(in_losses[global_in_count_i] + threadIdx.x,
                pooling == PoolingEnum::SUM ? losses[blockIdx.x][threadIdx.x] :
                losses[blockIdx.x][threadIdx.x] / in_counts[blockIdx.x]);
    }
}

// Host wrapper for KernelSumBackward.
void SumPoolBackward(PoolingEnum pooling, const std::vector<dtype*> &losses,
        const std::vector<int> &in_counts, int count, int dim,
        std::vector<dtype*> &in_losses) {
    int thread_count = 8;
    while (thread_count < dim) {
        thread_count <<= 1;
    }
    int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
    dim3 block_dim(count, max_in_count, 1);
    NumberPointerArray loss_arr;
    loss_arr.init((dtype**)losses.data(), losses.size());
    IntArray in_count_arr;
    in_count_arr.init((int*)in_counts.data(), in_counts.size());
    NumberPointerArray in_loss_arr;
    in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
    hipLaunchKernelGGL(( KernelSumBackward), dim3(block_dim), dim3(thread_count), 0, 0, pooling, (const dtype**)loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, in_loss_arr.value);
    CheckCudaError();
}

// Scalar attention forward: one softmax weight per input vector (unnormeds
// indexed at [..][0]), softmax computed via exp + shared-memory sum, then a
// weighted sum of inputs per output dim. Writes weights to `masks` and the
// pooled result to `vals`.
__global__ void KernelScalarAttentionForward(const dtype** ins, const dtype **unnormeds,
        const int *in_counts, int max_in_count, int count, int dim,
        dtype **masks, dtype **vals) {
    __shared__ volatile extern dtype attention_shared_arr[];
    volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x;
    int count_i = blockIdx.y;
    int in_count = in_counts[count_i];
    int dim_i = blockIdx.x;
    int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x;
    dtype unnormed_mask = threadIdx.x < in_count ?
        cuda_exp(unnormeds[global_in_count_i][0]) : 0.0f;
    attention_shared_arr[threadIdx.x] = unnormed_mask;
    shared_unnormed_masks[threadIdx.x] = unnormed_mask;
    __syncthreads();
    // Sum of exp terms (softmax denominator).
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            int plus_i = threadIdx.x + i;
            attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
        }
        __syncthreads();
    }
    dtype mask = threadIdx.x < in_count ?
        shared_unnormed_masks[threadIdx.x] / attention_shared_arr[0] : 0.0f;
    if (threadIdx.x < in_count) {
        masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask;
    }
    dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f;
    attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ? mask * in : 0.0f;
    __syncthreads();
    // Weighted sum of inputs for this output dim.
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            int plus_i = threadIdx.x + i;
            attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0];
    }
}

// Host wrapper for KernelScalarAttentionForward.
void ScalarAttentionForward(const std::vector<dtype*> &ins,
        const std::vector<dtype*> &unnormeds, const std::vector<int> &in_counts,
        int count, int dim, std::vector<dtype*> &masks,
        std::vector<dtype*> &vals) {
    int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
    int thread_count = 8;
    while (max_in_count > thread_count) {
        thread_count <<= 1;
    }
    dim3 block_dim(dim, count, 1);
    NumberPointerArray in_arr;
    in_arr.init((dtype**)ins.data(), ins.size());
    NumberPointerArray unnormed_arr;
    unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size());
    NumberPointerArray mask_arr;
    mask_arr.init((dtype**)masks.data(), masks.size());
    NumberPointerArray val_arr;
    val_arr.init((dtype**)vals.data(), vals.size());
    IntArray in_count_arr;
    in_count_arr.init((int*)in_counts.data(), in_counts.size());
    hipLaunchKernelGGL(( KernelScalarAttentionForward), dim3(block_dim), dim3(thread_count), 2 * thread_count * sizeof(dtype), 0, (const dtype**)in_arr.value, (const dtype**)unnormed_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_arr.value, val_arr.value);
    CheckCudaError();
}

// First half of scalar attention backward: scatters mask-weighted output
// losses into per-input losses, and reduces loss·input dot products into
// mask_losses (one value per (sample, input)).
__global__ void KernelScalarAttentionMaskAndInLoss(const dtype **losses,
        const dtype **in_vals, const dtype **masks, const int *in_counts,
        int max_in_count, int count, int dim, dtype *mask_losses,
        dtype **in_losses) {
    // blockIdx.x : in_count_i
    // blockIdx.y : count_i
    // threadIdx.x : dim_i
    __shared__ extern volatile dtype att_mask_loss_shared_arr[];
    int in_count = in_counts[blockIdx.y];
    int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x;
    // Uniform per block (depends only on blockIdx), so this early return
    // cannot split a block around the __syncthreads below.
    if (in_count <= blockIdx.x) {
        return;
    }
    for (int i = threadIdx.x; i < dim; i += blockDim.x) {
        atomicAdd(in_losses[global_in_count_i] + i,
                losses[blockIdx.y][i] * masks[blockIdx.y][max_in_count * threadIdx.x + blockIdx.x]);
    }
    att_mask_loss_shared_arr[threadIdx.x] = 0.0f;
    for (int i = threadIdx.x; i < dim; i += blockDim.x) {
        att_mask_loss_shared_arr[threadIdx.x] += losses[blockIdx.y][i] * in_vals[global_in_count_i][i];
    }
    __syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            att_mask_loss_shared_arr[threadIdx.x] += att_mask_loss_shared_arr[threadIdx.x + i];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        mask_losses[global_in_count_i] = att_mask_loss_shared_arr[0];
    }
}

// Host wrapper for KernelScalarAttentionMaskAndInLoss; threads cover dims
// (capped at TPB, grid-stride inside the kernel handles the rest).
void ScalarAttentionMaskAndInLoss(const dtype** losses, const dtype** in_vals,
        const dtype **masks, const int *in_counts, int max_in_count,
        int count, int dim, dtype *mask_losses, dtype **in_losses) {
    dim3 block_dim(max_in_count, count, 1);
    int thread_count = 8;
    if (dim >= TPB) {
        thread_count = TPB;
    } else {
        while (dim > thread_count) {
            thread_count <<= 1;
        }
    }
    hipLaunchKernelGGL(( KernelScalarAttentionMaskAndInLoss), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, losses, in_vals, masks, in_counts, max_in_count, count, dim, mask_losses, in_losses);
    CheckCudaError();
}

// Second half of scalar attention backward: propagates mask_losses through
// the softmax into the unnormalized attention scores.
__global__ void KernelScalarAttentionBackward(const dtype** masks,
        const dtype *mask_losses, const int *in_counts, int max_in_count,
        int count, int dim, dtype **unnormed_losses) {
    __shared__ volatile extern dtype shared_att_bckwrd_arr[];
    int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x;
    int in_count = in_counts[blockIdx.x];
    if (threadIdx.x < in_count && blockIdx.y == 0) {
        atomicAdd(unnormed_losses[global_in_count_i],
                masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
mask_losses[global_in_count_i]);
    }
    shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ?
        masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] *
        mask_losses[global_in_count_i] : 0.0f;
    __syncthreads();
    // Sum of mask-weighted losses (softmax backward correction term).
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            shared_att_bckwrd_arr[threadIdx.x] += shared_att_bckwrd_arr[threadIdx.x + i];
        }
        __syncthreads();
    }
    if (threadIdx.x < in_count && blockIdx.y == 0) {
        atomicAdd(unnormed_losses[global_in_count_i],
                -shared_att_bckwrd_arr[0] * masks[blockIdx.x][threadIdx.x]);
    }
}

// Host driver for scalar attention backward: first computes per-input mask
// losses and input losses (ScalarAttentionMaskAndInLoss), then runs the
// softmax backward kernel to fill unnormed_losses.
void ScalarAttentionBackward(const std::vector<dtype*> &losses,
        const std::vector<dtype*> &in_vals, const std::vector<dtype*> &masks,
        const std::vector<int> &in_counts, int count, int dim,
        std::vector<dtype*> &in_losses, std::vector<dtype*> &unnormed_losses) {
    NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr, in_val_arr;
    loss_arr.init((dtype**)losses.data(), losses.size());
    mask_arr.init((dtype**)masks.data(), masks.size());
    in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
    unnormed_loss_arr.init((dtype**)unnormed_losses.data(), unnormed_losses.size());
    in_val_arr.init((dtype**)in_vals.data(), in_vals.size());
    IntArray in_count_arr;
    in_count_arr.init((int*)in_counts.data(), in_counts.size());
    int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
    NumberArray mask_loss_arr;
    mask_loss_arr.init(count * max_in_count);
    ScalarAttentionMaskAndInLoss((const dtype**)loss_arr.value,
            (const dtype**)in_val_arr.value, (const dtype**)mask_arr.value,
            (const int*)in_count_arr.value, max_in_count, count, dim,
            mask_loss_arr.value, in_loss_arr.value);
    dim3 block_dim(count, dim, 1);
    int thread_count = 8;
    while (thread_count < max_in_count) {
        thread_count <<= 1;
    }
    hipLaunchKernelGGL(( KernelScalarAttentionBackward), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, (const dtype**)mask_arr.value, (const dtype*)mask_loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, unnormed_loss_arr.value);
    CheckCudaError();
}

// Vector attention forward: like the scalar variant, but each output dim has
// its own softmax weight (unnormeds indexed at [..][blockIdx.x] instead of
// [..][0]).
__global__ void KernelVectorAttentionForward(const dtype** ins,
        const dtype **unnormeds, const int *in_counts, int max_in_count,
        int count, int dim, dtype **masks, dtype **vals) {
    __shared__ volatile extern dtype attention_shared_arr[];
    volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x;
    int count_i = blockIdx.y;
    int in_count = in_counts[count_i];
    int dim_i = blockIdx.x;
    int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x;
    dtype unnormed_mask = threadIdx.x < in_count ?
        cuda_exp(unnormeds[global_in_count_i][blockIdx.x]) : 0.0f;
    attention_shared_arr[threadIdx.x] = unnormed_mask;
    shared_unnormed_masks[threadIdx.x] = unnormed_mask;
    __syncthreads();
    // Softmax denominator for this output dim.
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            int plus_i = threadIdx.x + i;
            attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
        }
        __syncthreads();
    }
    dtype mask = threadIdx.x < in_count ?
        shared_unnormed_masks[threadIdx.x] / attention_shared_arr[0] : 0.0f;
    if (threadIdx.x < in_count) {
        masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask;
    }
    dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f;
    attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ? mask * in : 0.0f;
    __syncthreads();
    // Weighted sum of inputs for this output dim.
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            int plus_i = threadIdx.x + i;
            attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0];
    }
}

// Host wrapper for KernelVectorAttentionForward.
void VectorAttentionForward(const std::vector<dtype*> &ins,
        const std::vector<dtype*> &unnormeds, const std::vector<int> &in_counts,
        int count, int dim, std::vector<dtype*> &masks,
        std::vector<dtype*> &vals) {
    int max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
    int thread_count = 8;
    while (max_in_count > thread_count) {
        thread_count <<= 1;
    }
    dim3 block_dim(dim, count, 1);
    NumberPointerArray in_arr;
    in_arr.init((dtype**)ins.data(), ins.size());
    NumberPointerArray unnormed_arr;
    unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size());
    NumberPointerArray mask_arr;
    mask_arr.init((dtype**)masks.data(), masks.size());
    NumberPointerArray val_arr;
    val_arr.init((dtype**)vals.data(), vals.size());
    IntArray in_count_arr;
    in_count_arr.init((int*)in_counts.data(), in_counts.size());
    hipLaunchKernelGGL(( KernelVectorAttentionForward), dim3(block_dim), dim3(thread_count), 2 * thread_count * sizeof(dtype), 0, (const dtype**)in_arr.value, (const dtype**)unnormed_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_arr.value, val_arr.value);
    CheckCudaError();
}

// Vector attention backward, first half (definition continues past this
// chunk): scatters mask-weighted output losses into per-input losses.
__global__ void KernelVectorAttentionMaskAndInLoss(const dtype **losses,
        const dtype **in_vals, const dtype **masks, const int *in_counts,
        int max_in_count, int count, int dim, dtype **mask_losses,
        dtype **in_losses) {
    // blockIdx.x : in_count_i
    // blockIdx.y : count_i
    // threadIdx.x : dim_i
    int in_count = in_counts[blockIdx.y];
    int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x;
    if (in_count <= blockIdx.x) {
        return;
    }
    for (int i = threadIdx.x; i < dim; i += blockDim.x) {
        atomicAdd(in_losses[global_in_count_i] + i,
                losses[blockIdx.y][i] *
masks[blockIdx.y][max_in_count * i + blockIdx.x]); mask_losses[blockIdx.y][max_in_count * i + blockIdx.x] = losses[blockIdx.y][i] * in_vals[global_in_count_i][i]; } } void VectorAttentionMaskAndInLoss(const dtype** losses, const dtype** in_vals, const dtype** masks, const int *in_counts, int max_in_count, int count, int dim, dtype **mask_losses, dtype **in_losses) { dim3 block_dim(max_in_count, count, 1); int thread_count = 8; if (dim >= TPB) { thread_count = TPB; } else { while (dim > thread_count) { thread_count <<= 1; } } hipLaunchKernelGGL(( KernelVectorAttentionMaskAndInLoss), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, losses, in_vals, masks, in_counts, max_in_count, count, dim, mask_losses, in_losses); CheckCudaError(); } __global__ void KernelVectorAttentionBackward(const dtype** masks, const dtype **mask_losses, const int *in_counts, int max_in_count, int count, int dim, dtype **unnormed_losses) { __shared__ volatile extern dtype shared_att_bckwrd_arr[]; int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x; int in_count = in_counts[blockIdx.x]; if (threadIdx.x < in_count) { atomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y, masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x]); } shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ? 
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_att_bckwrd_arr[threadIdx.x] += shared_att_bckwrd_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x < in_count) { atomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y, -shared_att_bckwrd_arr[0] * masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x]); } } void VectorAttentionBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals, const std::vector<dtype*> &masks, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses, std::vector<dtype*> &unnormed_losses) { NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr, in_val_arr; loss_arr.init((dtype**)losses.data(), losses.size()); mask_arr.init((dtype**)masks.data(), masks.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); unnormed_loss_arr.init((dtype**)unnormed_losses.data(), unnormed_losses.size()); in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); std::vector<std::shared_ptr<NumberArray>> mask_losses; mask_losses.reserve(count); for (int i = 0; i < count; ++i) { std::shared_ptr<NumberArray> p = std::make_shared<NumberArray>(); p->init(max_in_count * dim); mask_losses.push_back(p); } std::vector<dtype*> raw_mask_losses; raw_mask_losses.reserve(count); for (auto &p : mask_losses) { raw_mask_losses.push_back(p->value); } NumberPointerArray mask_loss_arr; mask_loss_arr.init((dtype**)raw_mask_losses.data(), mask_losses.size()); VectorAttentionMaskAndInLoss((const dtype**)loss_arr.value, (const dtype**)in_val_arr.value, (const dtype**)mask_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, 
mask_loss_arr.value, in_loss_arr.value); dim3 block_dim(count, dim, 1); int thread_count = 8; while (thread_count < max_in_count) { thread_count <<= 1; } hipLaunchKernelGGL(( KernelVectorAttentionBackward), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, (const dtype**)mask_arr.value, (const dtype**)mask_loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, unnormed_loss_arr.value); CheckCudaError(); } __global__ void KernelPMultiForward(const dtype **ins1, const dtype **ins2, int count, int dim, dtype** vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; vals[count_i][dim_i] = ins1[count_i][dim_i] * ins2[count_i][dim_i]; } } void PMultiForward(const std::vector<dtype*> &ins1, const std::vector<dtype*> &ins2, int count, int dim, std::vector<dtype*> &vals) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray ins1_arr, ins2_arr, vals_arr; ins1_arr.init((dtype**)ins1.data(), count); ins2_arr.init((dtype**)ins2.data(), count); vals_arr.init((dtype**)vals.data(), count); hipLaunchKernelGGL(( KernelPMultiForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)ins1_arr.value, (const dtype**)ins2_arr.value, count, dim, vals_arr.value); CheckCudaError(); } __global__ void KernelPMultiBackward(const dtype **losses, const dtype **in_vals1, const dtype **in_vals2, int count, int dim, dtype** in_losses1, dtype** in_losses2) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; DeviceAtomicAdd(in_losses1[count_i] + dim_i, losses[count_i][dim_i] * in_vals2[count_i][dim_i]); DeviceAtomicAdd(in_losses2[count_i] + dim_i, losses[count_i][dim_i] * in_vals1[count_i][dim_i]); } } void PMultiBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals1, const std::vector<dtype*> 
&in_vals2, int count, int dim, std::vector<dtype*> &in_losses1, std::vector<dtype*> &in_losses2) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray losses_arr, in_vals1_arr, in_vals2_arr, in_losses1_arr, in_losses2_arr; losses_arr.init((dtype**)losses.data(), losses.size()); in_vals1_arr.init((dtype**)in_vals1.data(), in_vals1.size()); in_vals2_arr.init((dtype**)in_vals2.data(), in_vals2.size()); in_losses1_arr.init((dtype**)in_losses1.data(), in_losses1.size()); in_losses2_arr.init((dtype**)in_losses2.data(), in_losses2.size()); hipLaunchKernelGGL(( KernelPMultiBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)losses_arr.value, (const dtype**)in_vals1_arr.value, (const dtype**)in_vals2_arr.value, count, dim, in_losses1_arr.value, in_losses2_arr.value); CheckCudaError(); } __global__ void KernelPAddForward(const dtype*** ins, int count, int dim, int in_count, dtype **vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i+= step) { int count_i = i / dim; int dim_i = i % dim; dtype sum = ins[0][count_i][dim_i]; for (int j = 1; j < in_count; ++j) { sum += ins[j][count_i][dim_i]; } vals[count_i][dim_i] = sum; } } __global__ void KernelPDotForward(const dtype **in_vals1, const dtype **in_vals2, int count, int dim, dtype** vals) { volatile __shared__ extern dtype shared_val[]; if (threadIdx.x < dim) { shared_val[threadIdx.x] = in_vals1[blockIdx.x][threadIdx.x] * in_vals2[blockIdx.x][threadIdx.x]; } else { shared_val[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_val[threadIdx.x] += shared_val[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[blockIdx.x][0] = shared_val[0]; } } void PDotForward(const std::vector<dtype*> &ins1, const std::vector<dtype*> &ins2, int count, int dim, std::vector<dtype*> &vals) { NumberPointerArray in1_arr, in2_arr, val_arr; in1_arr.init((dtype**)ins1.data(), 
ins1.size()); in2_arr.init((dtype**)ins2.data(), ins2.size()); val_arr.init((dtype**)vals.data(), vals.size()); int thread_count = NextTwoIntegerPowerNumber(dim); hipLaunchKernelGGL(( KernelPDotForward), dim3(count), dim3(thread_count), thread_count * sizeof(dtype), 0, ( const dtype**)in1_arr.value, (const dtype**)in2_arr.value, count, dim, val_arr.value); CheckCudaError(); } __global__ void KernelPDotBackward(const dtype **losses, const dtype **in_vals1, const dtype **in_vals2, int count, int dim, dtype **in_losses1, dtype **in_losses2) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; atomicAdd(in_losses1[count_i] + dim_i, losses[count_i][0] * in_vals2[count_i][dim_i]); atomicAdd(in_losses2[count_i] + dim_i, losses[count_i][0] * in_vals1[count_i][dim_i]); } } void PDotBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals1, const std::vector<dtype*> &in_vals2, int count, int dim, std::vector<dtype*> &in_losses1, std::vector<dtype*> &in_losses2) { NumberPointerArray in1_loss_arr, in2_loss_arr, loss_arr, in_val1_arr, in_val2_arr; in1_loss_arr.init((dtype**)in_losses1.data(), in_losses1.size()); in2_loss_arr.init((dtype**)in_losses2.data(), in_losses2.size()); loss_arr.init((dtype**)losses.data(), losses.size()); in_val1_arr.init((dtype**)in_vals1.data(), in_vals1.size()); in_val2_arr.init((dtype**)in_vals2.data(), in_vals2.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelPDotBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)loss_arr.value, (const dtype**)in_val1_arr.value, (const dtype**)in_val2_arr.value, count, dim, in1_loss_arr.value, in2_loss_arr.value); CheckCudaError(); } void PAddForward(const std::vector<std::vector<dtype*>> &ins, int count, int dim, int in_count, std::vector<dtype*> &vals) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; 
gpu_addr.reserve(ins.size()); for (const std::vector<dtype*> &x : ins) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> ins_gpu; ins_gpu.reserve(ins.size()); for (auto &ptr : gpu_addr) { ins_gpu.push_back(ptr->value); } NumberPointerPointerArray in_arr; in_arr.init(ins_gpu.data(), ins_gpu.size()); NumberPointerArray out_arr; out_arr.init(vals.data(), vals.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelPAddForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype***)in_arr.value, count, dim, in_count, out_arr.value); CheckCudaError(); } __global__ void KernelPAddBackward(const dtype **losses, int count, int dim, int in_count, dtype ***in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int dim_mul_count = dim * count; for (int i = index; i < dim_mul_count * in_count; i += step) { int in_count_i = i / dim_mul_count; int dim_mul_count_i = i % dim_mul_count; int count_i = dim_mul_count_i / dim; int dim_i = dim_mul_count_i % dim; DeviceAtomicAdd(in_losses[in_count_i][count_i] + dim_i, losses[count_i][dim_i]); } } void PAddBackward(const std::vector<dtype*> &losses, int count, int dim, int in_count, std::vector<std::vector<dtype*>> &in_losses) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(in_losses.size()); for (const std::vector<dtype*> &x : in_losses) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> in_losses_gpu; in_losses_gpu.reserve(in_losses.size()); for (auto &ptr : gpu_addr) { in_losses_gpu.push_back(ptr->value); } NumberPointerPointerArray in_loss_arr; in_loss_arr.init(in_losses_gpu.data(), in_losses_gpu.size()); NumberPointerArray out_loss_arr; out_loss_arr.init((dtype**)losses.data(), losses.size()); int block_count = 
DefaultBlockCount(in_count * count * dim); hipLaunchKernelGGL(( KernelPAddBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)out_loss_arr.value, count, dim, in_count, in_loss_arr.value); CheckCudaError(); } __global__ void KernelSoftMaxLoss(const dtype **vals, dtype **losses, int *correct_count, int *answers, int batchsize, int count, int dim) { volatile __shared__ int opt_label; volatile __shared__ dtype shared_val[TPB]; volatile __shared__ int64_t max_indexes[TPB]; volatile __shared__ dtype scores_sum[TPB]; volatile __shared__ dtype scores[TPB]; int dim_i = threadIdx.x; int count_i = blockIdx.x; if (count_i == 0 && dim_i == 0) { *correct_count = 0; } shared_val[dim_i] = dim_i < dim ? vals[count_i][dim_i] : -INFINITY; max_indexes[dim_i] = dim_i; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (shared_val[threadIdx.x + i] > shared_val[threadIdx.x]) { // race shared_val[threadIdx.x] = shared_val[threadIdx.x + i]; // race max_indexes[threadIdx.x] = max_indexes[threadIdx.x + i]; // race } __syncthreads(); } if (threadIdx.x == 0) { opt_label = max_indexes[0]; if (answers[count_i] == opt_label) { atomicAdd(correct_count, 1); } } __syncthreads(); dtype max_score = vals[count_i][opt_label]; dtype score = dim_i < dim ? cuda_exp(vals[count_i][dim_i] - max_score) : 0.0f; scores[dim_i] = score; scores_sum[dim_i] = score; for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { scores_sum[threadIdx.x] = scores_sum[threadIdx.x] + scores_sum[threadIdx.x + i]; // race __syncthreads(); } if (dim_i < dim) { losses[count_i][dim_i] = (scores[dim_i] / scores_sum[0] - (dim_i == answers[count_i] ? 
1 : 0)) / batchsize; } } void SoftMaxLoss(const std::vector<dtype*> &vals, std::vector<dtype*> &losses, int *correct_count, const std::vector<int> &answers, int batchsize, int count, int dim) { if (dim > TPB) { abort(); } int thread_count = NextTwoIntegerPowerNumber(dim); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray answer_arr; answer_arr.init((int*)answers.data(), answers.size()); hipLaunchKernelGGL(( KernelSoftMaxLoss), dim3(count), dim3(thread_count), 0, 0, const_cast<const dtype **>(val_arr.value), const_cast<dtype **>(loss_arr.value), correct_count, answer_arr.value, batchsize, count, dim); CheckCudaError(); } __global__ void Predict(const dtype *val, int dim, int *result) { __shared__ volatile dtype shared_vals[TPB]; __shared__ volatile dtype shared_indexes[TPB]; shared_indexes[threadIdx.x] = threadIdx.x; if (threadIdx.x < dim) { shared_vals[threadIdx.x] = val[threadIdx.x]; } else { shared_vals[threadIdx.x] = -10000000.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (shared_vals[threadIdx.x] < shared_vals[threadIdx.x + i]) { shared_vals[threadIdx.x] = shared_vals[threadIdx.x + i]; shared_indexes[threadIdx.x] = shared_indexes[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_indexes[0]; } } int Predict(const dtype* val, int dim) { if (dim > TPB) { abort(); } int thread_count = NextTwoIntegerPowerNumber(dim); DeviceInt result; result.init(); hipLaunchKernelGGL(( Predict), dim3(1), dim3(thread_count), 0, 0, val, dim, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelMax(const dtype *const *v, int count, int dim, dtype *block_maxes, int *block_max_is, int *block_counters, int *max_indexes, dtype *max_vals) { __shared__ volatile dtype shared_max[TPB]; __shared__ volatile dtype shared_max_i[TPB]; __shared__ volatile bool 
is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_max[threadIdx.x] = offset < dim ? v[count_i][offset] : -INFINITY; shared_max_i[threadIdx.x] = offset; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_maxes[block_maxes_offset] = shared_max[0]; block_max_is[block_maxes_offset] = shared_max_i[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype max = -INFINITY; int max_i; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; if (block_maxes[offset] > max) { max = block_maxes[offset]; max_i = block_max_is[offset]; } } shared_max[threadIdx.x] = max; shared_max_i[threadIdx.x] = max_i; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { max_vals[count_i] = shared_max[0]; max_indexes[count_i] = shared_max_i[0]; } } } __global__ void KernelSingleMax(const dtype *const *v, int count, int dim, int *max_indexes, dtype *max_vals) { for (int count_i = 0; count_i < count; ++count_i) { dtype max_val = -INFINITY; int max_i; for (int dim_i = 0; dim_i < dim; ++ dim_i) { if (v[count_i][dim_i] > max_val) { max_val = v[count_i][dim_i]; max_i = dim_i; } } max_indexes[count_i] = max_i; max_vals[count_i] = max_val; } 
} void Max(const dtype *const *v, int count, int dim, int *max_indexes, dtype *max_vals) { int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB); int block_y_count = (dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); // cout << format("Max count:%1% dim:%2% thread_count:%3% block_y_count:%4%") % count % dim % thread_count // % block_y_count << endl; NumberArray block_maxes; block_maxes.init(block_y_count * count); IntArray block_max_is, block_counters; block_max_is.init(block_y_count * count); block_counters.init(count); hipLaunchKernelGGL(( KernelMax), dim3(block_dim), dim3(thread_count), 0, 0, v, count, dim, block_maxes.value, block_max_is.value, block_counters.value, max_indexes, max_vals); #if TEST_CUDA NumberArray max_val_arr; IntArray max_indexer_arr; max_val_arr.init(count); max_indexer_arr.init(count); hipLaunchKernelGGL(( KernelSingleMax), dim3(1), dim3(1), 0, 0, v, count, dim, max_indexer_arr.value, max_val_arr.value); vector<int> max_indexer_target(count), max_indexer_gold(count); MyCudaMemcpy(max_indexer_target.data(), max_indexes, count * sizeof(int), hipMemcpyDeviceToHost); MyCudaMemcpy(max_indexer_gold.data(), max_indexer_arr.value, count * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < count; ++i) { if (max_indexer_target.at(i) != max_indexer_gold.at(i)) { cerr << format("max_indexer_target:%1% max_indexer_gold:%2%") % max_indexer_target.at(i) % max_indexer_gold.at(i) << endl; abort(); } } #endif CheckCudaError(); } __global__ void KernelExp(const dtype *const *in, int count, int dim, const dtype *number_to_sub, dtype *const *out) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; out[count_i][dim_i] = cuda_exp(in[count_i][dim_i] - number_to_sub[count_i]); } } void Exp(const dtype *const *in, int count, int dim, const dtype *number_to_sub, dtype *const *out) { int block_count = 
DefaultBlockCount(dim * count); //cout << format("Exp count:%1% dim:%2% block_count:%3%") % count % dim % block_count << endl; hipLaunchKernelGGL(( KernelExp), dim3(block_count), dim3(TPB), 0, 0, in, count, dim, number_to_sub, out); CheckCudaError(); } __global__ void KernelSum(const dtype *const *v, int count, int dim, dtype *block_sums, int *block_counters, dtype *sum_vals) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dim ? v[count_i][offset] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_sums[block_sums_offset] = shared_sum[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; sum += block_sums[offset]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { sum_vals[count_i] = shared_sum[0]; } } } void Sum(const dtype *const *v, int count, int dim, dtype *sum_vals) { int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB); int block_y_count = (dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_sums; block_sums.init(block_y_count * count); IntArray block_counters; block_counters.init(count); hipLaunchKernelGGL(( KernelSum), dim3(block_dim), dim3(thread_count), 0, 
0, v, count, dim, block_sums.value, block_counters.value, sum_vals); CheckCudaError(); } __global__ void KernelSoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals, const dtype *sums, const dtype *max_vals, const int *answers, dtype reverse_batchsize, dtype **grads, dtype *losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype loss = exps[count_i][dim_i] / sums[count_i]; if (dim_i == answers[count_i]) { loss -= 1.0f; } grads[count_i][dim_i] = loss * reverse_batchsize; losses[count_i] = (cuda_log(sums[count_i]) - vals[count_i][answers[count_i]] + max_vals[count_i]) * reverse_batchsize; } } void SoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals, const dtype *sums, const dtype *max_vals, const int *answers, dtype reverse_batchsize, dtype **grads, dtype *losses) { int block_count = DefaultBlockCount(dim * count); hipLaunchKernelGGL(( KernelSoftMaxLossByExp), dim3(block_count), dim3(TPB), 0, 0, exps, count, dim, vals, sums, max_vals, answers, reverse_batchsize, grads, losses); CheckCudaError(); } std::pair<dtype, std::vector<int>> SoftMaxLoss(const std::vector<const dtype *> &vals_vector, int count, int dim, const std::vector<int> &gold_answers, int batchsize, const std::vector<dtype *> &losses_vector) { IntArray answer_arr, gold_answer_arr; answer_arr.init(count); gold_answer_arr.init((int*)gold_answers.data(), count); NumberArray max_vals, sum_vals; max_vals.init(count); sum_vals.init(count); NumberPointerArray vals, losses; vals.init((dtype**)vals_vector.data(), count); losses.init((dtype**)losses_vector.data(), count); Max(vals.value, count, dim, answer_arr.value, max_vals.value); Exp(vals.value, count, dim, max_vals.value, losses.value); Sum(losses.value, count, dim, sum_vals.value); NumberArray loss_arr; loss_arr.init(count); SoftMaxLossByExp(losses.value, count, dim, 
vals.value, sum_vals.value, max_vals.value, gold_answer_arr.value, 1.0 / batchsize, losses.value, loss_arr.value); vector<int> answers(count); MyCudaMemcpy(answers.data(), answer_arr.value, count * sizeof(int), hipMemcpyDeviceToHost); vector<dtype> loss_vector(count); MyCudaMemcpy(loss_vector.data(), loss_arr.value, count * sizeof(dtype), hipMemcpyDeviceToHost); dtype loss_sum = accumulate(loss_vector.begin(), loss_vector.end(), 0.0f); return std::make_pair(loss_sum, answers); } __global__ void KernelSquareSum(const dtype *v, int len, dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { is_last_block = false; } shared_sum[threadIdx.x] = 0.0f; for (int i = index; i < len; i += blockDim.x * gridDim.x) { shared_sum[threadIdx.x] += v[i] * v[i]; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, int len) { int block_count = DefaultBlockCount(len); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); hipLaunchKernelGGL(( KernelSquareSum), dim3(block_count), dim3(TPB), 0, 0, v, len, global_sum.value, block_counter.value, result.value); 
CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelSquareSum(const dtype *v, const bool *indexers, int count, int dim, dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { global_sum[blockIdx.x] = 0.0f; is_last_block = false; } int count_i = index / dim; if (index < count * dim && indexers[count_i]) { shared_sum[threadIdx.x] = v[index] * v[index]; } else { shared_sum[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { float sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, const bool *indexers, int count, int dim) { int block_count = DefaultBlockCountWithoutLimit(count * dim); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); hipLaunchKernelGGL(( KernelSquareSum), dim3(block_count), dim3(TPB), 0, 0, v, indexers, count, dim, global_sum.value, block_counter.value, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelRescale(dtype *v, int len, dtype scale) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i += step) { v[i] *= 
scale; } } void Rescale(dtype *v, int len, dtype scale) { int block_count = DefaultBlockCount(len); hipLaunchKernelGGL(( KernelRescale), dim3(block_count), dim3(TPB), 0, 0, v, len, scale); CheckCudaError(); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps, dtype x) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (!is_bias) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x; dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); dtype x = 1.0f / (1 - pow(belta1, iter + 1)); hipLaunchKernelGGL(( KernelUpdateAdam), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, is_bias, aux_mean, aux_square, iter, belta1, belta2, alpha, reg, eps, x); CheckCudaError(); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / row; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - 
cuda_pow(belta2, iters[count_i] + 1)) / (1 - cuda_pow(belta1, iters[count_i] + 1)); dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } } __global__ void KernelSelfPlusIters(const bool *indexers, int *iters, int count) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count; i += step) { if (indexers[i]) { ++iters[i]; } } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); hipLaunchKernelGGL(( KernelUpdateAdam), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_mean, aux_square, indexers, iters, belta1, belta2, alpha, reg, eps); CheckCudaError(); block_count = DefaultBlockCount(col); hipLaunchKernelGGL(( KernelSelfPlusIters), dim3(block_count), dim3(TPB), 0, 0, indexers, iters, col); CheckCudaError(); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); hipLaunchKernelGGL(( KernelUpdateAdagrad), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_square, alpha, reg, eps); CheckCudaError(); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = 
DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / col; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); hipLaunchKernelGGL(( KernelUpdateAdagrad), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_square, indexers, alpha, reg, eps); CheckCudaError(); } void *GraphHostAlloc() { void *m; CallCuda(hipHostMalloc(&m, 10000000, hipHostMallocWriteCombined)); if (m == NULL) { abort(); } return m; } }
5c27a946911d6f1857f5a534b355e607ba4b2f7f.cu
#include "N3LDG_cuda.h" #include <array> #include <boost/format.hpp> #include <cstdlib> #include <cstddef> #include <vector> #include <algorithm> #include <cmath> #include <cstdio> #include <cublas_v2.h> #include "Printf_cuda.cuh" #include "Printf_cuda.cu" #include "Memory_cuda.h" #include <curand.h> #include <curand_kernel.h> #include "cnmem.h" #include <string> #include <utility> #include <cstring> #include <cstdint> #include <chrono> #include <thread> #include <numeric> #include <memory> #include "profiler.h" namespace n3ldg_cuda { using namespace std; using boost::format; #if USE_FLOAT #define cuda_sqrt(x) sqrtf(x) #define cuda_pow(x, y) powf(x, y) #define cuda_tanh(x) tanhf(x) #define cuda_exp(x) __expf(x) #define cuda_log(x) logf(x) #else #define cuda_sqrt(x) sqrt(x) #define cuda_pow(x, y) pow(x, y) #define cuda_tanh(x) tanh(x) #define cuda_exp(x) exp(x) #define cuda_log(x) log(x) #endif #define KERNEL_LOG #ifdef KERNEL_LOG #define KernelPrintLine(format, ...)\ {\ cuPrintf("block:x=%d,y=%d thread:x=%d,y=%d "#format"\n", blockIdx.x,\ blockIdx.y, threadIdx.x, threadIdx.y,__VA_ARGS__);\ } #else #define KernelPrintLine(format, ...) 
#endif constexpr int TPB = 1024; constexpr int BLOCK_COUNT = 56; void CallCuda(cudaError_t status) { if (status != cudaSuccess) { cout << "cuda error:" << cudaGetErrorString(status) << endl; abort(); } } #if TEST_CUDA void CheckCudaError() { cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { std::cout << "cuda error:" << cudaGetErrorName(error) << std::endl; std::cout << "cuda error:" << cudaGetErrorString(error) << std::endl; abort(); } } #else #define CheckCudaError() #endif void CallCnmem(cnmemStatus_t status) { assert(status == CNMEM_STATUS_SUCCESS); } void CallCublas(cublasStatus_t status) { assert(status == CUBLAS_STATUS_SUCCESS); } void CallCurand(curandStatus status) { assert(status == CURAND_STATUS_SUCCESS); } cublasHandle_t& GetCublasHandle() { static cublasHandle_t handle; static bool init; if (!init) { init = true; CallCublas(cublasCreate(&handle)); } return handle; } cudaError_t MyCudaMemcpy(void *dest, const void *src, size_t count, cudaMemcpyKind kind) { cudaError_t e; e = cudaMemcpy(dest, src, count, kind); CallCuda(e); return e; } void NumberPointerArray::init(dtype **host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype*))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*), cudaMemcpyHostToDevice)); this->len = len; } NumberPointerArray::~NumberPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } int NextTwoIntegerPowerNumber(int number) { int result = 1; while (number > result) { result <<= 1; } return result; } void NumberPointerPointerArray::init(dtype ***host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype**))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*), cudaMemcpyHostToDevice)); this->len = len; } 
NumberPointerPointerArray::~NumberPointerPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void NumberArray::init(int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype))); this->len = len; } void NumberArray::init(dtype *host_arr, int len) { init(len); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype), cudaMemcpyHostToDevice)); } NumberArray::~NumberArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceInt::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceInt::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(int), cudaMemcpyDeviceToHost)); } void DeviceInt::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, &v, sizeof(int), cudaMemcpyHostToDevice)); } DeviceInt::~DeviceInt() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceNumber::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceNumber::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(dtype), cudaMemcpyDeviceToHost)); } DeviceNumber::~DeviceNumber() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void IntPointerArray::init(int **host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int*))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int*), cudaMemcpyHostToDevice)); this->len = len; } IntPointerArray::~IntPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void IntArray::init(int *host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } 
CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int), cudaMemcpyHostToDevice)); this->len = len; } void IntArray::init(int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int))); this->len = len; } IntArray::~IntArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void BoolArray::init(bool *host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(bool))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool), cudaMemcpyHostToDevice)); this->len = len; } void BoolArray::copyFromHost(bool *host_arr) { CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool), cudaMemcpyHostToDevice)); } void BoolArray::copyToHost(bool *host_arr) { CallCuda(MyCudaMemcpy(host_arr, value, len * sizeof(bool), cudaMemcpyDeviceToHost)); } BoolArray::~BoolArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor1D::init(int dim) { initOnDevice(dim); #if TEST_CUDA v = new dtype[dim]; zero(); #endif } void Tensor1D::initOnMemoryAndDevice(int dim) { initOnDevice(dim); v = new dtype[dim]; zero(); } void Tensor1D::initOnDevice(int dim) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, dim * sizeof(dtype))); this->dim = dim; } Tensor1D::Tensor1D(const Tensor1D &t) { dim = t.dim; memcpy(v, t.v, dim *sizeof(dtype)); CallCuda(MyCudaMemcpy(value, t.value, dim * sizeof(dtype), cudaMemcpyDeviceToDevice)); } Tensor1D::~Tensor1D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor1D::copyFromHostToDevice() { assert(v != NULL); assert(value != NULL); CallCuda(MyCudaMemcpy(value, v, dim * sizeof(dtype), cudaMemcpyHostToDevice)); } void Tensor1D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, dim * sizeof(dtype), 
cudaMemcpyDeviceToHost)); } void Tensor2D::initOnMemoryAndDevice(int row, int col) { initOnDevice(row, col); v = new dtype[row * col]; zero(); } void Tensor2D::init(int row, int col) { initOnDevice(row, col); #if TEST_CUDA v = new dtype[row * col]; zero(); #endif } void Tensor2D::initOnDevice(int row, int col) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, row * col * sizeof(dtype))); this->row = row; this->col = col; this->size = row * col; } Tensor2D::Tensor2D(const Tensor2D &t) { row = t.row; col = t.col; memcpy(v, t.v, sizeof(dtype) * row * col); CallCuda(MyCudaMemcpy(value, t.value, sizeof(dtype) * row * col, cudaMemcpyDeviceToDevice)); } Tensor2D::~Tensor2D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor2D::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, v, size * sizeof(dtype), cudaMemcpyHostToDevice)); } void Tensor2D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, size * sizeof(dtype), cudaMemcpyDeviceToHost)); } void Assert(bool v) { #if TEST_CUDA if (!v) { abort(); } #endif } __device__ void DeviceAtomicAdd(dtype* address, dtype value) { float old = value; float new_old; do { new_old = atomicExch(address, 0.0); new_old += old; } while ((old = atomicExch(address, new_old))!=0.0); }; __device__ dtype cuda_dtanh(dtype y) { return 1.0f - y * y; } __device__ dtype cuda_sigmoid(dtype x) { return 1.0f / (1.0f + cuda_exp(-x)); } __device__ dtype cuda_dsigmoid(dtype y) { return y * (1.0f - y); } __device__ dtype cuda_relu(dtype x) { return x > 0.0f ? x : 0.0f; } __device__ dtype cuda_drelu(dtype x) { return x > 0.0f ? 1 : 0.0f; } __device__ dtype cuda_leaky_relu(dtype x) { return x > 0.0f ? x : -0.1f * x; } __device__ dtype cuda_dleaky_relu(dtype x) { return x > 0.0f ? 1.0f : -0.1f; } const dtype SELU_LAMBDA = 1.0507009873554804934193349852946; const dtype SELU_ALPHA = 1.6732632423543772848170429916717; __device__ dtype cuda_selu(dtype x) { return x <= 0.0f ? 
SELU_LAMBDA * SELU_ALPHA * (cuda_exp(x) - 1.0f) : SELU_LAMBDA * x; } __device__ dtype cuda_dselu(dtype x, dtype y) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA + y : SELU_LAMBDA; } void Random(dtype *v, int len, dtype bound) { dtype *mem = (dtype*)malloc(len * sizeof(dtype)); assert(mem != NULL); dtype min = -bound, max = bound; for (int i = 0; i < len; i++) { mem[i] = (dtype(rand()) / RAND_MAX) * (max - min) + min; } CallCuda(MyCudaMemcpy(v, mem, len * sizeof(dtype), cudaMemcpyHostToDevice)); free(mem); } __device__ int DeviceDefaultIndex() { return blockIdx.x * blockDim.x + threadIdx.x; } __device__ int DeviceDefaultStep() { return gridDim.x * blockDim.x; } __device__ dtype DeviceAbs(dtype d) { return d > 0 ? d : -d; } int DefaultBlockCount(int len) { int block_count = (len - 1 + TPB) / TPB; return std::min(block_count, BLOCK_COUNT); } int DefaultBlockCountWithoutLimit(int len) { return (len - 1 + TPB) / TPB; } __global__ void KernelZero(dtype *v, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) { return; } v[index] = 0; } void Zero(dtype *v, int len) { int block_count = (len - 1 + TPB) / TPB; KernelZero<<<block_count, TPB>>>(v, len); CheckCudaError(); } __global__ void PrintPointers(void **p, int len) { for (int i = 0; i < len; ++i) { printf("%p\n", p[i]); } } __global__ void KernelPrintNums(const dtype* p, int len) { for (int i = 0; i < len; ++i) { printf("%f\n", p[i]); } } void PrintNums(const dtype* p, int len) { KernelPrintNums<<<1, 1>>>(p, len); cudaDeviceSynchronize(); CheckCudaError(); } __global__ void KernelPrintInts(const int* p, int len) { for (int i = 0; i < len; ++i) { printf("%d\n", p[i]); } } void PrintInts(const int* p, int len) { KernelPrintInts<<<1, 1>>>(p, len); cudaDeviceSynchronize(); CheckCudaError(); } void InitCuda(int device_id) { std::cout << "device_id:" << device_id << std::endl; CallCuda(cudaSetDeviceFlags(cudaDeviceMapHost)); #if DEVICE_MEMORY == 0 cnmemDevice_t device; device.size = 10000000000; 
device.device = device_id; cnmemInit(1, &device, CNMEM_FLAGS_DEFAULT); #else CallCuda(cudaSetDevice(device_id)); #endif CallCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); CallCuda(cudaPrintfInit()); } void EndCuda() { cudaPrintfEnd(); Profiler::Ins().Print(); } __global__ void KernelCopyFromOneVectorToMultiVectors(const dtype *src, dtype **dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i / len; int len_i = i % len; dest[count_i][len_i] = src[i]; } } void CopyFromOneVectorToMultiVals(const dtype *src, std::vector<dtype*> &vals, int count, int len) { NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); int block_count = (len * count - 1 + TPB) / TPB; block_count = std::min(block_count, BLOCK_COUNT); KernelCopyFromOneVectorToMultiVectors<<<block_count, TPB>>>(src, val_arr.value, count, len); CheckCudaError(); } void CopyFromHostToDevice(const std::vector<dtype*> &src, std::vector<dtype*> &dest, int count, int dim) { dtype *long_src = (dtype*)malloc(count * dim * sizeof(dtype)); if (long_src == NULL) { std::cout << "out of memory!" 
<< std::endl; abort(); } for (int i = 0; i < count; ++i) { memcpy(long_src + i * dim, src.at(i), dim * sizeof(dtype)); } dtype *long_dest = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&long_dest, count * dim * sizeof(dtype*))); CallCuda(cudaMemcpy(long_dest, long_src, count * dim * sizeof(dtype*), cudaMemcpyHostToDevice)); CopyFromOneVectorToMultiVals(long_dest, dest, count, dim); free(long_src); CallCuda(MemoryPool::Ins().Free(long_dest)); } __global__ void KernelCopyFromMultiVectorsToOneVector(const dtype **src, dtype *dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i / len; int len_i = i % len; dest[i] = src[count_i][len_i]; } } void CopyFromMultiVectorsToOneVector(const std::vector<dtype*> &src, dtype *dest, int count, int len) { NumberPointerArray src_arr; src_arr.init((dtype**)src.data(), src.size()); int block_count = DefaultBlockCount(len * count); KernelCopyFromMultiVectorsToOneVector<<<block_count, TPB>>>( (const dtype**)src_arr.value, dest, count, len); CheckCudaError(); } void CopyFromDeviceToHost(const std::vector<dtype*> &src, std::vector<dtype*> &dest, int count, int dim) { dtype *long_src = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&long_src, count * dim * sizeof(dtype*))); CopyFromMultiVectorsToOneVector(src, long_src, count, dim); dtype *long_dest = (dtype*)malloc(count * dim * sizeof(dtype)); if (long_dest == NULL) { std::cout << "out of memory!" 
<< std::endl; abort(); } CallCuda(cudaMemcpy(long_dest, long_src, count * dim * sizeof(dtype), cudaMemcpyDeviceToHost)); for (int i = 0; i < count; ++i) { memcpy(dest.at(i), long_dest + i * dim, dim * sizeof(dtype)); } CallCuda(MemoryPool::Ins().Free(long_src)); free(long_dest); } __global__ void KernelActivated(ActivatedEnum activated, const dtype *src, dtype**dest, dtype* dest2, int count, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = blockDim.x * gridDim.x; for (int i = index; i < len * count; i += step) { int count_i = i / len; int len_i = i % len; dtype result; if (activated == ActivatedEnum::TANH) { result = cuda_tanh(src[i]); } else if (activated == ActivatedEnum::SIGMOID) { result = cuda_sigmoid(src[i]); } else if (activated == ActivatedEnum::RELU) { result = cuda_relu(src[i]); } else if (activated == ActivatedEnum::LEAKY_RELU) { result = cuda_leaky_relu(src[i]); } else if (activated == ActivatedEnum::SELU) { result = cuda_selu(src[i]); } else { printf("KernelActivated error\n"); return; } dest[count_i][len_i] = result; dest2[i] = result; } } void Activated(ActivatedEnum activated, const dtype *src, const std::vector<dtype*>& dest, dtype *dest2, int len) { int count = dest.size(); NumberPointerArray dest_arr; dest_arr.init((dtype**)dest.data(), dest.size()); int block_count = std::min((len * count - 1 + TPB) / TPB, BLOCK_COUNT); KernelActivated<<<block_count, TPB>>>(activated, src, dest_arr.value, dest2, count, len); CheckCudaError(); } __global__ void KernelTanhForward(ActivatedEnum activated, const dtype** xs, int count, int dim, dtype**ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (activated == ActivatedEnum::TANH) { ys[count_i][dim_i] = cuda_tanh(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { ys[count_i][dim_i] = cuda_sigmoid(xs[count_i][dim_i]); } else { printf("error\n"); } } } 
void TanhForward(ActivatedEnum activated, const std::vector<dtype*> &xs, int count, int dim, std::vector<dtype*> &ys) { NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); KernelTanhForward<<<block_count, TPB>>>(activated, (const dtype**)x_arr.value, count, dim, y_arr.value); CheckCudaError(); } __global__ void KernelTanhBackward(ActivatedEnum activated, const dtype **losses, const dtype **vals, int count, int dim, dtype** in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype v; if (activated == ActivatedEnum::TANH) { v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i] * vals[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i]) * vals[count_i][dim_i]; } atomicAdd(in_losses[count_i] + dim_i, v); } } void TanhBackward(ActivatedEnum activated, const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, std::vector<dtype*> &in_losses) { NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * dim); KernelTanhBackward<<<block_count, TPB>>>(activated ,(const dtype**)loss_arr.value, (const dtype**)val_arr.value, count, dim, in_loss_arr.value); CheckCudaError(); } __global__ void KernelDropoutForward(const dtype** xs, int count, int dim, bool is_training, const dtype* drop_mask, dtype drop_factor, dtype**ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (is_training) { if (drop_mask[i] < drop_factor) { 
ys[count_i][dim_i] = 0.0f; } else { ys[count_i][dim_i] = xs[count_i][dim_i]; } } else { ys[count_i][dim_i] = (1 - drop_factor) * xs[count_i][dim_i]; } } } void DropoutForward(const std::vector<dtype*> &xs, int count, int dim, bool is_training, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &ys) { if (drop_factor < 0 || drop_factor >= 1.0f) { std::cerr << "drop value is " << drop_factor << std::endl; abort(); } NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); KernelDropoutForward<<<block_count, TPB>>>((const dtype**)x_arr.value, count, dim, is_training, drop_mask, drop_factor, y_arr.value); CheckCudaError(); } __global__ void KernelDropoutBackward(const dtype **losses, const dtype **vals, int count, int dim, bool is_training, const dtype* drop_mask, dtype drop_factor, dtype** in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (is_training) { if (drop_mask[i] >= drop_factor) { atomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]); } } else { atomicAdd(in_losses[count_i] + dim_i, (1 - drop_factor) * losses[count_i][dim_i]); } } } void DropoutBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, bool is_training, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &in_losses) { if (drop_factor < 0 || drop_factor >= 1) { std::cerr << "drop value is " << drop_factor << std::endl; abort(); } NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * dim); KernelDropoutBackward<<<block_count, TPB>>>((const dtype**)loss_arr.value, (const 
dtype**)val_arr.value, count, dim, is_training, drop_mask, drop_factor, in_loss_arr.value); CheckCudaError(); } __global__ void KernelCopyForUniNodeForward(const dtype** xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; int x_total_len = count * x_len; int b_total_len = count * b_len; for (int i = index; i < x_total_len + b_total_len; i += step) { if (i < x_total_len) { int count_i = i / x_len; int len_i = i % x_len; xs_dest[i] = xs[count_i][len_i]; } else if (use_b) { int b_i = i - x_total_len; int len_i = b_i % b_len; b_dest[b_i] = b[len_i]; } } } void CopyForUniNodeForward(const std::vector<dtype*> &xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { NumberPointerArray x_arr; x_arr.init((dtype**)xs.data(), xs.size()); int len = x_len + b_len; int block_count = std::min((count * len - 1 + TPB) / TPB, 56); KernelCopyForUniNodeForward<<<block_count, TPB>>>( (const dtype**)x_arr.value, (const dtype*)b, xs_dest, b_dest, count, x_len, b_len, use_b); CheckCudaError(); } __global__ void KernelCopyForBiNodeForward(const dtype **x1s, const dtype **x2s, const dtype *b, dtype *x1s_dest, dtype *x2s_dest, dtype *b_dest, int count, int x1_len, int x2_len, int b_len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int x1_total_len = count * x1_len; int x2_total_len = count * x2_len; int b_total_len = count * b_len; int total_len = x1_total_len + x2_total_len + b_total_len; for (int i = index; i < total_len; i += step) { if (i < x2_total_len) { int len_i = i % x2_len; int count_i = i / x2_len; x2s_dest[i] = x2s[count_i][len_i]; } else if (i >= x2_total_len && i < x1_total_len + x2_total_len) { int len_i = (i - x2_total_len) % x1_len; int count_i = (i - x2_total_len) / x1_len; x1s_dest[i - x2_total_len] = x1s[count_i][len_i]; } else { int b_i = (i - x1_total_len - x2_total_len); int 
len_i = b_i % b_len; b_dest[b_i] = b[len_i]; } } } void CopyForBiNodeForward(const std::vector<dtype*>& x1s, const std::vector<dtype *>& x2s, const dtype *b, dtype *x1s_dest, dtype *x2s_dest, dtype *b_dest, int count, int x1_len, int x2_len, int b_len) { int len = x1_len + x2_len + b_len; int block_count = DefaultBlockCount(count * len); NumberPointerArray x1_arr, x2_arr; x1_arr.init((dtype**)x1s.data(), x1s.size()); x2_arr.init((dtype**)x2s.data(), x2s.size()); KernelCopyForBiNodeForward<<<block_count, TPB>>>( (const dtype**)x1_arr.value, (const dtype**)x2_arr.value, b, x1s_dest, x2s_dest, b_dest, count, x1_len, x2_len, b_len); CheckCudaError(); } void MatrixMultiplyMatrix(dtype *W, dtype *x, dtype *y, int row, int col, int count, bool useb, bool should_x_transpose, bool should_W_transpose) { cublasHandle_t &handle = GetCublasHandle(); dtype alpha = 1; dtype beta = useb? 1 : 0; cublasOperation_t x_op = should_x_transpose ? CUBLAS_OP_T : CUBLAS_OP_N; int ldx = should_x_transpose ? count : col; cublasOperation_t W_op = should_W_transpose ? CUBLAS_OP_T : CUBLAS_OP_N; int ldw = should_W_transpose ? 
col : row; #if USE_FLOAT CallCublas(cublasSgemm(handle, W_op, x_op, row, count, col, &alpha, W, ldw, x, ldx, &beta, y, row)); #else CallCublas(cublasDgemm(handle, W_op, x_op, row, count, col, &alpha, W, ldw, x, ldx, &beta, y, row)); #endif } __global__ void KernelVerify(dtype *host, dtype *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { dtype loss = host[index] - device[index]; if (DeviceAbs(loss) > 0.0001) { *success = false; printf("KernelVerify %s: host:%f device:%f loss:%f index:%d\n", message, host[index], device[index], loss, index); KernelPrintLine("KernelVerify: host:%f device:%f loss:%f", host[index], device[index], loss); } } } bool Verify(dtype *host, dtype *device, int len, const char* message) { NumberArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), cudaMemcpyHostToDevice)); KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), cudaMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); cudaDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } __global__ void KernelVerify(bool *host, bool *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(bool *host, bool 
*device, int len, const char* message) { BoolArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), cudaMemcpyHostToDevice)); KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), cudaMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); cudaDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } __global__ void KernelVerify(int *host, int *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(int *host, int *device, int len, const char* message) { IntArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), cudaMemcpyHostToDevice)); KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), cudaMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); 
MemoryPool::Ins().Free(m); cudaDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } constexpr int MAX_BLOCK_POWER = 100; MemoryPool& MemoryPool::Ins() { static MemoryPool *p; if (p == NULL) { p = new MemoryPool; p->free_blocks_.resize(MAX_BLOCK_POWER + 1); p->busy_blocks_.reserve(10000); } return *p; } void appendFreeBlock(const MemoryBlock &memory_block, vector<map<void*, MemoryBlock>> &free_blocks, int i, const unordered_map<void*, MemoryBlock> &busy_blocks) { if (memory_block.size != (1 << i)) { cerr << boost::format("incorrect block size %1%, but i is %2%") % memory_block.size % i << endl; abort(); } free_blocks.at(i).insert(make_pair(memory_block.p, memory_block)); } cudaError_t MemoryPool::Malloc(void **p, int size) { assert(*p == NULL); Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Malloc"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemMalloc(p, size, NULL)); profiler.EndEvent(); return cudaSuccess; #elif DEVICE_MEMORY == 1 cudaError_t r = cudaMalloc(p, size); profiler.EndEvent(); return r; #else int fit_size = 1; int n = 0; while (fit_size < size) { fit_size <<= 1; ++n; } cudaError_t status = cudaErrorMemoryAllocation; int loop = 0; while (status != cudaSuccess) { //cout << "n:" << n << endl; if (free_blocks_.at(n).empty()) { //cout << "free_blocks_.at(n).empty()" << endl; int higher_power = n + 1; //cout << "higher_power:" << higher_power << endl; while (higher_power <= MAX_BLOCK_POWER && free_blocks_.at(higher_power).empty()) { ++higher_power; } //cout << "higher_power:" << higher_power << endl; if (higher_power > MAX_BLOCK_POWER) { while (status != cudaSuccess) { status = cudaMalloc(p, fit_size); } CallCuda(status); MemoryBlock block(*p, fit_size); busy_blocks_.insert(std::make_pair(*p, block)); //cout << "malloc successfully" << endl; } else { //cout << "higher_power:" << higher_power << endl; auto &v = free_blocks_.at(higher_power); MemoryBlock &to_split = v.rbegin()->second; int half_size = to_split.size >> 1; void 
*half_address = static_cast<void*>(static_cast<char*>(to_split.p) + half_size); MemoryBlock low_block(to_split.p, half_size, to_split.buddy), high_block(half_address, half_size, to_split.p); v.erase(v.rbegin()->first); appendFreeBlock(low_block, free_blocks_, higher_power - 1, busy_blocks_); appendFreeBlock(high_block, free_blocks_, higher_power - 1, busy_blocks_); } } else { status = cudaSuccess; int this_size = free_blocks_.at(n).size(); MemoryBlock &block = free_blocks_.at(n).rbegin()->second; *p = block.p; busy_blocks_.insert(std::make_pair(block.p, block)); free_blocks_.at(n).erase(free_blocks_.at(n).rbegin()->first); } ++loop; } profiler.EndEvent(); return status; #endif } std::pair<const MemoryBlock *, const MemoryBlock *> lowerAndhigherBlocks(const MemoryBlock &a, const MemoryBlock &b) { if (a.size != b.size) { cerr << "a.size is not equal to b.size" << endl; abort(); } int distance = static_cast<char*>(a.p) - static_cast<char*>(b.p); if (distance == 0) { cerr << "block a and b has the same address" << endl; abort(); } const MemoryBlock &low = distance > 0 ? b : a; const MemoryBlock &high = distance > 0 ? 
a : b; return std::make_pair(&low, &high); } bool isBuddies(const MemoryBlock &a, const MemoryBlock &b) { if (a.size != b.size) { return false; } auto pair = lowerAndhigherBlocks(a, b); return pair.second->buddy == pair.first->p && ((char*)pair.second->p - (char*)pair.first->p) == a.size; } MemoryBlock mergeBlocks(const MemoryBlock &a, const MemoryBlock &b) { if (a.size != b.size) { cerr << "sizes of memory blocks to merge not equal" << endl; abort(); } auto pair = lowerAndhigherBlocks(a, b); if ((char*)pair.second->p - (char*)pair.first->p != a.size || (a.p != b.buddy && a.buddy != b.p)) { cerr << "a and b are not buddies" << endl; cerr << boost::format("a:%1%\nb:%2%") % a.toString() % b.toString() << endl; abort(); } MemoryBlock block(pair.first->p, pair.first->size << 1, pair.first->buddy); return block; } void returnFreeBlock(const MemoryBlock &block, vector<map<void*, MemoryBlock>> &free_blocks, int power, const unordered_map<void*, MemoryBlock> &busy_blocks) { Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("returnFreeBlock"); MemoryBlock current_block = block; for (int i = power; i <= MAX_BLOCK_POWER; ++i) { map<void*, MemoryBlock> &v = free_blocks.at(i); void *free_p = (char*)current_block.p - (char*)current_block.buddy == current_block.size ? 
current_block.buddy : (void*)((char*)current_block.p + current_block.size); auto it = v.find(free_p); if (it == v.end() || (it->second.p != current_block.buddy && it->second.buddy != current_block.p)) { appendFreeBlock(current_block, free_blocks, i, busy_blocks); break; } else { MemoryBlock merged_block = mergeBlocks(it->second, current_block); current_block = merged_block; v.erase(it); } } profiler.EndEvent(); } cudaError_t MemoryPool::Free(void *p) { Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Free"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemFree(p, NULL)); profiler.EndEvent(); #elif DEVICE_MEMORY == 1 cudaError_t r = cudaFree(p); profiler.EndEvent(); return r; #else auto it = busy_blocks_.find(p); if (it == busy_blocks_.end()) { cerr << "cannot find busy block " << p << endl; abort(); } int size = it->second.size; int n = 0; while (size > 1) { size >>= 1; ++n; } if (it->second.size != (1 << n)) { cerr << boost::format("size:%1% n:%2%") % it->second.size % n << endl; abort(); } auto block = it->second; busy_blocks_.erase(it); returnFreeBlock(block, free_blocks_, n, busy_blocks_); it = busy_blocks_.find(p); if (it != busy_blocks_.end()) { cerr << "can find erased block " << p << endl; abort(); } profiler.EndEvent(); if (busy_blocks_.find(p) != busy_blocks_.end()) { cerr << boost::format("Malloc - find freed p in busy blocks") << endl; } return cudaSuccess; #endif } void Profiler::EndCudaEvent() { cudaDeviceSynchronize(); EndEvent(); } __global__ void KernelCalculateLtyForUniBackward(ActivatedEnum activated, const dtype *const*ly, const dtype *ty, const dtype *y, dtype *lty, int count, int dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = count * dim; for (int i = index; i < len; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype yi = y[i]; dtype lyv = ly[count_i][dim_i]; if (activated == ActivatedEnum::TANH) { lty[i] = lyv * cuda_dtanh(yi); } else if (activated == ActivatedEnum::SIGMOID) { lty[i] = lyv * 
cuda_dsigmoid(yi); } else if (activated == ActivatedEnum::RELU) { lty[i] = lyv * cuda_drelu(ty[i]); } else if (activated == ActivatedEnum::LEAKY_RELU) { lty[i] = lyv * cuda_dleaky_relu(ty[i]); } else if (activated == ActivatedEnum::SELU) { lty[i] = lyv * cuda_dselu(ty[i], yi); } else { printf("KernelCalculateLtyForUniBackward error\n"); } } } void CalculateLtyForUniBackward(ActivatedEnum activated, const std::vector<dtype*> &ly, const dtype *ty, const dtype *y, dtype *lty, int count, int dim) { NumberPointerArray ly_arr; ly_arr.init((dtype**)ly.data(), ly.size()); int block_count = std::min(BLOCK_COUNT, (count * dim + TPB - 1) / TPB); KernelCalculateLtyForUniBackward<<<block_count, TPB>>>(activated, ly_arr.value, ty, y, lty, count, dim); CheckCudaError(); cudaDeviceSynchronize(); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward( const dtype *lty, const dtype *lx, dtype *b, dtype **losses, int count, int out_dim, int in_dim, dtype *block_sums, int *global_block_count, bool use_b) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (use_b) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } int lty_index = count_i * out_dim + dim_i; shared_arr[threadIdx.x] = count_i < count ? 
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } DeviceAtomicAdd(b + dim_i, sum); } } } } else { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i + count_i * in_dim; DeviceAtomicAdd(losses[count_i] + dim_i, lx[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(const dtype *lty, const dtype *lx, dtype *b, std::vector<dtype*> &losses, int count, int out_dim, int in_dim, bool use_b) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim, block_y, 1); NumberPointerArray loss_arr; loss_arr.init(losses.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward<<<block_dim, TPB>>>(lty, lx, b, loss_arr.value, count, out_dim, in_dim, block_sums.value, global_block_count_arr.value, use_b); CheckCudaError(); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward( const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, dtype **losses1, dtype **losses2, int count, int out_dim, int in_dim1, int in_dim2, dtype *block_sums, int *global_block_count) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } //int lty_index = dim_i * count + count_i; int lty_index = dim_i + count_i * out_dim; shared_arr[threadIdx.x] = count_i < count ? 
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } DeviceAtomicAdd(b + dim_i, sum); } } } else if (dim_i < out_dim + in_dim1) { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i + count_i * in_dim1; DeviceAtomicAdd(losses1[count_i] + dim_i, lx1[lx_index]); } } else { if (count_i < count) { dim_i -= (out_dim + in_dim1); int lx_index = dim_i + count_i * in_dim2; DeviceAtomicAdd(losses2[count_i] + dim_i, lx2[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForBiBackward(const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, std::vector<dtype*> &losses1, std::vector<dtype*> &losses2, int count, int out_dim, int in_dim1, int in_dim2) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim1 + in_dim2, block_y, 1); NumberPointerArray loss1_arr; loss1_arr.init(losses1.data(), count); NumberPointerArray loss2_arr; loss2_arr.init(losses2.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward<<<block_dim, TPB>>>(lty, lx1, lx2, b, loss1_arr.value, loss2_arr.value, count, out_dim, in_dim1, in_dim2, block_sums.value, global_block_count_arr.value); CheckCudaError(); } constexpr int MAX_BATCH_COUNT = 1000000; __global__ void KernelInitCurandStates(curandState_t *states) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; for (int i = index; i < MAX_BATCH_COUNT; i += step) { curand_init(0, i, 0, &states[i]); } } curandState_t *GetCurandStates() { static curandState_t 
*states; if (states == NULL) { MemoryPool &pool = MemoryPool::Ins(); CallCuda(pool.Malloc((void**)&states, sizeof(curandState_t) * MAX_BATCH_COUNT)); KernelInitCurandStates<<<BLOCK_COUNT, TPB>>>( states); CheckCudaError(); } return states; } curandGenerator_t &GetGenerator() { static curandGenerator_t gen; static bool init; if (!init) { CallCurand(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); CallCurand(curandSetPseudoRandomGeneratorSeed(gen, 0)); init = true; } return gen; } void CalculateDropoutMask(dtype drop_factor, int count, int dim, dtype* mask) { curandGenerator_t &gen = GetGenerator(); CallCurand(curandGenerateUniform(gen, mask, count * dim)); } __global__ void KernelConcatForward(dtype **ins, int *in_dims, dtype **outs, int count, int in_count, int out_dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < out_dim * count; i += step) { int out_dim_i = i % out_dim; int count_i = i / out_dim; int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; dtype v = ins[count_i * in_count + offset_j][in_dim_i]; outs[count_i][out_dim_i] = v; } } void ConcatForward(const std::vector<dtype*> &in_vals, const std::vector<int> &in_dims, std::vector<dtype*> &vals, int count, int in_count, int out_dim) { int len = count * out_dim; int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); NumberPointerArray in_val_arr, val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_dim_arr; in_dim_arr.init((int*)in_dims.data(), in_dims.size()); KernelConcatForward<<<block_count, TPB>>>(in_val_arr.value, in_dim_arr.value, val_arr.value, count, in_count, out_dim); CheckCudaError(); } __global__ void KernelConcatBackward(dtype** in_losses, int *in_dims, dtype 
**out_losses, int count, int in_count, int out_dim) {
    // Grid-stride loop over every (count_i, out_dim_i) element of the
    // concatenated output loss; each element is scattered back into the
    // input-loss buffer it originally came from.
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < out_dim * count; i += step) {
        int out_dim_i = i % out_dim;
        int count_i = i / out_dim;
        // Walk the per-input dims to find which input (offset_j) this output
        // dimension belongs to, and the dim offset (last_in_dim_sum) of that
        // input's first element inside the concatenated vector.
        int in_dim_sum = 0;
        int last_in_dim_sum;
        int offset_j = 0;
        for (int j = 0; j < in_count; ++j) {
            last_in_dim_sum = in_dim_sum;
            in_dim_sum += in_dims[j];
            offset_j = j;
            if (out_dim_i < in_dim_sum) {
                break;
            }
        }
        int in_dim_i = out_dim_i - last_in_dim_sum;
        // Atomic accumulate: other kernels may add to the same input loss.
        DeviceAtomicAdd(in_losses[count_i * in_count + offset_j] + in_dim_i,
                out_losses[count_i][out_dim_i]);
    }
}

// Host wrapper: copies the pointer tables and per-input dims to the device
// and launches KernelConcatBackward over count * out_dim elements.
void ConcatBackward(const std::vector<dtype*> &in_losses,
        const std::vector<int> &in_dims,
        std::vector<dtype*> &losses,
        int count,
        int in_count,
        int out_dim) {
    int len = count * out_dim;
    int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
    NumberPointerArray in_loss_arr, loss_arr;
    in_loss_arr.init((dtype**)in_losses.data(), in_losses.size());
    loss_arr.init((dtype**)losses.data(), losses.size());
    IntArray in_dim_arr;
    in_dim_arr.init((int*)in_dims.data(), in_dims.size());
    KernelConcatBackward<<<block_count, TPB>>>(in_loss_arr.value,
            in_dim_arr.value, loss_arr.value, count, in_count, out_dim);
    CheckCudaError();
}

// Fills p[0..len) with value via a grid-stride loop. A kernel is needed
// because cudaMemset is byte-wise and cannot write arbitrary dtype values.
__global__ void KernelMemset(dtype *p, int len, dtype value) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < len; i+= step) {
        p[i] = value;
    }
}

// Host wrapper for the dtype fill kernel.
void Memset(dtype *p, int len, dtype value) {
    int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
    KernelMemset<<<block_count, TPB>>>(p, len, value);
    CheckCudaError();
}

// bool overload of the fill kernel.
__global__ void KernelMemset(bool *p, int len, bool value) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < len; i+= step) {
        p[i] = value;
    }
}

// Host wrapper for the bool fill kernel.
void Memset(bool *p, int len, bool value) {
    int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB);
    KernelMemset<<<block_count, TPB>>>(p, len, value);
    CheckCudaError();
}

// Raw device allocation helper; CallCuda aborts on allocation failure.
void *Malloc(int size) {
    void *p;
CallCuda(cudaMalloc(&p, size));
    return p;
}

// Fills every element of count device vectors (each of length dim, addressed
// through the pointer table p) with value, using one flat grid-stride loop.
__global__ void KernelBatchMemset(dtype **p, int count, int dim,
        dtype value) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < dim * count ; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        p[count_i][dim_i] = value;
    }
}

// Host wrapper: copies the vector-of-device-pointers table to the device and
// launches KernelBatchMemset.
void BatchMemset(const std::vector<dtype*> &vec, int count, int dim,
        dtype value) {
    int block_count = (count * dim -1 + TPB) / TPB;
    block_count = std::min(block_count, BLOCK_COUNT);
    NumberPointerArray vec_arr;
    vec_arr.init((dtype**)vec.data(), vec.size());
    KernelBatchMemset<<<block_count, TPB>>>(vec_arr.value, count, dim, value);
    CheckCudaError();
}

// Embedding lookup: vals[count_i] = row xids[count_i] of vocabulary
// (row-major, dim floats per row); a negative xid yields a zero vector.
__global__ void KernelLookupForward(const int *xids, const dtype *vocabulary,
        int count,
        int dim,
        dtype **vals) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count * dim; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        int xid = xids[count_i];
        if (xid >= 0) {
            int voc_i = xid * dim + dim_i;
            vals[count_i][dim_i] = vocabulary[voc_i];
        } else {
            vals[count_i][dim_i] = 0.0f;
        }
    }
}

// Host wrapper for KernelLookupForward.
void LookupForward(const std::vector<int> &xids, const dtype *vocabulary,
        int count,
        int dim,
        std::vector<dtype*> &vals) {
    int block_count = std::min(BLOCK_COUNT, (count * dim - 1 + TPB) / TPB);
    IntArray xid_arr;
    xid_arr.init((int*)xids.data(), xids.size());
    NumberPointerArray val_arr;
    val_arr.init((dtype**)vals.data(), vals.size());
    KernelLookupForward<<<block_count, TPB>>>(xid_arr.value, vocabulary,
            count,
            dim,
            const_cast<dtype**>(val_arr.value));
    CheckCudaError();
}

// Embedding gradient accumulation: for each looked-up word that is the
// unknown word or is being fine-tuned, adds the loss into grad and marks the
// word's row in indexers (done once per word, by the dim_i == 0 element).
__global__ void KernelLookupBackward(const int *xids, int unknown_id,
        bool fine_tune,
        const dtype** losses,
        int count,
        int dim,
        dtype *grad,
        bool *indexers) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count * dim; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        int xid = xids[count_i];
        if (xid == unknown_id || fine_tune) {
            assert(xid >= 0);
            if (dim_i == 0) {
indexers[xid] = true;
            }
            DeviceAtomicAdd(grad + xid * dim + dim_i,
                    losses[count_i][dim_i]);
        }
    }
}

// Host wrapper for KernelLookupBackward.
//
// Fix: the original copied xids to the device twice — first into pl_arr
// (from host data), then into xid_arr via init((int*)pl_arr.value, ...),
// i.e. from a *device* pointer, whereas every other IntArray::init call in
// this file is given a host pointer. The second copy was both redundant and
// incorrect; xid_arr is now initialized directly from the host data.
void LookupBackward(const std::vector<int> &xids, int unknown_id,
        bool fine_tune,
        const std::vector<dtype*> &losses,
        int count,
        int dim,
        dtype *grad,
        bool *indexers) {
    int block_count = std::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT);
    IntArray xid_arr;
    xid_arr.init((int*)xids.data(), xids.size());
    NumberPointerArray loss_arr;
    loss_arr.init((dtype**)losses.data(), losses.size());
    KernelLookupBackward<<<block_count, TPB>>>(
            const_cast<const int *>(xid_arr.value),
            unknown_id,
            fine_tune,
            const_cast<const dtype**>(loss_arr.value),
            count,
            dim,
            grad,
            indexers);
    CheckCudaError();
}

// Max/min pooling over a variable number of inputs per batch element.
// Launch (see PoolForward): grid = (dim, count), blockDim.x = power of two
// >= max input count, dynamic shared memory = 2 * blockDim.x * sizeof(dtype)
// (first half holds values, second half the winning input indices).
__global__ void KernelPoolForward(PoolingEnum pooling, dtype **ins,
        int *in_counts,
        int max_in_count,
        dtype **outs,
        int count,
        int dim,
        int* hit_inputs) {
    __shared__ volatile extern dtype pool_shared_arr[];
    volatile dtype* shared_indexers = pool_shared_arr + blockDim.x;
    int batch_i = blockIdx.y;
    int in_count = in_counts[batch_i];
    int in_count_i = threadIdx.x;
    int dim_i = blockIdx.x;
    if (in_count_i < in_count) {
        pool_shared_arr[threadIdx.x] =
            ins[batch_i * max_in_count + in_count_i][dim_i];
    } else {
        // Identity element: -inf for max pooling, +inf for min pooling.
        pool_shared_arr[threadIdx.x] = pooling == PoolingEnum::MAX ?
-INFINITY : INFINITY; } shared_indexers[threadIdx.x] = threadIdx.x; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; if (pooling == PoolingEnum::MAX) { if (pool_shared_arr[threadIdx.x] < pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } else { if (pool_shared_arr[threadIdx.x] > pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } } __syncthreads(); } if (threadIdx.x == 0) { hit_inputs[batch_i * dim + dim_i] = shared_indexers[0]; outs[batch_i][dim_i] = pool_shared_arr[0]; } } void PoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, std::vector<dtype*> &vals, int count, const std::vector<int> &in_counts, int dim, int *hit_inputs) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); KernelPoolForward<<<block_dim, thread_count, thread_count * 2 * sizeof(dtype)>>>(pooling, in_val_arr.value, in_count_arr.value, max_in_count, val_arr.value, count, dim, hit_inputs); CheckCudaError(); } __global__ void KernelPoolBackward(const dtype ** losses, const int *hit_inputs, int max_in_count, int count, int dim, dtype **in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; int input_i = hit_inputs[i]; dtype loss = losses[count_i][dim_i]; DeviceAtomicAdd(in_losses[count_i * max_in_count + input_i] + dim_i, loss); } } void 
PoolBackward(const std::vector<dtype*> &losses, std::vector<dtype*> &in_losses, const std::vector<int> &in_counts, const int *hit_inputs, int count, int dim) { NumberPointerArray loss_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int block_count = (count * dim - 1 + TPB) / TPB; block_count = std::min(block_count, BLOCK_COUNT); KernelPoolBackward<<<block_count, TPB>>>((const dtype**)loss_arr.value, hit_inputs, max_in_count, count, dim, in_loss_arr.value); CheckCudaError(); } __global__ void KernelSumPoolForward(PoolingEnum pooling, const dtype **in_vals, int count, int dim, const int *in_counts, int max_in_count, dtype **vals) { __shared__ volatile extern dtype pool_shared_arr[]; int batch_i = blockIdx.y; int in_count = in_counts[batch_i]; int in_count_i = threadIdx.x; int dim_i = blockIdx.x; if (in_count_i < in_count) { pool_shared_arr[threadIdx.x] = in_vals[batch_i * max_in_count + in_count_i][dim_i]; } else { pool_shared_arr[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; pool_shared_arr[threadIdx.x] += pool_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[batch_i][dim_i] = pooling == PoolingEnum::SUM ? 
pool_shared_arr[0] : pool_shared_arr[0] / in_counts[batch_i]; } } void SumPoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, int count, int dim, const std::vector<int> &in_counts, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); KernelSumPoolForward<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>(pooling, (const dtype**)in_val_arr.value, count, dim, (const int*)in_count_arr.value, max_in_count, val_arr.value); CheckCudaError(); } __global__ void KernelSumBackward(PoolingEnum pooling, const dtype **losses, const int *in_counts, int max_in_count, int count, int dim, dtype **in_losses) { int global_in_count_i = blockIdx.x * max_in_count + blockIdx.y; if (blockIdx.y < in_counts[blockIdx.x] && threadIdx.x < dim) { atomicAdd(in_losses[global_in_count_i] + threadIdx.x, pooling == PoolingEnum::SUM ? 
losses[blockIdx.x][threadIdx.x] : losses[blockIdx.x][threadIdx.x] / in_counts[blockIdx.x]); } } void SumPoolBackward(PoolingEnum pooling, const std::vector<dtype*> &losses, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses) { int thread_count = 8; while (thread_count < dim) { thread_count <<= 1; } int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); dim3 block_dim(count, max_in_count, 1); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray in_loss_arr; in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); KernelSumBackward<<<block_dim, thread_count>>>(pooling, (const dtype**)loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, in_loss_arr.value); CheckCudaError(); } __global__ void KernelScalarAttentionForward(const dtype** ins, const dtype **unnormeds, const int *in_counts, int max_in_count, int count, int dim, dtype **masks, dtype **vals) { __shared__ volatile extern dtype attention_shared_arr[]; volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x; int count_i = blockIdx.y; int in_count = in_counts[count_i]; int dim_i = blockIdx.x; int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x; dtype unnormed_mask = threadIdx.x < in_count ? cuda_exp(unnormeds[global_in_count_i][0]) : 0.0f; attention_shared_arr[threadIdx.x] = unnormed_mask; shared_unnormed_masks[threadIdx.x] = unnormed_mask; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } dtype mask = threadIdx.x < in_count ? 
shared_unnormed_masks[threadIdx.x] / attention_shared_arr[0] : 0.0f; if (threadIdx.x < in_count) { masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask; } dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f; attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ? mask * in : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0]; } } void ScalarAttentionForward(const std::vector<dtype*> &ins, const std::vector<dtype*> &unnormeds, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &masks, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_arr; in_arr.init((dtype**)ins.data(), ins.size()); NumberPointerArray unnormed_arr; unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size()); NumberPointerArray mask_arr; mask_arr.init((dtype**)masks.data(), masks.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); KernelScalarAttentionForward<<<block_dim, thread_count, 2 * thread_count * sizeof(dtype)>>>((const dtype**)in_arr.value, (const dtype**)unnormed_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_arr.value, val_arr.value); CheckCudaError(); } __global__ void KernelScalarAttentionMaskAndInLoss(const dtype **losses, const dtype **in_vals, const dtype **masks, const int *in_counts, int max_in_count, int count, int dim, dtype *mask_losses, dtype **in_losses) { // blockIdx.x : in_count_i // blockIdx.y : count_i // threadIdx.x : dim_i __shared__ extern 
volatile dtype att_mask_loss_shared_arr[]; int in_count = in_counts[blockIdx.y]; int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x; if (in_count <= blockIdx.x) { return; } for (int i = threadIdx.x; i < dim; i += blockDim.x) { atomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] * masks[blockIdx.y][max_in_count * threadIdx.x + blockIdx.x]); } att_mask_loss_shared_arr[threadIdx.x] = 0.0f; for (int i = threadIdx.x; i < dim; i += blockDim.x) { att_mask_loss_shared_arr[threadIdx.x] += losses[blockIdx.y][i] * in_vals[global_in_count_i][i]; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { att_mask_loss_shared_arr[threadIdx.x] += att_mask_loss_shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { mask_losses[global_in_count_i] = att_mask_loss_shared_arr[0]; } } void ScalarAttentionMaskAndInLoss(const dtype** losses, const dtype** in_vals, const dtype **masks, const int *in_counts, int max_in_count, int count, int dim, dtype *mask_losses, dtype **in_losses) { dim3 block_dim(max_in_count, count, 1); int thread_count = 8; if (dim >= TPB) { thread_count = TPB; } else { while (dim > thread_count) { thread_count <<= 1; } } KernelScalarAttentionMaskAndInLoss<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>(losses, in_vals, masks, in_counts, max_in_count, count, dim, mask_losses, in_losses); CheckCudaError(); } __global__ void KernelScalarAttentionBackward(const dtype** masks, const dtype *mask_losses, const int *in_counts, int max_in_count, int count, int dim, dtype **unnormed_losses) { __shared__ volatile extern dtype shared_att_bckwrd_arr[]; int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x; int in_count = in_counts[blockIdx.x]; if (threadIdx.x < in_count && blockIdx.y == 0) { atomicAdd(unnormed_losses[global_in_count_i], masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[global_in_count_i]); } shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < 
in_count ? masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[global_in_count_i] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_att_bckwrd_arr[threadIdx.x] += shared_att_bckwrd_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x < in_count && blockIdx.y == 0) { atomicAdd(unnormed_losses[global_in_count_i], -shared_att_bckwrd_arr[0] * masks[blockIdx.x][threadIdx.x]); } } void ScalarAttentionBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals, const std::vector<dtype*> &masks, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses, std::vector<dtype*> &unnormed_losses) { NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr, in_val_arr; loss_arr.init((dtype**)losses.data(), losses.size()); mask_arr.init((dtype**)masks.data(), masks.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); unnormed_loss_arr.init((dtype**)unnormed_losses.data(), unnormed_losses.size()); in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); NumberArray mask_loss_arr; mask_loss_arr.init(count * max_in_count); ScalarAttentionMaskAndInLoss((const dtype**)loss_arr.value, (const dtype**)in_val_arr.value, (const dtype**)mask_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_loss_arr.value, in_loss_arr.value); dim3 block_dim(count, dim, 1); int thread_count = 8; while (thread_count < max_in_count) { thread_count <<= 1; } KernelScalarAttentionBackward<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>((const dtype**)mask_arr.value, (const dtype*)mask_loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, unnormed_loss_arr.value); CheckCudaError(); } __global__ void KernelVectorAttentionForward(const dtype** ins, 
const dtype **unnormeds, const int *in_counts, int max_in_count, int count, int dim, dtype **masks, dtype **vals) { __shared__ volatile extern dtype attention_shared_arr[]; volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x; int count_i = blockIdx.y; int in_count = in_counts[count_i]; int dim_i = blockIdx.x; int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x; dtype unnormed_mask = threadIdx.x < in_count ? cuda_exp(unnormeds[global_in_count_i][blockIdx.x]) : 0.0f; attention_shared_arr[threadIdx.x] = unnormed_mask; shared_unnormed_masks[threadIdx.x] = unnormed_mask; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } dtype mask = threadIdx.x < in_count ? shared_unnormed_masks[threadIdx.x] / attention_shared_arr[0] : 0.0f; if (threadIdx.x < in_count) { masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask; } dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f; attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ? 
mask * in : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0]; } } void VectorAttentionForward(const std::vector<dtype*> &ins, const std::vector<dtype*> &unnormeds, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &masks, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_arr; in_arr.init((dtype**)ins.data(), ins.size()); NumberPointerArray unnormed_arr; unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size()); NumberPointerArray mask_arr; mask_arr.init((dtype**)masks.data(), masks.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); KernelVectorAttentionForward<<<block_dim, thread_count, 2 * thread_count * sizeof(dtype)>>>((const dtype**)in_arr.value, (const dtype**)unnormed_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_arr.value, val_arr.value); CheckCudaError(); } __global__ void KernelVectorAttentionMaskAndInLoss(const dtype **losses, const dtype **in_vals, const dtype **masks, const int *in_counts, int max_in_count, int count, int dim, dtype **mask_losses, dtype **in_losses) { // blockIdx.x : in_count_i // blockIdx.y : count_i // threadIdx.x : dim_i int in_count = in_counts[blockIdx.y]; int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x; if (in_count <= blockIdx.x) { return; } for (int i = threadIdx.x; i < dim; i += blockDim.x) { atomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] * masks[blockIdx.y][max_in_count * i + blockIdx.x]); 
mask_losses[blockIdx.y][max_in_count * i + blockIdx.x] = losses[blockIdx.y][i] * in_vals[global_in_count_i][i]; } } void VectorAttentionMaskAndInLoss(const dtype** losses, const dtype** in_vals, const dtype** masks, const int *in_counts, int max_in_count, int count, int dim, dtype **mask_losses, dtype **in_losses) { dim3 block_dim(max_in_count, count, 1); int thread_count = 8; if (dim >= TPB) { thread_count = TPB; } else { while (dim > thread_count) { thread_count <<= 1; } } KernelVectorAttentionMaskAndInLoss<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>(losses, in_vals, masks, in_counts, max_in_count, count, dim, mask_losses, in_losses); CheckCudaError(); } __global__ void KernelVectorAttentionBackward(const dtype** masks, const dtype **mask_losses, const int *in_counts, int max_in_count, int count, int dim, dtype **unnormed_losses) { __shared__ volatile extern dtype shared_att_bckwrd_arr[]; int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x; int in_count = in_counts[blockIdx.x]; if (threadIdx.x < in_count) { atomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y, masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x]); } shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ? 
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_att_bckwrd_arr[threadIdx.x] += shared_att_bckwrd_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x < in_count) { atomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y, -shared_att_bckwrd_arr[0] * masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x]); } } void VectorAttentionBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals, const std::vector<dtype*> &masks, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses, std::vector<dtype*> &unnormed_losses) { NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr, in_val_arr; loss_arr.init((dtype**)losses.data(), losses.size()); mask_arr.init((dtype**)masks.data(), masks.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); unnormed_loss_arr.init((dtype**)unnormed_losses.data(), unnormed_losses.size()); in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); std::vector<std::shared_ptr<NumberArray>> mask_losses; mask_losses.reserve(count); for (int i = 0; i < count; ++i) { std::shared_ptr<NumberArray> p = std::make_shared<NumberArray>(); p->init(max_in_count * dim); mask_losses.push_back(p); } std::vector<dtype*> raw_mask_losses; raw_mask_losses.reserve(count); for (auto &p : mask_losses) { raw_mask_losses.push_back(p->value); } NumberPointerArray mask_loss_arr; mask_loss_arr.init((dtype**)raw_mask_losses.data(), mask_losses.size()); VectorAttentionMaskAndInLoss((const dtype**)loss_arr.value, (const dtype**)in_val_arr.value, (const dtype**)mask_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, 
mask_loss_arr.value, in_loss_arr.value);
    dim3 block_dim(count, dim, 1);
    // Round the thread count up to a power of two >= max_in_count so the
    // kernel's tree reduction works.
    int thread_count = 8;
    while (thread_count < max_in_count) {
        thread_count <<= 1;
    }
    KernelVectorAttentionBackward<<<block_dim, thread_count,
        thread_count * sizeof(dtype)>>>((const dtype**)mask_arr.value,
                (const dtype**)mask_loss_arr.value,
                (const int*)in_count_arr.value,
                max_in_count, count, dim, unnormed_loss_arr.value);
    CheckCudaError();
}

// Element-wise (pointwise) product: vals[c][d] = ins1[c][d] * ins2[c][d]
// for count vectors of length dim, via one flat grid-stride loop.
__global__ void KernelPMultiForward(const dtype **ins1, const dtype **ins2,
        int count,
        int dim,
        dtype** vals) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count * dim; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        vals[count_i][dim_i] = ins1[count_i][dim_i] * ins2[count_i][dim_i];
    }
}

// Host wrapper for KernelPMultiForward.
void PMultiForward(const std::vector<dtype*> &ins1,
        const std::vector<dtype*> &ins2,
        int count,
        int dim,
        std::vector<dtype*> &vals) {
    int block_count = DefaultBlockCount(count * dim);
    NumberPointerArray ins1_arr, ins2_arr, vals_arr;
    ins1_arr.init((dtype**)ins1.data(), count);
    ins2_arr.init((dtype**)ins2.data(), count);
    vals_arr.init((dtype**)vals.data(), count);
    KernelPMultiForward<<<block_count, TPB>>>((const dtype**)ins1_arr.value,
            (const dtype**)ins2_arr.value, count, dim, vals_arr.value);
    CheckCudaError();
}

// Backward of the element-wise product (product rule): each input's loss
// accumulates the output loss times the OTHER input's value.
__global__ void KernelPMultiBackward(const dtype **losses,
        const dtype **in_vals1,
        const dtype **in_vals2,
        int count,
        int dim,
        dtype** in_losses1,
        dtype** in_losses2) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count * dim; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        DeviceAtomicAdd(in_losses1[count_i] + dim_i,
                losses[count_i][dim_i] * in_vals2[count_i][dim_i]);
        DeviceAtomicAdd(in_losses2[count_i] + dim_i,
                losses[count_i][dim_i] * in_vals1[count_i][dim_i]);
    }
}

// Host wrapper for KernelPMultiBackward.
void PMultiBackward(const std::vector<dtype*> &losses,
        const std::vector<dtype*> &in_vals1,
        const std::vector<dtype*> &in_vals2,
        int count,
        int dim,
        std::vector<dtype*> &in_losses1,
std::vector<dtype*> &in_losses2) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray losses_arr, in_vals1_arr, in_vals2_arr, in_losses1_arr, in_losses2_arr; losses_arr.init((dtype**)losses.data(), losses.size()); in_vals1_arr.init((dtype**)in_vals1.data(), in_vals1.size()); in_vals2_arr.init((dtype**)in_vals2.data(), in_vals2.size()); in_losses1_arr.init((dtype**)in_losses1.data(), in_losses1.size()); in_losses2_arr.init((dtype**)in_losses2.data(), in_losses2.size()); KernelPMultiBackward<<<block_count, TPB>>>((const dtype**)losses_arr.value, (const dtype**)in_vals1_arr.value, (const dtype**)in_vals2_arr.value, count, dim, in_losses1_arr.value, in_losses2_arr.value); CheckCudaError(); } __global__ void KernelPAddForward(const dtype*** ins, int count, int dim, int in_count, dtype **vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i+= step) { int count_i = i / dim; int dim_i = i % dim; dtype sum = ins[0][count_i][dim_i]; for (int j = 1; j < in_count; ++j) { sum += ins[j][count_i][dim_i]; } vals[count_i][dim_i] = sum; } } __global__ void KernelPDotForward(const dtype **in_vals1, const dtype **in_vals2, int count, int dim, dtype** vals) { volatile __shared__ extern dtype shared_val[]; if (threadIdx.x < dim) { shared_val[threadIdx.x] = in_vals1[blockIdx.x][threadIdx.x] * in_vals2[blockIdx.x][threadIdx.x]; } else { shared_val[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_val[threadIdx.x] += shared_val[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[blockIdx.x][0] = shared_val[0]; } } void PDotForward(const std::vector<dtype*> &ins1, const std::vector<dtype*> &ins2, int count, int dim, std::vector<dtype*> &vals) { NumberPointerArray in1_arr, in2_arr, val_arr; in1_arr.init((dtype**)ins1.data(), ins1.size()); in2_arr.init((dtype**)ins2.data(), ins2.size()); val_arr.init((dtype**)vals.data(), 
vals.size()); int thread_count = NextTwoIntegerPowerNumber(dim); KernelPDotForward<<<count, thread_count, thread_count * sizeof(dtype)>>>(( const dtype**)in1_arr.value, (const dtype**)in2_arr.value, count, dim, val_arr.value); CheckCudaError(); } __global__ void KernelPDotBackward(const dtype **losses, const dtype **in_vals1, const dtype **in_vals2, int count, int dim, dtype **in_losses1, dtype **in_losses2) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; atomicAdd(in_losses1[count_i] + dim_i, losses[count_i][0] * in_vals2[count_i][dim_i]); atomicAdd(in_losses2[count_i] + dim_i, losses[count_i][0] * in_vals1[count_i][dim_i]); } } void PDotBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals1, const std::vector<dtype*> &in_vals2, int count, int dim, std::vector<dtype*> &in_losses1, std::vector<dtype*> &in_losses2) { NumberPointerArray in1_loss_arr, in2_loss_arr, loss_arr, in_val1_arr, in_val2_arr; in1_loss_arr.init((dtype**)in_losses1.data(), in_losses1.size()); in2_loss_arr.init((dtype**)in_losses2.data(), in_losses2.size()); loss_arr.init((dtype**)losses.data(), losses.size()); in_val1_arr.init((dtype**)in_vals1.data(), in_vals1.size()); in_val2_arr.init((dtype**)in_vals2.data(), in_vals2.size()); int block_count = DefaultBlockCount(count * dim); KernelPDotBackward<<<block_count, TPB>>>((const dtype**)loss_arr.value, (const dtype**)in_val1_arr.value, (const dtype**)in_val2_arr.value, count, dim, in1_loss_arr.value, in2_loss_arr.value); CheckCudaError(); } void PAddForward(const std::vector<std::vector<dtype*>> &ins, int count, int dim, int in_count, std::vector<dtype*> &vals) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(ins.size()); for (const std::vector<dtype*> &x : ins) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); 
gpu_addr.push_back(arr); } std::vector<dtype**> ins_gpu; ins_gpu.reserve(ins.size()); for (auto &ptr : gpu_addr) { ins_gpu.push_back(ptr->value); } NumberPointerPointerArray in_arr; in_arr.init(ins_gpu.data(), ins_gpu.size()); NumberPointerArray out_arr; out_arr.init(vals.data(), vals.size()); int block_count = DefaultBlockCount(count * dim); KernelPAddForward<<<block_count, TPB>>>((const dtype***)in_arr.value, count, dim, in_count, out_arr.value); CheckCudaError(); } __global__ void KernelPAddBackward(const dtype **losses, int count, int dim, int in_count, dtype ***in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int dim_mul_count = dim * count; for (int i = index; i < dim_mul_count * in_count; i += step) { int in_count_i = i / dim_mul_count; int dim_mul_count_i = i % dim_mul_count; int count_i = dim_mul_count_i / dim; int dim_i = dim_mul_count_i % dim; DeviceAtomicAdd(in_losses[in_count_i][count_i] + dim_i, losses[count_i][dim_i]); } } void PAddBackward(const std::vector<dtype*> &losses, int count, int dim, int in_count, std::vector<std::vector<dtype*>> &in_losses) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(in_losses.size()); for (const std::vector<dtype*> &x : in_losses) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> in_losses_gpu; in_losses_gpu.reserve(in_losses.size()); for (auto &ptr : gpu_addr) { in_losses_gpu.push_back(ptr->value); } NumberPointerPointerArray in_loss_arr; in_loss_arr.init(in_losses_gpu.data(), in_losses_gpu.size()); NumberPointerArray out_loss_arr; out_loss_arr.init((dtype**)losses.data(), losses.size()); int block_count = DefaultBlockCount(in_count * count * dim); KernelPAddBackward<<<block_count, TPB>>>((const dtype**)out_loss_arr.value, count, dim, in_count, in_loss_arr.value); CheckCudaError(); } __global__ void KernelSoftMaxLoss(const dtype **vals, 
dtype **losses, int *correct_count, int *answers, int batchsize, int count, int dim) { volatile __shared__ int opt_label; volatile __shared__ dtype shared_val[TPB]; volatile __shared__ int64_t max_indexes[TPB]; volatile __shared__ dtype scores_sum[TPB]; volatile __shared__ dtype scores[TPB]; int dim_i = threadIdx.x; int count_i = blockIdx.x; if (count_i == 0 && dim_i == 0) { *correct_count = 0; } shared_val[dim_i] = dim_i < dim ? vals[count_i][dim_i] : -INFINITY; max_indexes[dim_i] = dim_i; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (shared_val[threadIdx.x + i] > shared_val[threadIdx.x]) { // race shared_val[threadIdx.x] = shared_val[threadIdx.x + i]; // race max_indexes[threadIdx.x] = max_indexes[threadIdx.x + i]; // race } __syncthreads(); } if (threadIdx.x == 0) { opt_label = max_indexes[0]; if (answers[count_i] == opt_label) { atomicAdd(correct_count, 1); } } __syncthreads(); dtype max_score = vals[count_i][opt_label]; dtype score = dim_i < dim ? cuda_exp(vals[count_i][dim_i] - max_score) : 0.0f; scores[dim_i] = score; scores_sum[dim_i] = score; for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { scores_sum[threadIdx.x] = scores_sum[threadIdx.x] + scores_sum[threadIdx.x + i]; // race __syncthreads(); } if (dim_i < dim) { losses[count_i][dim_i] = (scores[dim_i] / scores_sum[0] - (dim_i == answers[count_i] ? 
1 : 0)) / batchsize; } } void SoftMaxLoss(const std::vector<dtype*> &vals, std::vector<dtype*> &losses, int *correct_count, const std::vector<int> &answers, int batchsize, int count, int dim) { if (dim > TPB) { abort(); } int thread_count = NextTwoIntegerPowerNumber(dim); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray answer_arr; answer_arr.init((int*)answers.data(), answers.size()); KernelSoftMaxLoss<<<count, thread_count>>>( const_cast<const dtype **>(val_arr.value), const_cast<dtype **>(loss_arr.value), correct_count, answer_arr.value, batchsize, count, dim); CheckCudaError(); } __global__ void Predict(const dtype *val, int dim, int *result) { __shared__ volatile dtype shared_vals[TPB]; __shared__ volatile dtype shared_indexes[TPB]; shared_indexes[threadIdx.x] = threadIdx.x; if (threadIdx.x < dim) { shared_vals[threadIdx.x] = val[threadIdx.x]; } else { shared_vals[threadIdx.x] = -10000000.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (shared_vals[threadIdx.x] < shared_vals[threadIdx.x + i]) { shared_vals[threadIdx.x] = shared_vals[threadIdx.x + i]; shared_indexes[threadIdx.x] = shared_indexes[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_indexes[0]; } } int Predict(const dtype* val, int dim) { if (dim > TPB) { abort(); } int thread_count = NextTwoIntegerPowerNumber(dim); DeviceInt result; result.init(); Predict<<<1, thread_count>>>(val, dim, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelMax(const dtype *const *v, int count, int dim, dtype *block_maxes, int *block_max_is, int *block_counters, int *max_indexes, dtype *max_vals) { __shared__ volatile dtype shared_max[TPB]; __shared__ volatile dtype shared_max_i[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] 
= 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_max[threadIdx.x] = offset < dim ? v[count_i][offset] : -INFINITY; shared_max_i[threadIdx.x] = offset; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_maxes[block_maxes_offset] = shared_max[0]; block_max_is[block_maxes_offset] = shared_max_i[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype max = -INFINITY; int max_i; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; if (block_maxes[offset] > max) { max = block_maxes[offset]; max_i = block_max_is[offset]; } } shared_max[threadIdx.x] = max; shared_max_i[threadIdx.x] = max_i; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { max_vals[count_i] = shared_max[0]; max_indexes[count_i] = shared_max_i[0]; } } } __global__ void KernelSingleMax(const dtype *const *v, int count, int dim, int *max_indexes, dtype *max_vals) { for (int count_i = 0; count_i < count; ++count_i) { dtype max_val = -INFINITY; int max_i; for (int dim_i = 0; dim_i < dim; ++ dim_i) { if (v[count_i][dim_i] > max_val) { max_val = v[count_i][dim_i]; max_i = dim_i; } } max_indexes[count_i] = max_i; max_vals[count_i] = max_val; } } void Max(const dtype *const *v, int count, int dim, int *max_indexes, dtype 
*max_vals) { int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB); int block_y_count = (dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); // cout << format("Max count:%1% dim:%2% thread_count:%3% block_y_count:%4%") % count % dim % thread_count // % block_y_count << endl; NumberArray block_maxes; block_maxes.init(block_y_count * count); IntArray block_max_is, block_counters; block_max_is.init(block_y_count * count); block_counters.init(count); KernelMax<<<block_dim, thread_count>>>(v, count, dim, block_maxes.value, block_max_is.value, block_counters.value, max_indexes, max_vals); #if TEST_CUDA NumberArray max_val_arr; IntArray max_indexer_arr; max_val_arr.init(count); max_indexer_arr.init(count); KernelSingleMax<<<1, 1>>>(v, count, dim, max_indexer_arr.value, max_val_arr.value); vector<int> max_indexer_target(count), max_indexer_gold(count); MyCudaMemcpy(max_indexer_target.data(), max_indexes, count * sizeof(int), cudaMemcpyDeviceToHost); MyCudaMemcpy(max_indexer_gold.data(), max_indexer_arr.value, count * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < count; ++i) { if (max_indexer_target.at(i) != max_indexer_gold.at(i)) { cerr << format("max_indexer_target:%1% max_indexer_gold:%2%") % max_indexer_target.at(i) % max_indexer_gold.at(i) << endl; abort(); } } #endif CheckCudaError(); } __global__ void KernelExp(const dtype *const *in, int count, int dim, const dtype *number_to_sub, dtype *const *out) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; out[count_i][dim_i] = cuda_exp(in[count_i][dim_i] - number_to_sub[count_i]); } } void Exp(const dtype *const *in, int count, int dim, const dtype *number_to_sub, dtype *const *out) { int block_count = DefaultBlockCount(dim * count); //cout << format("Exp count:%1% dim:%2% block_count:%3%") % count % dim % block_count << endl; KernelExp<<<block_count, TPB>>>(in, 
count, dim, number_to_sub, out); CheckCudaError(); } __global__ void KernelSum(const dtype *const *v, int count, int dim, dtype *block_sums, int *block_counters, dtype *sum_vals) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dim ? v[count_i][offset] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_sums[block_sums_offset] = shared_sum[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; sum += block_sums[offset]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { sum_vals[count_i] = shared_sum[0]; } } } void Sum(const dtype *const *v, int count, int dim, dtype *sum_vals) { int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB); int block_y_count = (dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_sums; block_sums.init(block_y_count * count); IntArray block_counters; block_counters.init(count); KernelSum<<<block_dim, thread_count>>>(v, count, dim, block_sums.value, block_counters.value, sum_vals); CheckCudaError(); } __global__ void KernelSoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals, const dtype *sums, const dtype 
*max_vals, const int *answers, dtype reverse_batchsize, dtype **grads, dtype *losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype loss = exps[count_i][dim_i] / sums[count_i]; if (dim_i == answers[count_i]) { loss -= 1.0f; } grads[count_i][dim_i] = loss * reverse_batchsize; losses[count_i] = (cuda_log(sums[count_i]) - vals[count_i][answers[count_i]] + max_vals[count_i]) * reverse_batchsize; } } void SoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals, const dtype *sums, const dtype *max_vals, const int *answers, dtype reverse_batchsize, dtype **grads, dtype *losses) { int block_count = DefaultBlockCount(dim * count); KernelSoftMaxLossByExp<<<block_count, TPB>>>(exps, count, dim, vals, sums, max_vals, answers, reverse_batchsize, grads, losses); CheckCudaError(); } std::pair<dtype, std::vector<int>> SoftMaxLoss(const std::vector<const dtype *> &vals_vector, int count, int dim, const std::vector<int> &gold_answers, int batchsize, const std::vector<dtype *> &losses_vector) { IntArray answer_arr, gold_answer_arr; answer_arr.init(count); gold_answer_arr.init((int*)gold_answers.data(), count); NumberArray max_vals, sum_vals; max_vals.init(count); sum_vals.init(count); NumberPointerArray vals, losses; vals.init((dtype**)vals_vector.data(), count); losses.init((dtype**)losses_vector.data(), count); Max(vals.value, count, dim, answer_arr.value, max_vals.value); Exp(vals.value, count, dim, max_vals.value, losses.value); Sum(losses.value, count, dim, sum_vals.value); NumberArray loss_arr; loss_arr.init(count); SoftMaxLossByExp(losses.value, count, dim, vals.value, sum_vals.value, max_vals.value, gold_answer_arr.value, 1.0 / batchsize, losses.value, loss_arr.value); vector<int> answers(count); MyCudaMemcpy(answers.data(), answer_arr.value, count * sizeof(int), cudaMemcpyDeviceToHost); vector<dtype> loss_vector(count); 
MyCudaMemcpy(loss_vector.data(), loss_arr.value, count * sizeof(dtype), cudaMemcpyDeviceToHost); dtype loss_sum = accumulate(loss_vector.begin(), loss_vector.end(), 0.0f); return std::make_pair(loss_sum, answers); } __global__ void KernelSquareSum(const dtype *v, int len, dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { is_last_block = false; } shared_sum[threadIdx.x] = 0.0f; for (int i = index; i < len; i += blockDim.x * gridDim.x) { shared_sum[threadIdx.x] += v[i] * v[i]; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, int len) { int block_count = DefaultBlockCount(len); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); KernelSquareSum<<<block_count, TPB>>>(v, len, global_sum.value, block_counter.value, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelSquareSum(const dtype *v, const bool *indexers, int count, int dim, dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index 
= DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { global_sum[blockIdx.x] = 0.0f; is_last_block = false; } int count_i = index / dim; if (index < count * dim && indexers[count_i]) { shared_sum[threadIdx.x] = v[index] * v[index]; } else { shared_sum[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { float sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, const bool *indexers, int count, int dim) { int block_count = DefaultBlockCountWithoutLimit(count * dim); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); KernelSquareSum<<<block_count, TPB>>>(v, indexers, count, dim, global_sum.value, block_counter.value, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelRescale(dtype *v, int len, dtype scale) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i += step) { v[i] *= scale; } } void Rescale(dtype *v, int len, dtype scale) { int block_count = DefaultBlockCount(len); KernelRescale<<<block_count, TPB>>>(v, len, scale); CheckCudaError(); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, 
dtype reg, dtype eps, dtype x) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (!is_bias) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x; dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); dtype x = 1.0f / (1 - pow(belta1, iter + 1)); KernelUpdateAdam<<<block_count, TPB>>>(val, grad, row, col, is_bias, aux_mean, aux_square, iter, belta1, belta2, alpha, reg, eps, x); CheckCudaError(); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / row; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iters[count_i] + 1)) / (1 - cuda_pow(belta1, iters[count_i] + 1)); dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } } __global__ void KernelSelfPlusIters(const bool *indexers, int *iters, int count) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count; i += step) { if (indexers[i]) { ++iters[i]; } } 
} void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); KernelUpdateAdam<<<block_count, TPB>>>(val, grad, row, col, aux_mean, aux_square, indexers, iters, belta1, belta2, alpha, reg, eps); CheckCudaError(); block_count = DefaultBlockCount(col); KernelSelfPlusIters<<<block_count, TPB>>>(indexers, iters, col); CheckCudaError(); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); KernelUpdateAdagrad<<<block_count, TPB>>>(val, grad, row, col, aux_square, alpha, reg, eps); CheckCudaError(); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / col; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); KernelUpdateAdagrad<<<block_count, TPB>>>(val, grad, row, col, 
aux_square, indexers, alpha, reg, eps); CheckCudaError(); } void *GraphHostAlloc() { void *m; CallCuda(cudaHostAlloc(&m, 10000000, cudaHostAllocWriteCombined)); if (m == NULL) { abort(); } return m; } }
93e9188b413bbd7e3638e1add9b94ed37fd034ba.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrix_math.h"
#include <iostream>

//------------------------
// Matrices
//------------------------
// Flat row-major 5x5 int matrices (25 elements each).
int *hst_matrix1;  // host input A
int *hst_matrix2;  // host input B
int *hst_matrix3;  // host result buffer
int *dev_matrix1;  // device copy of A
int *dev_matrix2;  // device copy of B
int *dev_matrix3;  // device result C

#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)

// Report the last HIP runtime error (if any) and terminate the process.
// A negative `line` suppresses the "Line N:" prefix.
void checkCUDAError(const char *msg, int line = -1)
{
    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        if (line >= 0) {
            fprintf(stderr, "Line %d: ", line);
        }
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Allocate host/device matrices, fill A and B with 0..24, print them,
// and upload both inputs to the device.
void initialize()
{
    hst_matrix1 = new int[25];
    hst_matrix2 = new int[25];
    hst_matrix3 = new int[25];
    for (int i = 0; i < 25; i++) {
        hst_matrix1[i] = i;
        hst_matrix2[i] = i;
    }
    std::cout<<"Matrix A : \n";
    printMatrix(hst_matrix1);
    std::cout<<"\n\nMatrix B : \n";
    printMatrix(hst_matrix2);
    std::cout<<std::endl<<std::endl;

    //Allocate device memory
    hipMalloc((void**)&dev_matrix1, 25 * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
    hipMalloc((void**)&dev_matrix2, 25 * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
    hipMalloc((void**)&dev_matrix3, 25 * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_pos failed!");

    //copy values to device
    hipMemcpy(dev_matrix1, hst_matrix1, 25 * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_matrix2, hst_matrix2, 25 * sizeof(int), hipMemcpyHostToDevice);
}

// Run A+B, A-B and A*B on the device, copying each result back and
// printing it. hipMemcpy is synchronous, so the device-to-host copy
// also serves as the synchronization point after each launch.
void doMatrixMath()
{
    //Addition
    doMatrixAdd();
    hipMemcpy(hst_matrix3, dev_matrix3, 25 * sizeof(int), hipMemcpyDeviceToHost);
    std::cout<<"A + B = \n";
    printMatrix(hst_matrix3);
    std::cout<<std::endl<<std::endl;

    //Subtraction
    doMatrixSub();
    hipMemcpy(hst_matrix3, dev_matrix3, 25 * sizeof(int), hipMemcpyDeviceToHost);
    std::cout<<"A - B = \n";
    printMatrix(hst_matrix3);
    std::cout<<std::endl<<std::endl;

    //Multiplication
    doMatrixMul();
    hipMemcpy(hst_matrix3, dev_matrix3, 25 * sizeof(int), hipMemcpyDeviceToHost);
    std::cout<<"A * B = \n";
    printMatrix(hst_matrix3);
    std::cout<<std::endl<<std::endl;
}

// Free all host and device matrix storage.
void cleanup()
{
    // Arrays allocated with new[] must be released with delete[]
    // (plain delete on a new[] pointer is undefined behavior).
    delete[] hst_matrix1;
    delete[] hst_matrix2;
    delete[] hst_matrix3;
    hipFree(dev_matrix1);
    hipFree(dev_matrix2);
    hipFree(dev_matrix3);
}

// Print a flat 25-element matrix as 5 rows of 5 values.
void printMatrix(int * mat)
{
    for(int i=0; i<5; ++i) {
        for(int j=0; j<5; ++j) {
            std::cout<<mat[i*5 + j]<<" ";
        }
        std::cout<<std::endl;
    }
}

int *h_data;  // unused; kept so this file's external symbols are unchanged

// Element-wise C = A + B over 25 entries; surplus threads are masked off.
__global__ void mat_add(int * A, int * B, int * C)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if(index < 25) {
        C[index] = A[index] + B[index];
    }
}

// Element-wise C = A - B over 25 entries; surplus threads are masked off.
__global__ void mat_sub(int * A, int * B, int * C)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if(index < 25) {
        C[index] = A[index] - B[index];
    }
}

// 5x5 matrix product C = A * B; each thread computes one output cell
// (row i = index % 5, column j = index / 5), dot product unrolled by hand.
__global__ void mat_mul(int * A, int * B, int * C)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if(index < 25) {
        int i, j;
        i = index % 5;
        j = index / 5;
        C[i*5 + j] = A[i*5] * B[j]
                   + A[i*5 + 1] * B[(1)*5 + j]
                   + A[i*5 + 2] * B[(2)*5 + j]
                   + A[i*5 + 3] * B[(3)*5 + j]
                   + A[i*5 + 4] * B[(4)*5 + j];
    }
}

// 10 blocks x 3 threads = 30 threads, enough to cover the 25 elements.
int gridSize = 10, blockSize = 3;

void doMatrixAdd()
{
    hipLaunchKernelGGL(( mat_add), dim3(gridSize), dim3(blockSize), 0, 0, dev_matrix1, dev_matrix2, dev_matrix3);
}

void doMatrixSub()
{
    hipLaunchKernelGGL(( mat_sub), dim3(gridSize), dim3(blockSize), 0, 0, dev_matrix1, dev_matrix2, dev_matrix3);
}

void doMatrixMul()
{
    hipLaunchKernelGGL(( mat_mul), dim3(gridSize), dim3(blockSize), 0, 0, dev_matrix1, dev_matrix2, dev_matrix3);
}

// Time one add+sub+mul sequence with HIP events and print the elapsed time.
void doPerformanceCalculation()
{
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    hipLaunchKernelGGL(( mat_add), dim3(gridSize), dim3(blockSize), 0, 0, dev_matrix1, dev_matrix2, dev_matrix3);
    hipLaunchKernelGGL(( mat_sub), dim3(gridSize), dim3(blockSize), 0, 0, dev_matrix1, dev_matrix2, dev_matrix3);
    hipLaunchKernelGGL(( mat_mul), dim3(gridSize), dim3(blockSize), 0, 0, dev_matrix1, dev_matrix2, dev_matrix3);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    std::cout<<"Total time in milliseconds : "<<milliseconds<<std::endl;
    // Events were previously leaked; release them once the timing is read.
    hipEventDestroy(start);
    hipEventDestroy(stop);
}
93e9188b413bbd7e3638e1add9b94ed37fd034ba.cu
#include "matrix_math.h"
#include <iostream>

//------------------------
// Matrices
//------------------------
// Flat row-major 5x5 int matrices (25 elements each).
int *hst_matrix1;  // host input A
int *hst_matrix2;  // host input B
int *hst_matrix3;  // host result buffer
int *dev_matrix1;  // device copy of A
int *dev_matrix2;  // device copy of B
int *dev_matrix3;  // device result C

#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)

// Report the last CUDA runtime error (if any) and terminate the process.
// A negative `line` suppresses the "Line N:" prefix.
void checkCUDAError(const char *msg, int line = -1)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        if (line >= 0) {
            fprintf(stderr, "Line %d: ", line);
        }
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Allocate host/device matrices, fill A and B with 0..24, print them,
// and upload both inputs to the device.
void initialize()
{
    hst_matrix1 = new int[25];
    hst_matrix2 = new int[25];
    hst_matrix3 = new int[25];
    for (int i = 0; i < 25; i++) {
        hst_matrix1[i] = i;
        hst_matrix2[i] = i;
    }
    std::cout<<"Matrix A : \n";
    printMatrix(hst_matrix1);
    std::cout<<"\n\nMatrix B : \n";
    printMatrix(hst_matrix2);
    std::cout<<std::endl<<std::endl;

    //Allocate device memory
    cudaMalloc((void**)&dev_matrix1, 25 * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
    cudaMalloc((void**)&dev_matrix2, 25 * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
    cudaMalloc((void**)&dev_matrix3, 25 * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");

    //copy values to device
    cudaMemcpy(dev_matrix1, hst_matrix1, 25 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_matrix2, hst_matrix2, 25 * sizeof(int), cudaMemcpyHostToDevice);
}

// Run A+B, A-B and A*B on the device, copying each result back and
// printing it. cudaMemcpy is synchronous, so the device-to-host copy
// also serves as the synchronization point after each launch.
void doMatrixMath()
{
    //Addition
    doMatrixAdd();
    cudaMemcpy(hst_matrix3, dev_matrix3, 25 * sizeof(int), cudaMemcpyDeviceToHost);
    std::cout<<"A + B = \n";
    printMatrix(hst_matrix3);
    std::cout<<std::endl<<std::endl;

    //Subtraction
    doMatrixSub();
    cudaMemcpy(hst_matrix3, dev_matrix3, 25 * sizeof(int), cudaMemcpyDeviceToHost);
    std::cout<<"A - B = \n";
    printMatrix(hst_matrix3);
    std::cout<<std::endl<<std::endl;

    //Multiplication
    doMatrixMul();
    cudaMemcpy(hst_matrix3, dev_matrix3, 25 * sizeof(int), cudaMemcpyDeviceToHost);
    std::cout<<"A * B = \n";
    printMatrix(hst_matrix3);
    std::cout<<std::endl<<std::endl;
}

// Free all host and device matrix storage.
void cleanup()
{
    // Arrays allocated with new[] must be released with delete[]
    // (plain delete on a new[] pointer is undefined behavior).
    delete[] hst_matrix1;
    delete[] hst_matrix2;
    delete[] hst_matrix3;
    cudaFree(dev_matrix1);
    cudaFree(dev_matrix2);
    cudaFree(dev_matrix3);
}

// Print a flat 25-element matrix as 5 rows of 5 values.
void printMatrix(int * mat)
{
    for(int i=0; i<5; ++i) {
        for(int j=0; j<5; ++j) {
            std::cout<<mat[i*5 + j]<<" ";
        }
        std::cout<<std::endl;
    }
}

int *h_data;  // unused; kept so this file's external symbols are unchanged

// Element-wise C = A + B over 25 entries; surplus threads are masked off.
__global__ void mat_add(int * A, int * B, int * C)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if(index < 25) {
        C[index] = A[index] + B[index];
    }
}

// Element-wise C = A - B over 25 entries; surplus threads are masked off.
__global__ void mat_sub(int * A, int * B, int * C)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if(index < 25) {
        C[index] = A[index] - B[index];
    }
}

// 5x5 matrix product C = A * B; each thread computes one output cell
// (row i = index % 5, column j = index / 5), dot product unrolled by hand.
__global__ void mat_mul(int * A, int * B, int * C)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if(index < 25) {
        int i, j;
        i = index % 5;
        j = index / 5;
        C[i*5 + j] = A[i*5] * B[j]
                   + A[i*5 + 1] * B[(1)*5 + j]
                   + A[i*5 + 2] * B[(2)*5 + j]
                   + A[i*5 + 3] * B[(3)*5 + j]
                   + A[i*5 + 4] * B[(4)*5 + j];
    }
}

// 10 blocks x 3 threads = 30 threads, enough to cover the 25 elements.
int gridSize = 10, blockSize = 3;

void doMatrixAdd()
{
    mat_add<<<gridSize, blockSize>>>(dev_matrix1, dev_matrix2, dev_matrix3);
}

void doMatrixSub()
{
    mat_sub<<<gridSize, blockSize>>>(dev_matrix1, dev_matrix2, dev_matrix3);
}

void doMatrixMul()
{
    mat_mul<<<gridSize, blockSize>>>(dev_matrix1, dev_matrix2, dev_matrix3);
}

// Time one add+sub+mul sequence with CUDA events and print the elapsed time.
void doPerformanceCalculation()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    mat_add<<<gridSize, blockSize>>>(dev_matrix1, dev_matrix2, dev_matrix3);
    mat_sub<<<gridSize, blockSize>>>(dev_matrix1, dev_matrix2, dev_matrix3);
    mat_mul<<<gridSize, blockSize>>>(dev_matrix1, dev_matrix2, dev_matrix3);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout<<"Total time in milliseconds : "<<milliseconds<<std::endl;
    // Events were previously leaked; release them once the timing is read.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
4743189eb93283b6f59100b9262348f138d1a618.hip
// !!! This is a file automatically generated by hipify!!! #include <Eigen/Dense> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/lineset.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/platform.h" using namespace cupoch; using namespace cupoch::geometry; namespace { struct convert_trianglemesh_line_functor { convert_trianglemesh_line_functor(const Eigen::Vector3i *triangles, Eigen::Vector2i *lines) : triangles_(triangles), lines_(lines){}; const Eigen::Vector3i *triangles_; Eigen::Vector2i *lines_; __device__ void operator()(size_t idx) const { const Eigen::Vector3i &vidx = triangles_[idx]; thrust::minimum<int> min; thrust::maximum<int> max; lines_[3 * idx] = Eigen::Vector2i(min(vidx[0], vidx[1]), max(vidx[0], vidx[1])); lines_[3 * idx + 1] = Eigen::Vector2i(min(vidx[1], vidx[2]), max(vidx[1], vidx[2])); lines_[3 * idx + 2] = Eigen::Vector2i(min(vidx[2], vidx[0]), max(vidx[2], vidx[0])); } }; } // namespace std::shared_ptr<LineSet> LineSet::CreateFromPointCloudCorrespondences( const PointCloud &cloud0, const PointCloud &cloud1, const utility::device_vector<thrust::pair<int, int>> &correspondences) { auto lineset_ptr = std::make_shared<LineSet>(); const size_t point0_size = cloud0.points_.size(); const size_t point1_size = cloud1.points_.size(); const size_t corr_size = correspondences.size(); lineset_ptr->points_.resize(point0_size + point1_size); lineset_ptr->lines_.resize(corr_size); thrust::copy_n(utility::exec_policy(utility::GetStream(0)) ->on(utility::GetStream(0)), cloud0.points_.begin(), point0_size, lineset_ptr->points_.begin()); thrust::copy_n(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), cloud1.points_.begin(), point1_size, lineset_ptr->points_.begin() + point0_size); thrust::transform(utility::exec_policy(utility::GetStream(2)) ->on(utility::GetStream(2)), correspondences.begin(), correspondences.end(), lineset_ptr->lines_.begin(), [=] 
__device__(const thrust::pair<int, int> &corrs) { return Eigen::Vector2i(corrs.first, point0_size + corrs.second); }); cudaSafeCall(hipDeviceSynchronize()); return lineset_ptr; } std::shared_ptr<LineSet> LineSet::CreateFromTriangleMesh( const TriangleMesh &mesh) { auto lineset_ptr = std::make_shared<LineSet>(); lineset_ptr->points_.resize(mesh.vertices_.size()); lineset_ptr->lines_.resize(mesh.triangles_.size() * 3); convert_trianglemesh_line_functor func( thrust::raw_pointer_cast(mesh.triangles_.data()), thrust::raw_pointer_cast(lineset_ptr->lines_.data())); thrust::copy(utility::exec_policy(utility::GetStream(0)) ->on(utility::GetStream(0)), mesh.vertices_.begin(), mesh.vertices_.end(), lineset_ptr->points_.begin()); thrust::for_each(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size()), func); auto end = thrust::unique(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), lineset_ptr->lines_.begin(), lineset_ptr->lines_.end()); lineset_ptr->lines_.resize( thrust::distance(lineset_ptr->lines_.begin(), end)); cudaSafeCall(hipDeviceSynchronize()); return lineset_ptr; } std::shared_ptr<LineSet> LineSet::CreateFromOrientedBoundingBox( const OrientedBoundingBox &box) { auto line_set = std::make_shared<LineSet>(); const auto points = box.GetBoxPoints(); for (const auto &pt : points) line_set->points_.push_back(pt); line_set->lines_.push_back(Eigen::Vector2i(0, 1)); line_set->lines_.push_back(Eigen::Vector2i(1, 7)); line_set->lines_.push_back(Eigen::Vector2i(7, 2)); line_set->lines_.push_back(Eigen::Vector2i(2, 0)); line_set->lines_.push_back(Eigen::Vector2i(3, 6)); line_set->lines_.push_back(Eigen::Vector2i(6, 4)); line_set->lines_.push_back(Eigen::Vector2i(4, 5)); line_set->lines_.push_back(Eigen::Vector2i(5, 3)); line_set->lines_.push_back(Eigen::Vector2i(0, 3)); line_set->lines_.push_back(Eigen::Vector2i(1, 6)); 
line_set->lines_.push_back(Eigen::Vector2i(7, 4)); line_set->lines_.push_back(Eigen::Vector2i(2, 5)); line_set->PaintUniformColor(box.color_); return line_set; } std::shared_ptr<LineSet> LineSet::CreateFromAxisAlignedBoundingBox( const AxisAlignedBoundingBox &box) { auto line_set = std::make_shared<LineSet>(); const auto points = box.GetBoxPoints(); for (const auto &pt : points) line_set->points_.push_back(pt); line_set->lines_.push_back(Eigen::Vector2i(0, 1)); line_set->lines_.push_back(Eigen::Vector2i(1, 7)); line_set->lines_.push_back(Eigen::Vector2i(7, 2)); line_set->lines_.push_back(Eigen::Vector2i(2, 0)); line_set->lines_.push_back(Eigen::Vector2i(3, 6)); line_set->lines_.push_back(Eigen::Vector2i(6, 4)); line_set->lines_.push_back(Eigen::Vector2i(4, 5)); line_set->lines_.push_back(Eigen::Vector2i(5, 3)); line_set->lines_.push_back(Eigen::Vector2i(0, 3)); line_set->lines_.push_back(Eigen::Vector2i(1, 6)); line_set->lines_.push_back(Eigen::Vector2i(7, 4)); line_set->lines_.push_back(Eigen::Vector2i(2, 5)); line_set->PaintUniformColor(box.color_); return line_set; }
4743189eb93283b6f59100b9262348f138d1a618.cu
#include <Eigen/Dense> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/lineset.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/platform.h" using namespace cupoch; using namespace cupoch::geometry; namespace { struct convert_trianglemesh_line_functor { convert_trianglemesh_line_functor(const Eigen::Vector3i *triangles, Eigen::Vector2i *lines) : triangles_(triangles), lines_(lines){}; const Eigen::Vector3i *triangles_; Eigen::Vector2i *lines_; __device__ void operator()(size_t idx) const { const Eigen::Vector3i &vidx = triangles_[idx]; thrust::minimum<int> min; thrust::maximum<int> max; lines_[3 * idx] = Eigen::Vector2i(min(vidx[0], vidx[1]), max(vidx[0], vidx[1])); lines_[3 * idx + 1] = Eigen::Vector2i(min(vidx[1], vidx[2]), max(vidx[1], vidx[2])); lines_[3 * idx + 2] = Eigen::Vector2i(min(vidx[2], vidx[0]), max(vidx[2], vidx[0])); } }; } // namespace std::shared_ptr<LineSet> LineSet::CreateFromPointCloudCorrespondences( const PointCloud &cloud0, const PointCloud &cloud1, const utility::device_vector<thrust::pair<int, int>> &correspondences) { auto lineset_ptr = std::make_shared<LineSet>(); const size_t point0_size = cloud0.points_.size(); const size_t point1_size = cloud1.points_.size(); const size_t corr_size = correspondences.size(); lineset_ptr->points_.resize(point0_size + point1_size); lineset_ptr->lines_.resize(corr_size); thrust::copy_n(utility::exec_policy(utility::GetStream(0)) ->on(utility::GetStream(0)), cloud0.points_.begin(), point0_size, lineset_ptr->points_.begin()); thrust::copy_n(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), cloud1.points_.begin(), point1_size, lineset_ptr->points_.begin() + point0_size); thrust::transform(utility::exec_policy(utility::GetStream(2)) ->on(utility::GetStream(2)), correspondences.begin(), correspondences.end(), lineset_ptr->lines_.begin(), [=] __device__(const thrust::pair<int, int> &corrs) { return 
Eigen::Vector2i(corrs.first, point0_size + corrs.second); }); cudaSafeCall(cudaDeviceSynchronize()); return lineset_ptr; } std::shared_ptr<LineSet> LineSet::CreateFromTriangleMesh( const TriangleMesh &mesh) { auto lineset_ptr = std::make_shared<LineSet>(); lineset_ptr->points_.resize(mesh.vertices_.size()); lineset_ptr->lines_.resize(mesh.triangles_.size() * 3); convert_trianglemesh_line_functor func( thrust::raw_pointer_cast(mesh.triangles_.data()), thrust::raw_pointer_cast(lineset_ptr->lines_.data())); thrust::copy(utility::exec_policy(utility::GetStream(0)) ->on(utility::GetStream(0)), mesh.vertices_.begin(), mesh.vertices_.end(), lineset_ptr->points_.begin()); thrust::for_each(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size()), func); auto end = thrust::unique(utility::exec_policy(utility::GetStream(1)) ->on(utility::GetStream(1)), lineset_ptr->lines_.begin(), lineset_ptr->lines_.end()); lineset_ptr->lines_.resize( thrust::distance(lineset_ptr->lines_.begin(), end)); cudaSafeCall(cudaDeviceSynchronize()); return lineset_ptr; } std::shared_ptr<LineSet> LineSet::CreateFromOrientedBoundingBox( const OrientedBoundingBox &box) { auto line_set = std::make_shared<LineSet>(); const auto points = box.GetBoxPoints(); for (const auto &pt : points) line_set->points_.push_back(pt); line_set->lines_.push_back(Eigen::Vector2i(0, 1)); line_set->lines_.push_back(Eigen::Vector2i(1, 7)); line_set->lines_.push_back(Eigen::Vector2i(7, 2)); line_set->lines_.push_back(Eigen::Vector2i(2, 0)); line_set->lines_.push_back(Eigen::Vector2i(3, 6)); line_set->lines_.push_back(Eigen::Vector2i(6, 4)); line_set->lines_.push_back(Eigen::Vector2i(4, 5)); line_set->lines_.push_back(Eigen::Vector2i(5, 3)); line_set->lines_.push_back(Eigen::Vector2i(0, 3)); line_set->lines_.push_back(Eigen::Vector2i(1, 6)); line_set->lines_.push_back(Eigen::Vector2i(7, 4)); 
line_set->lines_.push_back(Eigen::Vector2i(2, 5)); line_set->PaintUniformColor(box.color_); return line_set; } std::shared_ptr<LineSet> LineSet::CreateFromAxisAlignedBoundingBox( const AxisAlignedBoundingBox &box) { auto line_set = std::make_shared<LineSet>(); const auto points = box.GetBoxPoints(); for (const auto &pt : points) line_set->points_.push_back(pt); line_set->lines_.push_back(Eigen::Vector2i(0, 1)); line_set->lines_.push_back(Eigen::Vector2i(1, 7)); line_set->lines_.push_back(Eigen::Vector2i(7, 2)); line_set->lines_.push_back(Eigen::Vector2i(2, 0)); line_set->lines_.push_back(Eigen::Vector2i(3, 6)); line_set->lines_.push_back(Eigen::Vector2i(6, 4)); line_set->lines_.push_back(Eigen::Vector2i(4, 5)); line_set->lines_.push_back(Eigen::Vector2i(5, 3)); line_set->lines_.push_back(Eigen::Vector2i(0, 3)); line_set->lines_.push_back(Eigen::Vector2i(1, 6)); line_set->lines_.push_back(Eigen::Vector2i(7, 4)); line_set->lines_.push_back(Eigen::Vector2i(2, 5)); line_set->PaintUniformColor(box.color_); return line_set; }
9d361940ca4b5d8f821cb411f578874d4d7419b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "spring_attacher_gpu.hpp" #include <sofa/gpu/cuda/CudaCommon.h> #include <iostream> #include <cuda_gl_interop.h> #include "kernel_math.hpp" struct assoc_t{ int prim; float3 bary; bool valid; __device__ bool is_valid() { return valid; } }; __device__ assoc_t unpack_association_data(float4 &packed) { assoc_t assoc; //for(int i=0; i<3; ++i) get(assoc.bary,i) = get(packed,i); assoc.bary.x = packed.x; assoc.bary.y = packed.y; assoc.bary.z = packed.z; if(packed.w >= 0){ assoc.prim = (unsigned int) packed.w; assoc.valid = true; } else { assoc.prim = 0; assoc.valid = false; } return assoc; } __device__ int get_index_x() { int index = blockDim.x * blockIdx.x + threadIdx.x; return index; } __device__ int2 get_index_xy() { int2 index; index.x = blockDim.x * blockIdx.x + threadIdx.x; index.y = blockDim.y * blockIdx.y + threadIdx.y; return index; } __device__ void fetch_vertices(float3* vertices, const float3* vertex_list, uint3 &vertex_indices) { for(size_t i=0; i<3; ++i) vertices[i] = vertex_list[get(vertex_indices,i)]; } __global__ void clear_kernel(float3* anchor_list, float* weights, unsigned int size) { int index = get_index_x(); if(index < size) { anchor_list[index].x = 0; anchor_list[index].y = 0; anchor_list[index].z = 0; weights[index] = 0; } } __global__ void accumulate_forces_kernel(float3* data, float4* association_data, float3* anchor_list, uint3* triangles, float3* vertex_list, float3* normal_list, float* weights, unsigned int width, unsigned int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if( x >= width || y >= height) return; float3 data_point = data[x+y*width]; if(!isfinite(data_point)) return; assoc_t assoc = unpack_association_data(association_data[x+y*width]); if(!assoc.valid) { for(int dx=-2; dx<=2; ++dx) { for(int dy=-2; dy <=2; ++dy) { int idx = (x+dx)+(y+dy)*width; assoc = 
unpack_association_data(association_data[idx]); if(assoc.valid) break; } } } if(!assoc.valid) return; uint3 triangle = triangles[assoc.prim]; for(int i=0; i<3; ++i) { unsigned int vertex_index = get(triangle,i); float3 vertex = vertex_list[vertex_index]; float3 normal = normal_list[vertex_index]; normalize(normal); float3 &anchor = anchor_list[vertex_index]; float &weight = weights[vertex_index]; float3 relative = data_point - vertex; float len = norm(relative); if(len < 25) { float l = relative*normal; if(l > 5) l = 5; float3 projected = normal * l; if(len>5) projected *= 25/(len*len); if(isfinite(projected)){ accumulate(anchor, projected); atomicAdd(&weight ,1); } } } } __global__ void attach_kernel(float3* anchors, float3* vertices, float* weights, unsigned int vertex_count) { int index = get_index_x(); if(index >= vertex_count) return; float3 &anchor = anchors[index]; const float3 &vertex = vertices[index]; const float &weight = weights[index]; float3 force = anchor; float3 displacement_vector = {0.0001f,0,0}; if(weight > 0 && isfinite(force)) { displacement_vector = force / weight; } if(norm2(displacement_vector) < 0.00001f) { displacement_vector.x = 0.0001f; displacement_vector.y = 0; displacement_vector.z = 0; } anchor = vertex + displacement_vector; } void attach_gpu_springs( float *data, float *association_data, float *anchors, unsigned int *triangles, float *vertices, float *normals, float* weights, unsigned int vertex_count, unsigned int width, unsigned int height) { dim3 vertex_dim = dim3(vertex_count,1,1); dim3 num_threads_vertices = dim3(BSIZE,1,1); dim3 num_blocks_vertices = calculate_block_count(vertex_dim, num_threads_vertices); dim3 image_dim = dim3(width,height,1); dim3 num_threads_image = dim3(8,8,1); dim3 num_blocks_image = calculate_block_count(image_dim, num_threads_image); hipLaunchKernelGGL(( clear_kernel), dim3(num_blocks_vertices),dim3(num_threads_vertices), 0, 0, (float3*) anchors, weights, vertex_count ); hipDeviceSynchronize(); 
hipLaunchKernelGGL(( accumulate_forces_kernel), dim3(num_blocks_image), dim3(num_threads_image), 0, 0, (float3*) data, (float4*) association_data, (float3*) anchors, (uint3*) triangles, (float3*) vertices, (float3*) normals, weights, width, height ); hipDeviceSynchronize(); hipLaunchKernelGGL(( attach_kernel), dim3(num_blocks_vertices), dim3(num_threads_vertices), 0, 0, (float3*) anchors, (float3*) vertices, weights, vertex_count ); hipDeviceSynchronize(); } void map_association_surface(hipArray * &array) { }
9d361940ca4b5d8f821cb411f578874d4d7419b6.cu
#include "spring_attacher_gpu.hpp" #include <sofa/gpu/cuda/CudaCommon.h> #include <iostream> #include <cuda_gl_interop.h> #include "kernel_math.hpp" struct assoc_t{ int prim; float3 bary; bool valid; __device__ bool is_valid() { return valid; } }; __device__ assoc_t unpack_association_data(float4 &packed) { assoc_t assoc; //for(int i=0; i<3; ++i) get(assoc.bary,i) = get(packed,i); assoc.bary.x = packed.x; assoc.bary.y = packed.y; assoc.bary.z = packed.z; if(packed.w >= 0){ assoc.prim = (unsigned int) packed.w; assoc.valid = true; } else { assoc.prim = 0; assoc.valid = false; } return assoc; } __device__ int get_index_x() { int index = blockDim.x * blockIdx.x + threadIdx.x; return index; } __device__ int2 get_index_xy() { int2 index; index.x = blockDim.x * blockIdx.x + threadIdx.x; index.y = blockDim.y * blockIdx.y + threadIdx.y; return index; } __device__ void fetch_vertices(float3* vertices, const float3* vertex_list, uint3 &vertex_indices) { for(size_t i=0; i<3; ++i) vertices[i] = vertex_list[get(vertex_indices,i)]; } __global__ void clear_kernel(float3* anchor_list, float* weights, unsigned int size) { int index = get_index_x(); if(index < size) { anchor_list[index].x = 0; anchor_list[index].y = 0; anchor_list[index].z = 0; weights[index] = 0; } } __global__ void accumulate_forces_kernel(float3* data, float4* association_data, float3* anchor_list, uint3* triangles, float3* vertex_list, float3* normal_list, float* weights, unsigned int width, unsigned int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if( x >= width || y >= height) return; float3 data_point = data[x+y*width]; if(!isfinite(data_point)) return; assoc_t assoc = unpack_association_data(association_data[x+y*width]); if(!assoc.valid) { for(int dx=-2; dx<=2; ++dx) { for(int dy=-2; dy <=2; ++dy) { int idx = (x+dx)+(y+dy)*width; assoc = unpack_association_data(association_data[idx]); if(assoc.valid) break; } } } if(!assoc.valid) return; uint3 
triangle = triangles[assoc.prim]; for(int i=0; i<3; ++i) { unsigned int vertex_index = get(triangle,i); float3 vertex = vertex_list[vertex_index]; float3 normal = normal_list[vertex_index]; normalize(normal); float3 &anchor = anchor_list[vertex_index]; float &weight = weights[vertex_index]; float3 relative = data_point - vertex; float len = norm(relative); if(len < 25) { float l = relative*normal; if(l > 5) l = 5; float3 projected = normal * l; if(len>5) projected *= 25/(len*len); if(isfinite(projected)){ accumulate(anchor, projected); atomicAdd(&weight ,1); } } } } __global__ void attach_kernel(float3* anchors, float3* vertices, float* weights, unsigned int vertex_count) { int index = get_index_x(); if(index >= vertex_count) return; float3 &anchor = anchors[index]; const float3 &vertex = vertices[index]; const float &weight = weights[index]; float3 force = anchor; float3 displacement_vector = {0.0001f,0,0}; if(weight > 0 && isfinite(force)) { displacement_vector = force / weight; } if(norm2(displacement_vector) < 0.00001f) { displacement_vector.x = 0.0001f; displacement_vector.y = 0; displacement_vector.z = 0; } anchor = vertex + displacement_vector; } void attach_gpu_springs( float *data, float *association_data, float *anchors, unsigned int *triangles, float *vertices, float *normals, float* weights, unsigned int vertex_count, unsigned int width, unsigned int height) { dim3 vertex_dim = dim3(vertex_count,1,1); dim3 num_threads_vertices = dim3(BSIZE,1,1); dim3 num_blocks_vertices = calculate_block_count(vertex_dim, num_threads_vertices); dim3 image_dim = dim3(width,height,1); dim3 num_threads_image = dim3(8,8,1); dim3 num_blocks_image = calculate_block_count(image_dim, num_threads_image); clear_kernel<<<num_blocks_vertices,num_threads_vertices>>>( (float3*) anchors, weights, vertex_count ); cudaDeviceSynchronize(); accumulate_forces_kernel<<<num_blocks_image, num_threads_image>>>( (float3*) data, (float4*) association_data, (float3*) anchors, (uint3*) triangles, 
(float3*) vertices, (float3*) normals, weights, width, height ); cudaDeviceSynchronize(); attach_kernel<<<num_blocks_vertices, num_threads_vertices>>>( (float3*) anchors, (float3*) vertices, weights, vertex_count ); cudaDeviceSynchronize(); } void map_association_surface(CUarray &array) { }
softmax_with_cross_entropy_op.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/softmax_with_cross_entropy_op.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; namespace { template <typename T> __global__ void CrossEntropyGrad(T* logit_grad, const T* loss_grad, const int64_t* labels, const int batch_size, const int class_num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int sample_idx = tid / class_num; if (tid < batch_size) { PADDLE_ASSERT(labels[sample_idx] >= 0 && labels[sample_idx] < class_num); logit_grad[tid * class_num + labels[tid]] -= static_cast<T>(1.); } __syncthreads(); if (tid < batch_size * class_num) { logit_grad[tid] *= loss_grad[sample_idx]; } } template <typename T> __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, const T* loss_grad, const T* labels, const int batch_size, const int class_num) { int ids = blockIdx.x * blockDim.x + threadIdx.x; if (ids < batch_size * class_num) { int row_ids = ids / class_num; logit_grad[ids] = loss_grad[row_ids] * (logit_grad[ids] - labels[ids]); } } } // namespace template <typename T> class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()), "This kernel only runs on GPU device."); 
const Tensor* logits = context.Input<Tensor>("Logits"); const Tensor* labels = context.Input<Tensor>("Label"); Tensor* softmax = context.Output<Tensor>("Softmax"); Tensor* loss = context.Output<Tensor>("Loss"); softmax->mutable_data<T>(context.GetPlace()); loss->mutable_data<T>(context.GetPlace()); math::SoftmaxFunctor<platform::GPUPlace, T>()(context.device_context(), logits, softmax); math::CrossEntropyFunctor<platform::GPUPlace, T>()( context.device_context(), loss, softmax, labels, context.Attr<bool>("soft_label")); } }; template <typename T> class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()), "This kernel only runs on GPU device."); const Tensor* labels = context.Input<Tensor>("Label"); const T* loss_grad_data = context.Input<Tensor>(framework::GradVarName("Loss"))->data<T>(); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); logit_grad->ShareDataWith(*context.Input<Tensor>("Softmax")); T* logit_grad_data = logit_grad->data<T>(); const int batch_size = logit_grad->dims()[0]; const int class_num = logit_grad->dims()[1]; int block = 512; int grid = (batch_size * class_num + block - 1) / block; if (context.Attr<bool>("soft_label")) { const T* label_data = labels->data<T>(); hipLaunchKernelGGL(( SoftCrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, reinterpret_cast<const platform::CUDADeviceContext&>( context.device_context()) .stream(), logit_grad_data, loss_grad_data, label_data, batch_size, class_num); } else { const int64_t* label_data = labels->data<int64_t>(); hipLaunchKernelGGL(( CrossEntropyGrad<T>), dim3(grid), dim3(block), 0, reinterpret_cast<const platform::CUDADeviceContext&>( context.device_context()) .stream(), logit_grad_data, loss_grad_data, label_data, batch_size, class_num); } } }; } // namespace operators } // namespace paddle namespace ops = 
paddle::operators; REGISTER_OP_GPU_KERNEL(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>, ops::SoftmaxWithCrossEntropyCUDAKernel<double>); REGISTER_OP_GPU_KERNEL(softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<double>);
softmax_with_cross_entropy_op.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/softmax_with_cross_entropy_op.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; namespace { template <typename T> __global__ void CrossEntropyGrad(T* logit_grad, const T* loss_grad, const int64_t* labels, const int batch_size, const int class_num) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int sample_idx = tid / class_num; if (tid < batch_size) { PADDLE_ASSERT(labels[sample_idx] >= 0 && labels[sample_idx] < class_num); logit_grad[tid * class_num + labels[tid]] -= static_cast<T>(1.); } __syncthreads(); if (tid < batch_size * class_num) { logit_grad[tid] *= loss_grad[sample_idx]; } } template <typename T> __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, const T* loss_grad, const T* labels, const int batch_size, const int class_num) { int ids = blockIdx.x * blockDim.x + threadIdx.x; if (ids < batch_size * class_num) { int row_ids = ids / class_num; logit_grad[ids] = loss_grad[row_ids] * (logit_grad[ids] - labels[ids]); } } } // namespace template <typename T> class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()), "This kernel only runs on GPU device."); const Tensor* logits = context.Input<Tensor>("Logits"); const Tensor* labels = 
context.Input<Tensor>("Label"); Tensor* softmax = context.Output<Tensor>("Softmax"); Tensor* loss = context.Output<Tensor>("Loss"); softmax->mutable_data<T>(context.GetPlace()); loss->mutable_data<T>(context.GetPlace()); math::SoftmaxFunctor<platform::GPUPlace, T>()(context.device_context(), logits, softmax); math::CrossEntropyFunctor<platform::GPUPlace, T>()( context.device_context(), loss, softmax, labels, context.Attr<bool>("soft_label")); } }; template <typename T> class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()), "This kernel only runs on GPU device."); const Tensor* labels = context.Input<Tensor>("Label"); const T* loss_grad_data = context.Input<Tensor>(framework::GradVarName("Loss"))->data<T>(); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); logit_grad->ShareDataWith(*context.Input<Tensor>("Softmax")); T* logit_grad_data = logit_grad->data<T>(); const int batch_size = logit_grad->dims()[0]; const int class_num = logit_grad->dims()[1]; int block = 512; int grid = (batch_size * class_num + block - 1) / block; if (context.Attr<bool>("soft_label")) { const T* label_data = labels->data<T>(); SoftCrossEntropyGradientKernel<T><<< grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>( context.device_context()) .stream()>>>(logit_grad_data, loss_grad_data, label_data, batch_size, class_num); } else { const int64_t* label_data = labels->data<int64_t>(); CrossEntropyGrad<T><<< grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>( context.device_context()) .stream()>>>(logit_grad_data, loss_grad_data, label_data, batch_size, class_num); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>, 
ops::SoftmaxWithCrossEntropyCUDAKernel<double>); REGISTER_OP_GPU_KERNEL(softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<double>);
aadb3897beefc7e91c9e9c0e904f835cede0490a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "mathKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); float *input1 = NULL; hipMalloc(&input1, XSIZE*YSIZE); float *input2 = NULL; hipMalloc(&input2, XSIZE*YSIZE); int n = XSIZE*YSIZE; int oper = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( mathKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input1,input2,n,oper); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( mathKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input1,input2,n,oper); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( mathKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input1,input2,n,oper); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, 
microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
aadb3897beefc7e91c9e9c0e904f835cede0490a.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mathKernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// Candidate launch configurations: {BLOCKX, BLOCKY} block shapes to sweep.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate problem sizes: {XSIZE, YSIZE} matrix dimensions to sweep.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

/**
 * Benchmark driver: for each of the first argv[1] matrix sizes and every
 * block shape in blocks_, launches mathKernel 1000 times and prints the
 * elapsed time in microseconds together with the configuration as
 * [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 */
int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {  // guard: the matrix count is a required argument
        fprintf(stderr, "usage: %s <matrix_count (1..7)>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Device buffers are float arrays of XSIZE*YSIZE elements; size in
            // bytes (the original dropped sizeof(float), under-allocating 4x).
            float *output = NULL;
            cudaMalloc(&output, XSIZE * YSIZE * sizeof(float));
            float *input1 = NULL;
            cudaMalloc(&input1, XSIZE * YSIZE * sizeof(float));
            float *input2 = NULL;
            cudaMalloc(&input2, XSIZE * YSIZE * sizeof(float));
            int n = XSIZE * YSIZE;
            int oper = 1;
            // Round the grid dimensions up so the grid covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context creation before any timing
            mathKernel<<<gridBlock, threadBlock>>>(output, input1, input2, n, oper);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                mathKernel<<<gridBlock, threadBlock>>>(output, input1, input2, n, oper);
            }
            cudaDeviceSynchronize();  // drain warm-up work before starting the clock
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                mathKernel<<<gridBlock, threadBlock>>>(output, input1, input2, n, oper);
            }
            // Kernel launches are asynchronous: synchronize so the timed span
            // covers kernel execution, not just launch overhead.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Release device buffers so the sweep does not leak per iteration.
            cudaFree(output);
            cudaFree(input1);
            cudaFree(input2);
        }
    }
}
e607e8eb6d4cd235f0e31cff4a0e9f833632e3f8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

/**
 * Row-sum kernel: thread i accumulates row i of the flattened
 * (*dev_size x *dev_size) matrix dev_a into dev_b[i].
 * Launched with one block of *dev_size threads; dev_b must be zeroed
 * by the caller (it is copied in pre-initialized from the host).
 */
__global__ void addKernel(int *dev_a, int *dev_b, int *dev_size)
{
	int i = threadIdx.x;
	int j, p;
	for (j = 0; j < (*dev_size); j++)
	{
		p = *dev_size * i + j;  // linear index of element (i, j)
		dev_b[i] += dev_a[p];
		//printf("%d %d\n", i, p);
	}
}

/**
 * Reads a size x size integer matrix from stdin, sums each row on the GPU,
 * then totals the row sums on the host and prints the grand total.
 */
int main()
{
	const int size = 3;
	int s = size;
	int a[size][size];
	int b[size] = { 0 };
	int i = 0, j = 0;
	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++)
			scanf("%d", &a[i][j]);
	int *dev_a, *dev_b, *dev_size;
	int t = size * size * sizeof(int);  // bytes for the full matrix
	int t1 = size * sizeof(int);        // bytes for the row-sum vector
	hipMalloc((void**)&dev_a, t);
	hipMalloc((void**)&dev_b, t1);      // was over-allocated with t (full matrix size)
	hipMalloc((void**)&dev_size, sizeof(int));
	hipMemcpy(dev_a, a, t, hipMemcpyHostToDevice);
	hipMemcpy(dev_b, b, t1, hipMemcpyHostToDevice);
	hipMemcpy(dev_size, &s, sizeof(int), hipMemcpyHostToDevice);
	// One block, one thread per row.
	addKernel << <1, size >> >(dev_a, dev_b, dev_size);
	// Blocking device-to-host copy also synchronizes with the kernel.
	hipMemcpy(b, dev_b, t1, hipMemcpyDeviceToHost);
	printf("-----OUTPUT-----\n");
	int p = 0;
	for (i = 0; i < size; i++){
		//printf("%d ", b[i]);
		p += b[i];  // total of all row sums == sum of all matrix elements
		//printf("\n");
	}
	printf("%d", p);
	hipFree(dev_a);
	hipFree(dev_b);
	hipFree(dev_size);  // was leaked in the original
	return 0;
}
e607e8eb6d4cd235f0e31cff4a0e9f833632e3f8.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>

/**
 * Row-sum kernel: thread i accumulates row i of the flattened
 * (*dev_size x *dev_size) matrix dev_a into dev_b[i].
 * Launched with one block of *dev_size threads; dev_b must be zeroed
 * by the caller (it is copied in pre-initialized from the host).
 */
__global__ void addKernel(int *dev_a, int *dev_b, int *dev_size)
{
	int i = threadIdx.x;
	int j, p;
	for (j = 0; j < (*dev_size); j++)
	{
		p = *dev_size * i + j;  // linear index of element (i, j)
		dev_b[i] += dev_a[p];
		//printf("%d %d\n", i, p);
	}
}

/**
 * Reads a size x size integer matrix from stdin, sums each row on the GPU,
 * then totals the row sums on the host and prints the grand total.
 */
int main()
{
	const int size = 3;
	int s = size;
	int a[size][size];
	int b[size] = { 0 };
	int i = 0, j = 0;
	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++)
			scanf("%d", &a[i][j]);
	int *dev_a, *dev_b, *dev_size;
	int t = size * size * sizeof(int);  // bytes for the full matrix
	int t1 = size * sizeof(int);        // bytes for the row-sum vector
	cudaMalloc((void**)&dev_a, t);
	cudaMalloc((void**)&dev_b, t1);     // was over-allocated with t (full matrix size)
	cudaMalloc((void**)&dev_size, sizeof(int));
	cudaMemcpy(dev_a, a, t, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, t1, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_size, &s, sizeof(int), cudaMemcpyHostToDevice);
	// One block, one thread per row.
	addKernel << <1, size >> >(dev_a, dev_b, dev_size);
	// Blocking device-to-host copy also synchronizes with the kernel.
	cudaMemcpy(b, dev_b, t1, cudaMemcpyDeviceToHost);
	printf("-----OUTPUT-----\n");
	int p = 0;
	for (i = 0; i < size; i++){
		//printf("%d ", b[i]);
		p += b[i];  // total of all row sums == sum of all matrix elements
		//printf("\n");
	}
	printf("%d", p);
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_size);  // was leaked in the original
	return 0;
}
5ccf5297fa9b52dea662f46c1b316f5653de30ba.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>  // required for hipMalloc/hipMemcpy (missing in original)
#include <stdio.h>
#define N 10

/**
 * Round-trips a small int vector through device memory and prints one
 * element to confirm the host->device->host copies preserved the data.
 */
int main(int argc, char** argv){
	int vec_in[N] = {6,1,7,3,2,9,10,5,4,8};
	int vec_out[N];
	int* d_vec;
	hipMalloc(&d_vec, N * sizeof(int));
	hipMemcpy(d_vec, vec_in, N * sizeof(int), hipMemcpyHostToDevice);
	hipMemcpy(vec_out, d_vec, N * sizeof(int), hipMemcpyDeviceToHost);
	printf("vec_out[3]= %d \n", vec_out[3]);
	hipFree(d_vec);  // was leaked in the original
	return 0;
}
5ccf5297fa9b52dea662f46c1b316f5653de30ba.cu
#include <stdio.h>
#define N 10

/**
 * Round-trips a small int vector through device memory and prints one
 * element to confirm the host->device->host copies preserved the data.
 */
int main(int argc, char** argv){
	int vec_in[N] = {6,1,7,3,2,9,10,5,4,8};
	int vec_out[N];
	int* d_vec;
	cudaMalloc(&d_vec, N * sizeof(int));
	cudaMemcpy(d_vec, vec_in, N * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(vec_out, d_vec, N * sizeof(int), cudaMemcpyDeviceToHost);
	printf("vec_out[3]= %d \n", vec_out[3]);
	cudaFree(d_vec);  // was leaked in the original
	return 0;
}
16be1bc260a662c19eab8ea05b5a410af29fa1e8.hip
// !!! This is a file automatically generated by hipify!!!
#include "SteerForTargetSpeed.h"
#include <hip/hip_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/SeekVectorProvider.h"
#include "CUDAKernelOptions.cu"
#include <iostream>

using namespace OpenSteer;
using namespace std;

// Kernel (defined in a separate .cu file) that computes a per-vehicle
// steering vector toward its target speed, scaled by `weight`.
__global__ void steerForTargetSpeedKernel(VehicleData *vehicleData,
                                          VehicleConst *vehicleConst,
                                          float *targetSpeeds,
                                          float3 *steeringVectors,
                                          float weight,
                                          kernel_options options);

// Stores the target-speed source plus the weight/options forwarded to the
// kernel; the launch configuration is fixed at 128 threads per block.
OpenSteer::SteerForTargetSpeed::SteerForTargetSpeed(TargetSpeedProvider* targetSpeedProvider, float weight, kernel_options options)
{
    threadsPerBlock = 128;
    this->targetSpeedProvider = targetSpeedProvider;
    this->weight = weight;
    this->options = options;
}

OpenSteer::SteerForTargetSpeed::~SteerForTargetSpeed()
{
}

void OpenSteer::SteerForTargetSpeed::init()
{
    // nothing to do
}

// Launches the target-speed kernel on the default stream.
// NOTE(review): the launch is asynchronous and not error-checked here --
// presumably a later pipeline stage synchronizes; confirm in the base class.
void OpenSteer::SteerForTargetSpeed::run()
{
    hipLaunchKernelGGL(( steerForTargetSpeedKernel), dim3(gridDim()), dim3(blockDim()), 0, 0, getVehicleData(), getVehicleConst(), targetSpeedProvider->getTargetSpeeds(), getSteeringVectors(), weight, options);
}

void OpenSteer::SteerForTargetSpeed::close()
{
    // nothing to do
}
16be1bc260a662c19eab8ea05b5a410af29fa1e8.cu
#include "SteerForTargetSpeed.h"
#include <cuda_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/SeekVectorProvider.h"
#include "CUDAKernelOptions.cu"
#include <iostream>

using namespace OpenSteer;
using namespace std;

// Kernel (defined in a separate .cu file) that computes a per-vehicle
// steering vector toward its target speed, scaled by `weight`.
__global__ void steerForTargetSpeedKernel(VehicleData *vehicleData,
                                          VehicleConst *vehicleConst,
                                          float *targetSpeeds,
                                          float3 *steeringVectors,
                                          float weight,
                                          kernel_options options);

// Stores the target-speed source plus the weight/options forwarded to the
// kernel; the launch configuration is fixed at 128 threads per block.
OpenSteer::SteerForTargetSpeed::SteerForTargetSpeed(TargetSpeedProvider* targetSpeedProvider, float weight, kernel_options options)
{
    threadsPerBlock = 128;
    this->targetSpeedProvider = targetSpeedProvider;
    this->weight = weight;
    this->options = options;
}

OpenSteer::SteerForTargetSpeed::~SteerForTargetSpeed()
{
}

void OpenSteer::SteerForTargetSpeed::init()
{
    // nothing to do
}

// Launches the target-speed kernel on the default stream.
// NOTE(review): the launch is asynchronous and not error-checked here --
// presumably a later pipeline stage synchronizes; confirm in the base class.
void OpenSteer::SteerForTargetSpeed::run()
{
    steerForTargetSpeedKernel<<<gridDim(), blockDim()>>>(getVehicleData(), getVehicleConst(), targetSpeedProvider->getTargetSpeeds(), getSteeringVectors(), weight, options);
}

void OpenSteer::SteerForTargetSpeed::close()
{
    // nothing to do
}
1f623a3d8d216771282d77e6a9c5faa684ec430b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using platform::PADDLE_CUDA_NUM_THREADS;

#define CUDA_BLOCK_SIZE 16

// 3D kernel over (n = batch*geo_channels, h, w).  For even-indexed channels
// the output is id_w * 4 - input (x geometry); for odd-indexed channels it is
// id_h * 4 - input (y geometry).  The factor 4 presumably reflects a 4x
// downsampling of the geometry map relative to input pixels -- TODO confirm.
template <typename T>
__global__ void PolygonBoxTransformKernel(const int n, const int h, const int w,
                                          const T* input, T* output) {
  int id_n = threadIdx.x + blockDim.x * blockIdx.x;
  int id_h = threadIdx.y + blockDim.y * blockIdx.y;
  int id_w = threadIdx.z + blockDim.z * blockIdx.z;
  if (id_n < n && id_h < h && id_w < w) {  // guard the grid tail
    int id = id_n * h * w + w * id_h + id_w;
    if (id_n % 2 == 0) {
      output[id] = id_w * 4 - input[id];
    } else {
      output[id] = id_h * 4 - input[id];
    }
  }
}

// GPU kernel for the polygon_box_transform op: converts per-pixel offset
// geometry maps into absolute coordinates via PolygonBoxTransformKernel.
template <typename T>
class PolygonBoxTransformOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx.GetPlace()), true,
        platform::errors::InvalidArgument(
            "The polygon_box_transform operator needs to be executed on GPU."));
    auto* in = ctx.Input<Tensor>("Input");
    auto in_dims = in->dims();
    const T* in_data = in->data<T>();
    auto* out = ctx.Output<Tensor>("Output");
    T* out_data = out->mutable_data<T>(ctx.GetPlace());
    // Input layout is NCHW: [batch, geo_channels, height, width].
    int batch_size = in_dims[0];
    int geo_channels = in_dims[1];
    int height = in_dims[2];
    int width = in_dims[3];
    dim3 threadsPerBlock(
        PADDLE_CUDA_NUM_THREADS / (CUDA_BLOCK_SIZE * CUDA_BLOCK_SIZE),
        CUDA_BLOCK_SIZE, CUDA_BLOCK_SIZE);
    // NOTE(review): the x dimension uses exact division, so this assumes
    // batch_size * geo_channels is a multiple of threadsPerBlock.x; the y/z
    // dimensions use ceil-division.  Verify the divisibility invariant holds
    // for all supported shapes, otherwise trailing channels are skipped.
    dim3 numBlocks((batch_size * geo_channels) / threadsPerBlock.x,
                   (height + threadsPerBlock.y - 1) / threadsPerBlock.y,
                   (width + threadsPerBlock.z - 1) / threadsPerBlock.z);
    auto stream = ctx.cuda_device_context().stream();
    // Launch on the device context's stream (asynchronous).
    hipLaunchKernelGGL(( PolygonBoxTransformKernel<T>), dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
        batch_size * geo_channels, height, width, in_data, out_data);
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OP_CUDA_KERNEL(
    polygon_box_transform,
    paddle::operators::PolygonBoxTransformOpCUDAKernel<float>,
    paddle::operators::PolygonBoxTransformOpCUDAKernel<double>);
1f623a3d8d216771282d77e6a9c5faa684ec430b.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using platform::PADDLE_CUDA_NUM_THREADS;

#define CUDA_BLOCK_SIZE 16

// 3D kernel over (n = batch*geo_channels, h, w).  For even-indexed channels
// the output is id_w * 4 - input (x geometry); for odd-indexed channels it is
// id_h * 4 - input (y geometry).  The factor 4 presumably reflects a 4x
// downsampling of the geometry map relative to input pixels -- TODO confirm.
template <typename T>
__global__ void PolygonBoxTransformKernel(const int n, const int h, const int w,
                                          const T* input, T* output) {
  int id_n = threadIdx.x + blockDim.x * blockIdx.x;
  int id_h = threadIdx.y + blockDim.y * blockIdx.y;
  int id_w = threadIdx.z + blockDim.z * blockIdx.z;
  if (id_n < n && id_h < h && id_w < w) {  // guard the grid tail
    int id = id_n * h * w + w * id_h + id_w;
    if (id_n % 2 == 0) {
      output[id] = id_w * 4 - input[id];
    } else {
      output[id] = id_h * 4 - input[id];
    }
  }
}

// GPU kernel for the polygon_box_transform op: converts per-pixel offset
// geometry maps into absolute coordinates via PolygonBoxTransformKernel.
template <typename T>
class PolygonBoxTransformOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx.GetPlace()), true,
        platform::errors::InvalidArgument(
            "The polygon_box_transform operator needs to be executed on GPU."));
    auto* in = ctx.Input<Tensor>("Input");
    auto in_dims = in->dims();
    const T* in_data = in->data<T>();
    auto* out = ctx.Output<Tensor>("Output");
    T* out_data = out->mutable_data<T>(ctx.GetPlace());
    // Input layout is NCHW: [batch, geo_channels, height, width].
    int batch_size = in_dims[0];
    int geo_channels = in_dims[1];
    int height = in_dims[2];
    int width = in_dims[3];
    dim3 threadsPerBlock(
        PADDLE_CUDA_NUM_THREADS / (CUDA_BLOCK_SIZE * CUDA_BLOCK_SIZE),
        CUDA_BLOCK_SIZE, CUDA_BLOCK_SIZE);
    // NOTE(review): the x dimension uses exact division, so this assumes
    // batch_size * geo_channels is a multiple of threadsPerBlock.x; the y/z
    // dimensions use ceil-division.  Verify the divisibility invariant holds
    // for all supported shapes, otherwise trailing channels are skipped.
    dim3 numBlocks((batch_size * geo_channels) / threadsPerBlock.x,
                   (height + threadsPerBlock.y - 1) / threadsPerBlock.y,
                   (width + threadsPerBlock.z - 1) / threadsPerBlock.z);
    auto stream = ctx.cuda_device_context().stream();
    // Launch on the device context's stream (asynchronous).
    PolygonBoxTransformKernel<T><<<numBlocks, threadsPerBlock, 0, stream>>>(
        batch_size * geo_channels, height, width, in_data, out_data);
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OP_CUDA_KERNEL(
    polygon_box_transform,
    paddle::operators::PolygonBoxTransformOpCUDAKernel<float>,
    paddle::operators::PolygonBoxTransformOpCUDAKernel<double>);
7ae59d141c2700a5abb802a9c2cb17c4aeffbf70.hip
// !!! This is a file automatically generated by hipify!!! #include "SceCells.h" #include <cmath> double epsilon = 1.0e-12; __constant__ double membrEquLen; __constant__ double membrStiff; __constant__ double membrStiff_Mitotic; //Ali June 30 __constant__ double pI; __constant__ double minLength; __constant__ double minDivisor; __constant__ uint maxAllNodePerCell; __constant__ uint maxMembrPerCell; __constant__ uint maxIntnlPerCell; __constant__ double bendCoeff; __constant__ double bendCoeff_Mitotic;//AAMIRI __constant__ double sceIB_M[5]; __constant__ double sceIBDiv_M[5]; __constant__ double sceII_M[5]; __constant__ double sceIIDiv_M[5]; __constant__ double grthPrgrCriEnd_M; __constant__ double F_Ext_Incline_M2 ; //Ali //Ali & Abu June 30th __device__ double calMembrForce_Mitotic(double& length, double& progress, double mitoticCri) { if (progress <= mitoticCri) { return (length - membrEquLen) * membrStiff; } else { return (length - membrEquLen) *(membrStiff+ (membrStiff_Mitotic-membrStiff)* (progress-mitoticCri)/(1.0-mitoticCri)); } } // //Ali __device__ double calExtForce(double& curTime) { return curTime * F_Ext_Incline_M2; } //Ali __device__ double obtainRandAngle(uint& cellRank, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(cellRank); thrust::uniform_real_distribution<double> u0Pi(0, 2.0 * pI); double randomAngle = u0Pi(rng); return randomAngle; } __device__ uint obtainNewIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount); } //AAMIRI __device__ uint obtainLastIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount ); } //AAMIRI __device__ uint obtainMembEndNode(uint& cellRank, uint& activeMembrNodeThis) { return (cellRank * maxAllNodePerCell + activeMembrNodeThis - 1 ); } __device__ bool isAllIntnlFilled(uint& currentIntnlCount) { if 
(currentIntnlCount < maxIntnlPerCell) { return false; } else { return true; } } //AAMIRI __device__ int obtainRemovingMembrNodeID(uint &cellRank, uint& activeMembrNodes, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(activeMembrNodes); thrust::uniform_int_distribution<double> dist(0, activeMembrNodes-1); int randomNode = dist(rng); return (cellRank * maxAllNodePerCell + randomNode); } //AAMIRI __device__ bool isAllIntnlEmptied(uint& currentIntnlCount) { if (currentIntnlCount > 0) { return false; } else { return true; } } //AAMIRI __device__ bool isAllMembrEmptied(uint& currentMembrCount) { if (currentMembrCount > 0) { return false; } else { return true; } } __device__ bool longEnough(double& length) { if (length > minLength) { return true; } else { return false; } } __device__ double compDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) { return sqrt( (xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)); } void SceCells::distributeBdryIsActiveInfo() { thrust::fill(nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, true); } void SceCells::distributeProfileIsActiveInfo() { thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile + nodes->getAllocPara().currentActiveProfileNodeCount, true); } void SceCells::distributeECMIsActiveInfo() { uint totalNodeCountForActiveECM = allocPara.currentActiveECM * allocPara.maxNodePerECM; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveECM); thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosECM, nodes->getInfoVecs().nodeIsActive.begin() + totalNodeCountForActiveECM + allocPara.startPosECM, true); } void SceCells::distributeCellIsActiveInfo() { totalNodeCountForActiveCells = 
allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::transform( thrust::make_transform_iterator(countingBegin, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_transform_iterator(countingEnd, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_permutation_iterator( cellInfoVecs.activeNodeCountOfThisCell.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::less<uint>()); } void SceCells::distributeCellGrowthProgress() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::copy( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingEnd, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeGrowPro.begin() + allocPara.startPosCells); } void MembrPara::initFromConfig() { membrEquLenCPU = globalConfigVars.getConfigValue("MembrEquLen").toDouble(); membrStiffCPU = globalConfigVars.getConfigValue("MembrStiff").toDouble(); membrStiff_Mitotic = globalConfigVars.getConfigValue("MembrStiff_Mitotic").toDouble(); //Ali June30 membrGrowCoeff_Ori = globalConfigVars.getConfigValue("MembrGrowCoeff").toDouble(); membrGrowLimit_Ori = globalConfigVars.getConfigValue("MembrGrowLimit").toDouble(); membrGrowCoeff = membrGrowCoeff_Ori; membrGrowLimit = membrGrowLimit_Ori; //Ali F_Ext_Incline = globalConfigVars.getConfigValue("FExtIncline").toDouble(); //Ali membrBendCoeff = globalConfigVars.getConfigValue("MembrBenCoeff").toDouble(); 
//AAMIRI membrBendCoeff_Mitotic = globalConfigVars.getConfigValue("MembrBenCoeff_Mitotic").toDouble(); adjustLimit = globalConfigVars.getConfigValue("MembrAdjustLimit").toDouble(); adjustCoeff = globalConfigVars.getConfigValue("MembrAdjustCoeff").toDouble(); growthConst_N = globalConfigVars.getConfigValue("MembrGrowthConst").toDouble(); initMembrCt_N = globalConfigVars.getConfigValue("InitMembrNodeCount").toInt(); initIntnlCt_N = globalConfigVars.getConfigValue("InitCellNodeCount").toInt(); } SceCells::SceCells() { //curTime = 0 + 55800.0;//AAMIRI // Ali I comment that our safely on 04/04/2017 std ::cout << "I am in SceCells constructor with zero element "<<InitTimeStage<<std::endl ; } void SceCells::growAtRandom(double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; // randomly select growth direction and speed. randomizeGrowth(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decode os schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); //std::cout << "after apply stretch force" << std::endl; cellChemotaxis(); //std::cout << "after apply cell chemotaxis" << std::endl; addPointIfScheduledToGrow(); //std::cout << "after adding node" << std::endl; } /** * Use the growth magnitude and dt to update growthProgress. 
*/ void SceCells::updateGrowthProgress() { thrust::transform(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); } /** * Decide if the cells are going to add a node or not. * Use lastCheckPoint and growthProgress to decide whether add point or not */ void SceCells::decideIsScheduleToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } /** * Calculate target length of cell given the cell growth progress. * length is along the growth direction. */ void SceCells::computeCellTargetLength() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } /** * Compute distance of each node to its corresponding cell center. * The distantce could be either positive or negative, depending on the pre-defined * growth direction. 
*/ void SceCells::computeDistToCellCenter() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } /** * For nodes of each cell, find the maximum and minimum distance to the center. 
* We will then calculate the current length of a cell along its growth direction * using max and min distance to the center. */ void SceCells::findMinAndMaxDistToCenter() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } /** * Compute the difference for cells between their expected length and current length. 
*/ void SceCells::computeLenDiffExpCur() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } /** * Use the difference that just computed and growthXDir&growthYDir * to apply stretching force (velocity) on nodes of all cells */ void SceCells::stretchCellGivenLenDiff() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + 
allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyStretchForce(bioPara.elongationCoefficient)); } /** * This is just an attempt. Cells move according to chemicals. */ void SceCells::cellChemotaxis() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyChemoVel(bioPara.chemoCoefficient)); } /** * Adjust the velocities of nodes. * For example, velocity of boundary nodes must be zero. 
*/ void SceCells::adjustNodeVel() { thrust::counting_iterator<uint> countingIterBegin(0); thrust::counting_iterator<uint> countingIterEnd( totalNodeCountForActiveCells + allocPara.startPosCells); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), VelocityModifier(allocPara.startPosProfile, allocPara.currentActiveProfileNodeCount)); } /** * Move nodes according to the velocity we just adjusted. */ void SceCells::moveNodes() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2(dt)); } /** * Add a point to a cell if it is scheduled to grow. 
* This step does not guarantee success ; If adding new point failed, it will not change * isScheduleToGrow and activeNodeCount; */ void SceCells::addPointIfScheduledToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.lastCheckPoint.begin())), AddPtOp(allocPara.maxNodeOfOneCell, miscPara.addNodeDistance, miscPara.minDistanceToOtherNode, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, time(NULL), miscPara.growThreshold)); } //Ali commented this constructor in 04/04/2017 SceCells::SceCells(SceNodes* nodesInput, std::vector<uint>& numOfInitActiveNodesOfCells, std::vector<SceNodeType>& cellTypes) : countingBegin(0), initIntnlNodeCount( nodesInput->getAllocPara().maxNodeOfOneCell / 2), initGrowthProgress( 0.0) { curTime = 0.0 + 55800.0;//AAMIRI std ::cout << "I am in SceCells constructor with polymorphism shape "<<InitTimeStage<<std::endl ; initialize(nodesInput); copyInitActiveNodeCount(numOfInitActiveNodesOfCells); thrust::device_vector<SceNodeType> cellTypesToPass = cellTypes; setCellTypes(cellTypesToPass); distributeIsActiveInfo(); } SceCells::SceCells(SceNodes* nodesInput, std::vector<uint>& initActiveMembrNodeCounts, std::vector<uint>& initActiveIntnlNodeCounts, std::vector<double> &initGrowProgVec, double InitTimeStage) { // curTime = 0.0 + 
55800.0;//AAMIRIi curTime=InitTimeStage ; std ::cout << "I am in SceCells constructor with number of inputs "<<InitTimeStage<<std::endl ; lastTimeExchange=0 ; firstTimeReadDpp=true ; //currentActiveCellCountOld=1 ; // small number tmpDebug = false; aniDebug = false; membrPara.initFromConfig(); shrinkRatio = globalConfigVars.getConfigValue("ShrinkRatio").toDouble(); centerShiftRatio = globalConfigVars.getConfigValue("CenterShiftRatio").toDouble(); memNewSpacing = globalConfigVars.getConfigValue("MembrLenDiv").toDouble(); initialize_M(nodesInput); cout<< "size of dpp in constructor is "<< cellInfoVecs.cell_Dpp.size() << endl ; copyToGPUConstMem(); copyInitActiveNodeCount_M(initActiveMembrNodeCounts, initActiveIntnlNodeCounts, initGrowProgVec); } void SceCells::initCellInfoVecs() { cellInfoVecs.growthProgress.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.expectedLength.resize(allocPara.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara.maxCellCount); cellInfoVecs.activeNodeCountOfThisCell.resize(allocPara.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara.maxCellCount); cellInfoVecs.cellTypes.resize(allocPara.maxCellCount, MX); cellInfoVecs.isScheduledToGrow.resize(allocPara.maxCellCount, false); cellInfoVecs.centerCoordX.resize(allocPara.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara.maxCellCount); cellInfoVecs.centerCoordZ.resize(allocPara.maxCellCount); cellInfoVecs.cellRanksTmpStorage.resize(allocPara.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara.maxCellCount); cellInfoVecs.growthYDir.resize(allocPara.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara.maxCellCount, false); } void SceCells::initCellInfoVecs_M() { //std::cout << "max cell 
count = " << allocPara_m.maxCellCount << std::endl; cellInfoVecs.Cell_Damp.resize(allocPara_m.maxCellCount, 36.0); //Ali cellInfoVecs.cell_Dpp.resize(allocPara_m.maxCellCount, 0.0); //Ali cellInfoVecs.cell_DppOld.resize(allocPara_m.maxCellCount, 0.0); //Ali //cout<< "size of dpp in init is "<< cellInfoVecs.cell_Dpp.size() << endl ; cellInfoVecs.growthProgress.resize(allocPara_m.maxCellCount, 0.0); //A&A cellInfoVecs.growthProgressOld.resize(allocPara_m.maxCellCount, 0.0);//Ali cellInfoVecs.Cell_Time.resize(allocPara_m.maxCellCount, 0.0); //Ali cellInfoVecs.expectedLength.resize(allocPara_m.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.activeMembrNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.activeIntnlNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara_m.maxCellCount); cellInfoVecs.isEnteringMitotic.resize(allocPara_m.maxCellCount, false); //A&A //cellInfoVecs.isRemoving.resize(allocPara.maxCellCount);//AAMIRI cellInfoVecs.isScheduledToGrow.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isScheduledToShrink.resize(allocPara_m.maxCellCount, false);//AAMIRI cellInfoVecs.isCellActive.resize(allocPara_m.maxCellCount, false);//AAMIRI cellInfoVecs.centerCoordX.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordZ.resize(allocPara_m.maxCellCount); cellInfoVecs.HertwigXdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.HertwigYdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.cellRanksTmpStorage.resize(allocPara_m.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara_m.maxCellCount); 
cellInfoVecs.growthYDir.resize(allocPara_m.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isMembrAddingNode.resize(allocPara_m.maxCellCount, false); cellInfoVecs.maxTenIndxVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxTenRiVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxDistToRiVec.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.maxTenRiMidXVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxTenRiMidYVec.resize(allocPara_m.maxCellCount); cellInfoVecs.aveTension.resize(allocPara_m.maxCellCount); cellInfoVecs.membrGrowProgress.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.membrGrowSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellAreaVec.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellPerimVec.resize(allocPara_m.maxCellCount, 0.0);//AAMIRI std::cout << "finished " << std::endl; } void SceCells::initCellNodeInfoVecs() { cellNodeInfoVecs.cellRanks.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeZPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara.maxTotalCellNodeCount); } void SceCells::initCellNodeInfoVecs_M() { std::cout << "max total node count = " << allocPara_m.maxTotalNodeCount << std::endl; cellNodeInfoVecs.cellRanks.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeZPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara_m.maxTotalNodeCount); } void SceCells::initGrowthAuxData() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara.startPosCells])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( 
&(nodes->getInfoVecs().nodeLocX[allocPara.startPosCells])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara.startPosCells])); growthAuxData.randomGrowthSpeedMin = globalConfigVars.getConfigValue( "RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randGenAuxPara = globalConfigVars.getConfigValue( "RandomGenerationAuxPara").toDouble(); if (controlPara.simuType == SingleCellTest) { growthAuxData.fixedGrowthSpeed = globalConfigVars.getConfigValue( "FixedGrowthSpeed").toDouble(); } } void SceCells::initGrowthAuxData_M() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara_m.bdryNodeCount])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[allocPara_m.bdryNodeCount])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara_m.bdryNodeCount])); growthAuxData.adhIndxAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeAdhereIndex[allocPara_m.bdryNodeCount])); growthAuxData.randomGrowthSpeedMin_Ori = globalConfigVars.getConfigValue( "RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax_Ori = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randomGrowthSpeedMin = growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.randomGrowthSpeedMax_Ori; growthAuxData.grthPrgrCriVal_M_Ori = globalConfigVars.getConfigValue( "GrowthPrgrCriVal").toDouble(); growthAuxData.grthProgrEndCPU = globalConfigVars.getConfigValue( "GrowthPrgrValEnd").toDouble(); } void SceCells::initialize(SceNodes* nodesInput) { nodes = nodesInput; controlPara = nodes->getControlPara(); readMiscPara(); readBioPara(); allocPara = nodesInput->getAllocPara(); // max internal node count must be even number. 
assert(allocPara_m.maxIntnlNodePerCell % 2 == 0); initCellInfoVecs(); initCellNodeInfoVecs(); initGrowthAuxData(); distributeIsCellRank(); } void SceCells::initialize_M(SceNodes* nodesInput) { std::cout << "Initializing cells ...... " << std::endl; //std::cout.flush(); nodes = nodesInput; allocPara_m = nodesInput->getAllocParaM(); // max internal node count must be even number. assert(allocPara_m.maxIntnlNodePerCell % 2 == 0); //std::cout << "break point 1 " << std::endl; //std::cout.flush(); controlPara = nodes->getControlPara(); //std::cout << "break point 2 " << std::endl; //std::cout.flush(); readMiscPara_M(); //std::cout << "break point 3 " << std::endl; //std::cout.flush(); initCellInfoVecs_M(); cout<< "size of dpp initilizie is "<< cellInfoVecs.cell_Dpp.size() << endl ; //std::cout << "break point 4 " << std::endl; //std::cout.flush(); readBioPara(); //std::cout << "break point 5 " << std::endl; //std::cout.flush(); //std::cout << "break point 6 " << std::endl; //std::cout.flush(); initCellNodeInfoVecs_M(); //std::cout << "break point 7 " << std::endl; //std::cout.flush(); initGrowthAuxData_M(); //std::cout << "break point 8 " << std::endl; //std::cout.flush(); } void SceCells::copyInitActiveNodeCount( std::vector<uint>& numOfInitActiveNodesOfCells) { thrust::copy(numOfInitActiveNodesOfCells.begin(), numOfInitActiveNodesOfCells.end(), cellInfoVecs.activeNodeCountOfThisCell.begin()); } void SceCells::allComponentsMove() { adjustNodeVel(); moveNodes(); } /** * Mark cell node as either activdistributeIsActiveInfo()e or inactive. * left part of the node array will be active and right part will be inactive. * the threshold is defined by array activeNodeCountOfThisCell. * e.g. 
activeNodeCountOfThisCell = {2,3} and maxNodeOfOneCell = 5 */ void SceCells::distributeIsActiveInfo() { //std::cout << "before distribute bdry isActive" << std::endl; distributeBdryIsActiveInfo(); //std::cout << "before distribute profile isActive" << std::endl; distributeProfileIsActiveInfo(); //std::cout << "before distribute ecm isActive" << std::endl; distributeECMIsActiveInfo(); //std::cout << "before distribute cells isActive" << std::endl; distributeCellIsActiveInfo(); } void SceCells::distributeIsCellRank() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingCellEnd( totalNodeCountForActiveCells); std::cerr << "totalNodeCount for active cells " << totalNodeCountForActiveCells << std::endl; //thrust::counting_iterator<uint> countingECMEnd(countingECMEnd); // only computes the cell ranks of cells. the rest remain unchanged. thrust::transform(countingBegin, countingCellEnd, nodes->getInfoVecs().nodeCellRank.begin() + allocPara.startPosCells, DivideFunctor(allocPara.maxNodeOfOneCell)); std::cerr << "finished cellRank transformation" << std::endl; } /** * This method computes center of all cells. * more efficient then simply iterating the cell because of parallel reducing. 
*/ void SceCells::computeCenterPos() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); uint totalNumberOfActiveNodes = thrust::reduce( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), isTrue()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalNumberOfActiveNodes, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::equal_to<uint>(), CVec3Add()); thrust::transform( thrust::make_zip_iterator( 
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), CVec3Divide()); } /** * 2D version of cell division. * Division process is done by creating two temporary vectors to hold the node information * that are going to divide. * * step 1: based on lengthDifference, expectedLength and growthProgress, * this process determines whether a certain cell is ready to divide and then assign * a boolean value to isDivided. * * step 2. copy those cells that will divide in to the temp vectors created * * step 3. For each cell in the temp vectors, we sort its nodes by its distance to the * corresponding cell center. * This step is not very effcient when the number of cells going to divide is big. * but this is unlikely to happen because cells will divide according to external chemical signaling * and each will have different divide progress. * * step 4. copy the right part of each cell of the sorted array (temp1) to left part of each cell of * another array * * step 5. transform isActive vector of both temp1 and temp2, making only left part of each cell active. * * step 6. insert temp2 to the end of the cell array * * step 7. copy temp1 to the previous position of the cell array. * * step 8. add activeCellCount of the system. * * step 9. mark isDivide of all cells to false. 
*/ void SceCells::divide2DSimplified() { bool isDivisionPresent = decideIfGoingToDivide(); if (!isDivisionPresent) { return; } copyCellsPreDivision(); sortNodesAccordingToDist(); copyLeftAndRightToSeperateArrays(); transformIsActiveArrayOfBothArrays(); addSecondArrayToCellArray(); copyFirstArrayToPreviousPos(); updateActiveCellCount(); markIsDivideFalse(); } bool SceCells::decideIfGoingToDivide() { // step 1 thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isDividing.begin(), cellInfoVecs.growthProgress.begin())), CompuIsDivide(miscPara.isDivideCriticalRatio, allocPara.maxNodeOfOneCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. 
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } void SceCells::copyCellsPreDivision() { // step 2 : copy all cell rank and distance to its corresponding center with divide flag = 1 totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara.maxNodeOfOneCell; divAuxData.tmpIsActiveHold1 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpDistToCenter1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRankHold1 = thrust::device_vector<uint>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellTypes = thrust::device_vector<SceNodeType>( divAuxData.nodeStorageCount); divAuxData.tmpIsActiveHold2 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpDistToCenter2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, 
nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRankHold1.begin(), divAuxData.tmpDistToCenter1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin(), divAuxData.tmpCellTypes.begin())), isTrue()); } /** * performance wise, this implementation is not the best because I can use only one sort_by_key * with speciialized comparision operator. However, This implementation is more robust and won't * compromise performance too much. */ void SceCells::sortNodesAccordingToDist() { //step 3 for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { thrust::sort_by_key( divAuxData.tmpDistToCenter1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpDistToCenter1.begin() + (i + 1) * allocPara.maxNodeOfOneCell, thrust::make_zip_iterator( thrust::make_tuple( divAuxData.tmpXValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpYValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpZValueHold1.begin() + i * allocPara.maxNodeOfOneCell))); } } /** * scatter_if() is a thrust function. 
* inputIter1 first, * inputIter1 last, * inputIter2 map, * inputIter3 stencil * randomAccessIter output */ void SceCells::copyLeftAndRightToSeperateArrays() { //step 4. thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), make_transform_iterator(countingBegin, LeftShiftFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, IsRightSide(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold2.begin(), divAuxData.tmpYValueHold2.begin(), divAuxData.tmpZValueHold2.begin()))); } void SceCells::transformIsActiveArrayOfBothArrays() { thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold1.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold2.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); if (divAuxData.toBeDivideCount != 0) { std::cout << "before insert, active cell count in nodes:" << nodes->getAllocPara().currentActiveCellCount << std::endl; } } void SceCells::addSecondArrayToCellArray() { /// step 6. 
call SceNodes function to add newly divided cells nodes->addNewlyDividedCells(divAuxData.tmpXValueHold2, divAuxData.tmpYValueHold2, divAuxData.tmpZValueHold2, divAuxData.tmpIsActiveHold2, divAuxData.tmpCellTypes); } void SceCells::copyFirstArrayToPreviousPos() { thrust::scatter( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.end(), divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple(countingBegin, divAuxData.tmpCellRankHold1.begin())), CompuPos(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells))); /** * after dividing, the cell should resume the initial * (1) node count, which defaults to be half size of max node count * (2) growth progress, which defaults to 0 * (3) last check point, which defaults to 0 */ thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)), thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)) + allocPara.currentActiveCellCount, countingBegin, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), isTrue()); // TODO: combine this one with the previous scatter_if to improve efficiency. 
thrust::fill( cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount + divAuxData.toBeDivideCount, allocPara.maxNodeOfOneCell / 2); } void SceCells::updateActiveCellCount() { allocPara.currentActiveCellCount = allocPara.currentActiveCellCount + divAuxData.toBeDivideCount; NodeAllocPara para = nodes->getAllocPara(); para.currentActiveCellCount = allocPara.currentActiveCellCount; nodes->setAllocPara(para); } void SceCells::markIsDivideFalse() { thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, false); } void SceCells::readMiscPara() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. int maxNodeOfOneCell = globalConfigVars.getConfigValue("MaxNodePerCell").toInt(); miscPara.growThreshold = 1.0 / (maxNodeOfOneCell - maxNodeOfOneCell / 2) + epsilon; } void SceCells::readMiscPara_M() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. 
If we don't include // this small term we might risk adding one more node. int maxIntnlNodePerCell = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); miscPara.growThreshold = 1.0 / (maxIntnlNodePerCell - maxIntnlNodePerCell / 2) + epsilon; miscPara.prolifDecayCoeff = globalConfigVars.getConfigValue( "ProlifDecayCoeff").toDouble(); } void SceCells::readBioPara() { if (controlPara.simuType != Disc_M) { bioPara.cellInitLength = globalConfigVars.getConfigValue( "CellInitLength").toDouble(); std::cout << "break point 1 " << bioPara.cellInitLength << std::endl; std::cout.flush(); bioPara.cellFinalLength = globalConfigVars.getConfigValue( "CellFinalLength").toDouble(); std::cout << "break point 2 " << bioPara.cellFinalLength << std::endl; std::cout.flush(); bioPara.elongationCoefficient = globalConfigVars.getConfigValue( "ElongateCoefficient").toDouble(); std::cout << "break point 3 " << bioPara.elongationCoefficient << std::endl; std::cout.flush(); } if (controlPara.simuType == Beak) { std::cout << "break point 4 " << std::endl; std::cout.flush(); bioPara.chemoCoefficient = globalConfigVars.getConfigValue( "ChemoCoefficient").toDouble(); } //std::cin >> jj; } void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, 
growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } /** * To run all the cell level logics. * First step we got center positions of cells. * Grow. */ void SceCells::runAllCellLevelLogicsDisc(double dt) { this->dt = dt; //std::cerr << "enter run all cell level logics" << std::endl; computeCenterPos(); //std::cerr << "after compute center position." << std::endl; if (nodes->getControlPara().controlSwitchs.stab == OFF) { growAtRandom(dt); //grow2DTwoRegions(dt, region1, region2); //std::cerr << "after grow cells" << std::endl; //distributeIsActiveInfo(); //std::cerr << "after distribute is active info." << std::endl; divide2DSimplified(); //std::cerr << "after divide 2D simplified." << std::endl; distributeIsActiveInfo(); //std::cerr << "after distribute is active info." << std::endl; distributeCellGrowthProgress(); } allComponentsMove(); //std::cerr << "after all components move." << std::endl; } //Ali void SceCells::runAllCellLogicsDisc_M(double dt) { void SceCells::runAllCellLogicsDisc_M(double dt, double Damp_Coef, double InitTimeStage) { //Ali std::cout << " *** 1 ***" << endl; std::cout.flush(); this->dt = dt; this->Damp_Coef=Damp_Coef ; //Ali this->InitTimeStage=InitTimeStage ; //A & A growthAuxData.prolifDecay =1.0 ; // no decay for right now exp(-curTime * miscPara.prolifDecayCoeff); cout<< "The important curTime used in simulation is here which is"<<curTime <<endl; growthAuxData.randomGrowthSpeedMin = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMax_Ori; curTime = curTime + dt; std::cout << " *** 2 ***" << endl; std::cout.flush(); applySceCellDisc_M(); std::cout << " *** 3 ***" << endl; std::cout.flush(); //Ali computeCenterPos_M(); exchSignal(); BC_Imp_M() ; std::cout << " *** 3.5 ***" << endl; std::cout.flush(); //Ali applyMemForce_M(); std::cout << " *** 4 ***" << endl; 
std::cout.flush();
// NOTE(review): this is the tail of a driver routine whose head is outside this view.
// The original line breaks between the "//Ali cmment" markers and the calls below were
// lost in formatting; reconstruction assumes the calls are active — TODO confirm against VCS.
//Ali cmment //
//
computeCenterPos_M();
std::cout << " *** 5 ***" << endl;
std::cout.flush();
//Ali cmment //
growAtRandom_M(dt);
std::cout << " *** 6 ***" << endl;
std::cout.flush();
//if (curTime<3300.0)
divide2D_M();
std::cout << " *** 7 ***" << endl;
std::cout.flush();
distributeCellGrowthProgress_M();
std::cout << " *** 8 ***" << endl;
std::cout.flush();
findTangentAndNormal_M();//AAMIRI ADDED May29
allComponentsMove_M();
std::cout << " *** 9 ***" << endl;
std::cout.flush();
handleMembrGrowth_M();
std::cout << " *** 10 ***" << endl;
std::cout.flush();
}

// Exchanges Dpp signaling data with the external signaling module.
// On first call, sizes the module from the node vectors; afterwards, every
// exchPeriod units of accumulated simulation time it copies node/cell state
// to the module's host buffers, runs signal.updateSignal, and copies the
// resulting dppLevel back into cellInfoVecs.cell_Dpp.
void SceCells::exchSignal() {
	if (firstTimeReadDpp) {
		// Total capacity of the node arrays, used to size the signaling module's buffers.
		uint maxTotalNodes = nodes->getInfoVecs().nodeLocX.size();
		signal.Initialize(allocPara_m.maxAllNodePerCell, allocPara_m.maxMembrNodePerCell, maxTotalNodes, allocPara_m.maxCellCount);
		cout << " I passed the initializtion for signaling module" << endl;
	}
	// Accumulate simulated time since the last exchange.
	lastTimeExchange = lastTimeExchange + dt;
	cout << "last time exchange is " << lastTimeExchange << endl;
	cout << "dt is " << dt << endl;
	// Hard-coded exchange period (same time units as dt/curTime — presumably seconds; confirm).
	double exchPeriod = 360;
	if (lastTimeExchange > exchPeriod) {
		lastTimeExchange = 0;
		//vector<CVector> cellCentersHost ;
		//cellCentersHost=getAllCellCenters(); //Ali
		cout << "I entered the function to update dpp" << endl;
		// Bounding box of the active cell centers (device-side min/max reductions).
		thrust::device_vector<double>::iterator MinX_Itr = thrust::min_element(cellInfoVecs.centerCoordX.begin(),
				cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount);
		thrust::device_vector<double>::iterator MaxX_Itr = thrust::max_element(cellInfoVecs.centerCoordX.begin(),
				cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount);
		thrust::device_vector<double>::iterator MinY_Itr = thrust::min_element(cellInfoVecs.centerCoordY.begin(),
				cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount);
		thrust::device_vector<double>::iterator MaxY_Itr = thrust::max_element(cellInfoVecs.centerCoordY.begin(),
				cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount);
		Tisu_MinX = *MinX_Itr;
		Tisu_MaxX = *MaxX_Itr;
		Tisu_MinY = *MinY_Itr;
		Tisu_MaxY = *MaxY_Itr;
		// Tissue radius: average of the x and y half-extents.
		Tisu_R = 0.5 * (0.5 * (Tisu_MaxX - Tisu_MinX) + 0.5 * (Tisu_MaxY - Tisu_MinY));
		totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell;
		// Stage node and cell-center state into the signaling module's host-side buffers.
		thrust::copy(nodes->getInfoVecs().nodeIsActive.begin(),
				nodes->getInfoVecs().nodeIsActive.begin() + totalNodeCountForActiveCells,
				signal.nodeIsActiveHost.begin());
		thrust::copy(nodes->getInfoVecs().nodeLocX.begin(),
				nodes->getInfoVecs().nodeLocX.begin() + totalNodeCountForActiveCells,
				signal.nodeLocXHost.begin());
		thrust::copy(nodes->getInfoVecs().nodeLocY.begin(),
				nodes->getInfoVecs().nodeLocY.begin() + totalNodeCountForActiveCells,
				signal.nodeLocYHost.begin());
		thrust::copy(cellInfoVecs.centerCoordX.begin(),
				cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount,
				signal.cellCenterX.begin());
		thrust::copy(cellInfoVecs.centerCoordY.begin(),
				cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount,
				signal.cellCenterY.begin());
		signal.updateSignal(Tisu_MinX, Tisu_MaxX, Tisu_MinY, Tisu_MaxY, curTime,
				totalNodeCountForActiveCells, allocPara_m.currentActiveCellCount); //Ali
		// Pull the per-cell Dpp levels back from the signaling module.
		assert(cellInfoVecs.cell_Dpp.size() == signal.dppLevel.size());
		thrust::copy(signal.dppLevel.begin(), signal.dppLevel.end(), cellInfoVecs.cell_Dpp.begin());
		//currentActiveCellCountOld=allocPara_m.currentActiveCellCount;
	}
	if (firstTimeReadDpp) {
		// Seed cell_DppOld on the very first call so later updates have a baseline.
		thrust::copy(signal.dppLevel.begin(), signal.dppLevel.end(), cellInfoVecs.cell_DppOld.begin());
		firstTimeReadDpp = false;
	}
}

// Single step of the stretch test: recompute centers, grow along the x axis
// (without adding points), then move nodes.
void SceCells::runStretchTest(double dt) {
	this->dt = dt;
	computeCenterPos();
	growAlongX(false, dt);
	moveNodes();
}

// Growth pipeline constrained to the x axis (used by the stretch test).
// If isAddPt is true, scheduled growth points are also inserted.
void SceCells::growAlongX(bool isAddPt, double d_t) {
	totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
	setGrowthDirXAxis();
	//std::cout << "after copy grow info" << std::endl;
	updateGrowthProgress();
	//std::cout << "after update growth progress" << std::endl;
	decideIsScheduleToGrow();
	//std::cout << "after decode os schedule to grow" << std::endl;
	computeCellTargetLength();
	//std::cout << "after compute cell target length" << std::endl;
	computeDistToCellCenter();
	//std::cout << "after compute dist to center" << std::endl;
	findMinAndMaxDistToCenter();
	//std::cout << "after find min and max dist" << std::endl;
	computeLenDiffExpCur();
	//std::cout << "after compute diff " << std::endl;
	stretchCellGivenLenDiff();
	if (isAddPt) {
		addPointIfScheduledToGrow();
	}
}

// Intentionally empty placeholder (stress-driven growth not implemented here).
void SceCells::growWithStress(double d_t) {
}

// Copies the active cells' center coordinates from device to host and returns
// them as a vector of CVector (x, y, z per cell).
std::vector<CVector> SceCells::getAllCellCenters() {
//void SceCells::getAllCellCenters() {
	//thrust::host_vector<double> centerX = cellInfoVecs.centerCoordX;
	//thrust::host_vector<double> centerY = cellInfoVecs.centerCoordY;
	//thrust::host_vector<double> centerZ = cellInfoVecs.centerCoordZ;
	thrust::host_vector<double> centerX(allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.centerCoordX.begin(),
			cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount,
			centerX.begin());
	thrust::host_vector<double> centerY(allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.centerCoordY.begin(),
			cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount,
			centerY.begin());
	thrust::host_vector<double> centerZ(allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.centerCoordZ.begin(),
			cellInfoVecs.centerCoordZ.begin() + allocPara_m.currentActiveCellCount,
			centerZ.begin());
	//infoForSignal.sCenterX=centerX[4] ;
	//infoForSignal.sCenterY=centerY[4] ;
	//infoForSignal.sCenterZ=centerZ[4] ;
	std::vector<CVector> result;
	for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) {
		CVector pos = CVector(centerX[i], centerY[i], centerZ[i]);
		//infoForSignal.sCenterX=centerX[i] ;
		//infoForSignal.sCenterY=centerY[i] ;
		//infoForSignal.sCenterZ=centerZ[i] ;
		result.push_back(pos);
	}
	return result;
}

// Forces every active cell's growth direction to the +x axis
// (growthXDir = 1, growthYDir = 0) and applies the fixed growth speed.
void SceCells::setGrowthDirXAxis() {
	thrust::fill(cellInfoVecs.growthXDir.begin(),
			cellInfoVecs.growthXDir.begin() + allocPara.currentActiveCellCount, 1.0);
	thrust::fill(cellInfoVecs.growthYDir.begin(),
			cellInfoVecs.growthYDir.begin() + allocPara.currentActiveCellCount, 0.0);
	thrust::fill(cellInfoVecs.growthSpeed.begin(),
			cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount,
			growthAuxData.fixedGrowthSpeed);
}

// Copies the per-cell growth progress from device to host and returns it
// as a std::vector (one entry per active cell).
std::vector<double> SceCells::getGrowthProgressVec() {
	thrust::host_vector<double> growthProVec = cellInfoVecs.growthProgress;
	std::vector<double> result;
	for (uint i = 0; i < allocPara.currentActiveCellCount; i++) {
		result.push_back(growthProVec[i]);
	}
	return result;
}

// Gathers node and cell data of all cells flagged isDividing into the
// divAuxData temporary buffers (positions, activity flags, division direction,
// Hertwig axis, centers), compacted so dividing cell i occupies slot i.
void SceCells::copyCellsPreDivision_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	// One full node slab per dividing cell.
	divAuxData.nodeStorageCount = divAuxData.toBeDivideCount
			* allocPara_m.maxAllNodePerCell;
	// (Re)allocate scratch vectors sized for this round of divisions.
	divAuxData.tmpIsActive_M = thrust::device_vector<bool>(divAuxData.nodeStorageCount, true);
	divAuxData.tmpNodePosX_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpNodePosY_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpCellRank_M = thrust::device_vector<uint>(divAuxData.toBeDivideCount, 0);
	divAuxData.tmpDivDirX_M = thrust::device_vector<double>(divAuxData.toBeDivideCount, 0);
	divAuxData.tmpDivDirY_M = thrust::device_vector<double>(divAuxData.toBeDivideCount, 0);
	divAuxData.tmpCenterPosX_M = thrust::device_vector<double>(divAuxData.toBeDivideCount, 0);
	divAuxData.tmpCenterPosY_M = thrust::device_vector<double>(divAuxData.toBeDivideCount, 0);
	// Output buffers for the two daughter cells.
	divAuxData.tmpIsActive1_M = thrust::device_vector<bool>(divAuxData.nodeStorageCount, false);
	divAuxData.tmpXPos1_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpYPos1_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpIsActive2_M = thrust::device_vector<bool>(divAuxData.nodeStorageCount, false);
	divAuxData.tmpXPos2_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpYPos2_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	//A&A
	divAuxData.tmpHertwigXdir = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpHertwigYdir = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	//A&A
	// step 2 , continued
	// copy node info values ready for division /comment A&A
	// copy_if predicate: a node is selected when its owning cell's isDividing
	// flag is set (node index / maxAllNodePerCell gives the cell rank).
	thrust::counting_iterator<uint> iStart(0);
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount))
					+ totalNodeCountForActiveCells,
			thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(),
					make_transform_iterator(iStart,
							DivideFunctor(allocPara_m.maxAllNodePerCell))),
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpIsActive_M.begin(),
							divAuxData.tmpNodePosX_M.begin(),
							divAuxData.tmpNodePosY_M.begin())),
			isTrue());
	// step 3 , continued
	//copy cell info values ready for division /comment A&A
	// Same compaction at cell granularity: rank, growth dir, Hertwig axis, center.
	thrust::counting_iterator<uint> iBegin(0);
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.HertwigXdir.begin(),
							cellInfoVecs.HertwigYdir.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.HertwigXdir.begin(),
							cellInfoVecs.HertwigYdir.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.isDividing.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpCellRank_M.begin(),
							divAuxData.tmpDivDirX_M.begin(),
							divAuxData.tmpDivDirY_M.begin(),
							divAuxData.tmpHertwigXdir.begin(),
							divAuxData.tmpHertwigYdir.begin(),
							divAuxData.tmpCenterPosX_M.begin(),
							divAuxData.tmpCenterPosY_M.begin())),
			isTrue());
}

// Same gather as copyCellsPreDivision_M but keyed on isEnteringMitotic and
// sized by toEnterMitoticCount (no Hertwig buffers here; those are produced
// later by findHertwigAxis).
void SceCells::copyCellsEnterMitotic() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	divAuxData.nodeStorageCount = divAuxData.toEnterMitoticCount
			* allocPara_m.maxAllNodePerCell;
	divAuxData.tmpIsActive_M = thrust::device_vector<bool>(divAuxData.nodeStorageCount, true);
	divAuxData.tmpNodePosX_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpNodePosY_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpCellRank_M = thrust::device_vector<uint>(divAuxData.toEnterMitoticCount, 0);
	divAuxData.tmpDivDirX_M = thrust::device_vector<double>(divAuxData.toEnterMitoticCount, 0);
	divAuxData.tmpDivDirY_M = thrust::device_vector<double>(divAuxData.toEnterMitoticCount, 0);
	divAuxData.tmpCenterPosX_M = thrust::device_vector<double>(divAuxData.toEnterMitoticCount, 0);
	divAuxData.tmpCenterPosY_M = thrust::device_vector<double>(divAuxData.toEnterMitoticCount, 0);
	divAuxData.tmpIsActive1_M = thrust::device_vector<bool>(divAuxData.nodeStorageCount, false);
	divAuxData.tmpXPos1_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpYPos1_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpIsActive2_M = thrust::device_vector<bool>(divAuxData.nodeStorageCount, false);
	divAuxData.tmpXPos2_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpYPos2_M = thrust::device_vector<double>(divAuxData.nodeStorageCount, 0.0);
	// step 2 , continued
	// copy node info values ready for division /comment A&A
	thrust::counting_iterator<uint> iStart(0);
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount))
					+ totalNodeCountForActiveCells,
			// Select nodes whose owning cell is entering mitosis.
			thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(),
					make_transform_iterator(iStart,
							DivideFunctor(allocPara_m.maxAllNodePerCell))),
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpIsActive_M.begin(),
							divAuxData.tmpNodePosX_M.begin(),
							divAuxData.tmpNodePosY_M.begin())),
			isTrue());
	// step 3 , continued
	//copy cell info values ready for division /comment A&A
	// Compact the cell-level info (rank, growth dir, center) of mitotic cells.
	thrust::counting_iterator<uint> iBegin(0);
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.isEnteringMitotic.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(divAuxData.tmpCellRank_M.begin(),
							divAuxData.tmpDivDirX_M.begin(),
							divAuxData.tmpDivDirY_M.begin(),
							divAuxData.tmpCenterPosX_M.begin(),
							divAuxData.tmpCenterPosY_M.begin())),
			isTrue());
}

// Host-side loop over all dividing cells: splits each cell's membrane and
// internal nodes into two daughter sets along the precomputed Hertwig axis
// and assembles the daughters' node arrays into divAuxData.
void SceCells::createTwoNewCellArr_M() {
	divAuxData.tmp1MemActiveCounts.clear();
	divAuxData.tmp1InternalActiveCounts.clear();
	divAuxData.tmp2MemActiveCounts.clear();
	divAuxData.tmp2InternalActiveCounts.clear();
	//divDebug();
	for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
		divAuxData.tmp1IntnlVec.clear();
		divAuxData.tmp2IntnlVec.clear();
		vector<CVector> membrNodes;
		vector<CVector> intnlNodes;
		obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes);
		CVector oldCenter = obtainCenter(i);
		//A&A commented
		//CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes,
		//		lenAlongMajorAxis);
		/*CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, lenAlongMajorAxis);*/
		// Division direction comes from the Hertwig axis stored at mitosis entry.
		CVector divDir;
		divDir.x = divAuxData.tmpHertwigXdir[i]; //A&A
		divDir.y = divAuxData.tmpHertwigYdir[i]; //A&A
		double lenAlongHertwigAxis = calLengthAlongHertwigAxis(divDir, oldCenter,
				membrNodes);//A&A added //
		std::vector<VecVal> tmp1Membr, tmp2Membr;
		CVector cell1Center, cell2Center;
		obtainTwoNewCenters(oldCenter, divDir, lenAlongHertwigAxis, cell1Center,
				cell2Center);
		prepareTmpVec(i, divDir, oldCenter, tmp1Membr, tmp2Membr);
		processMemVec(tmp1Membr, tmp2Membr);
		shiftIntnlNodesByCellCenter(cell1Center, cell2Center);
		assembleVecForTwoCells(i);
	}
	//divDebug();
}

//A&A
// For every cell entering mitosis, computes the division direction from the
// cell's major axis and stores it as the Hertwig axis (HertwigXdir/Ydir),
// to be consumed later by createTwoNewCellArr_M.
void SceCells::findHertwigAxis() {
	divAuxData.tmp1MemActiveCounts.clear();
	divAuxData.tmp1InternalActiveCounts.clear();
	divAuxData.tmp2MemActiveCounts.clear();
	divAuxData.tmp2InternalActiveCounts.clear();
	//divDebug();
	for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) {
		uint cellRank = divAuxData.tmpCellRank_M[i];
		vector<CVector> membrNodes;
		vector<CVector> intnlNodes;
		obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes);
		CVector oldCenter = obtainCenter(i);
		double lenAlongMajorAxis;
		CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes,
				lenAlongMajorAxis);
		cellInfoVecs.HertwigXdir[cellRank] = divDir.x;
		cellInfoVecs.HertwigYdir[cellRank] = divDir.y;
		//std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl;
		//std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl;
		//std::cout<<divDir.x<<"HertwigXdir " <<std::endl;
		//std::cout<<divDir.y<<"HertwigYdir " <<std::endl;
	}
	//divDebug();
}

// Writes daughter cell #1 back into the mother cell's node slots and resets
// the mother's per-cell growth/division bookkeeping. Adhesion indices are
// reset to -1 via constant iterators.
void SceCells::copyFirstCellArr_M() {
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
		uint cellRank = divAuxData.tmpCellRank_M[i];
		uint nodeStartIndx = cellRank * maxAllNodePerCell
				+ allocPara_m.bdryNodeCount;
		uint tmpStartIndx = i * maxAllNodePerCell;
		uint tmpEndIndx = (i + 1) * maxAllNodePerCell;
		thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1);
		thrust::copy(
				thrust::make_zip_iterator(
						thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
								divAuxData.tmpYPos1_M.begin(),
								divAuxData.tmpIsActive1_M.begin(), noAdhesion,
								noAdhesion2)) + tmpStartIndx,
				thrust::make_zip_iterator(
						thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
								divAuxData.tmpYPos1_M.begin(),
								divAuxData.tmpIsActive1_M.begin(), noAdhesion,
								noAdhesion2)) + tmpEndIndx,
				thrust::make_zip_iterator(
						thrust::make_tuple(
								nodes->getInfoVecs().nodeLocX.begin(),
								nodes->getInfoVecs().nodeLocY.begin(),
								nodes->getInfoVecs().nodeIsActive.begin(),
								nodes->getInfoVecs().nodeAdhereIndex.begin(),
								nodes->getInfoVecs().membrIntnlIndex.begin()))
						+ nodeStartIndx);
		cellInfoVecs.activeIntnlNodeCounts[cellRank] =
				divAuxData.tmp1InternalActiveCounts[i];
		cellInfoVecs.activeMembrNodeCounts[cellRank] =
				divAuxData.tmp1MemActiveCounts[i];
		// Fresh cell-cycle state for the daughter occupying the mother's slot.
		cellInfoVecs.growthProgress[cellRank] = 0;
		cellInfoVecs.membrGrowProgress[cellRank] = 0.0;
		cellInfoVecs.isRandGrowInited[cellRank] = false;
		cellInfoVecs.lastCheckPoint[cellRank] = 0;
		cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRank];
	}
}

// Writes daughter cell #2 into a freshly appended cell slot
// (currentActiveCellCount + i) and inherits Dpp from the mother cell.
void SceCells::copySecondCellArr_M() {
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
		uint cellRankMother = divAuxData.tmpCellRank_M[i];
		uint cellRank = allocPara_m.currentActiveCellCount + i;
		uint nodeStartIndx = cellRank * maxAllNodePerCell
				+ allocPara_m.bdryNodeCount;
		uint tmpStartIndx = i * maxAllNodePerCell;
		uint tmpEndIndx = (i + 1) * maxAllNodePerCell;
		thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1);
		thrust::copy(
				thrust::make_zip_iterator(
						thrust::make_tuple(divAuxData.tmpXPos2_M.begin(),
								divAuxData.tmpYPos2_M.begin(),
								divAuxData.tmpIsActive2_M.begin(), noAdhesion,
								noAdhesion2)) + tmpStartIndx,
				thrust::make_zip_iterator(
						thrust::make_tuple(divAuxData.tmpXPos2_M.begin(),
								divAuxData.tmpYPos2_M.begin(),
								divAuxData.tmpIsActive2_M.begin(), noAdhesion,
								noAdhesion2)) + tmpEndIndx,
				thrust::make_zip_iterator(
						thrust::make_tuple(
								nodes->getInfoVecs().nodeLocX.begin(),
								nodes->getInfoVecs().nodeLocY.begin(),
								nodes->getInfoVecs().nodeIsActive.begin(),
								nodes->getInfoVecs().nodeAdhereIndex.begin(),
								nodes->getInfoVecs().membrIntnlIndex.begin()))
						+ nodeStartIndx);
		cellInfoVecs.activeIntnlNodeCounts[cellRank] =
				divAuxData.tmp2InternalActiveCounts[i];
		cellInfoVecs.activeMembrNodeCounts[cellRank] =
				divAuxData.tmp2MemActiveCounts[i];
		// Fresh cell-cycle state; Dpp is inherited from the mother cell.
		cellInfoVecs.growthProgress[cellRank] = 0;
		cellInfoVecs.membrGrowProgress[cellRank] = 0;
		cellInfoVecs.isRandGrowInited[cellRank] = false;
		cellInfoVecs.lastCheckPoint[cellRank] = 0;
		cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother];
		cellInfoVecs.cell_Dpp[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother];
	}
}

//AAMIRI
/*
void SceCells::removeCellArr_M() {
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	for (uint i = 0; i < divAuxData.toBeDivideCount; i++) {
		uint cellRank = divAuxData.tmpCellRank_M[i];
		uint nodeStartIndx = cellRank * maxAllNodePerCell
				+ allocPara_m.bdryNodeCount;
		uint tmpStartIndx = i * maxAllNodePerCell;
		uint tmpEndIndx = (i + 1) * maxAllNodePerCell;
		thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1);
		thrust::copy(
				thrust::make_zip_iterator(
						thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
								divAuxData.tmpYPos1_M.begin(),
								divAuxData.tmpIsActive1_M.begin(), noAdhesion,
								noAdhesion2)) + tmpStartIndx,
				thrust::make_zip_iterator(
						thrust::make_tuple(divAuxData.tmpXPos1_M.begin(),
								divAuxData.tmpYPos1_M.begin(),
								divAuxData.tmpIsActive1_M.begin(), noAdhesion,
								noAdhesion2)) + tmpEndIndx,
				thrust::make_zip_iterator(
						thrust::make_tuple(
								nodes->getInfoVecs().nodeLocX.begin(),
								nodes->getInfoVecs().nodeLocY.begin(),
								nodes->getInfoVecs().nodeIsActive.begin(),
								nodes->getInfoVecs().nodeAdhereIndex.begin(),
								nodes->getInfoVecs().membrIntnlIndex.begin()))
						+ nodeStartIndx);
		cellInfoVecs.activeIntnlNodeCounts[cellRank] =
				divAuxData.tmp1InternalActiveCounts[i];
		cellInfoVecs.activeMembrNodeCounts[cellRank] =
				divAuxData.tmp1MemActiveCounts[i];
		cellInfoVecs.growthProgress[cellRank] = 0;
		cellInfoVecs.membrGrowProgress[cellRank] = 0.0;
		cellInfoVecs.isRandGrowInited[cellRank] = false;
		cellInfoVecs.lastCheckPoint[cellRank] = 0;
	}
}
*/

// Grows the active cell count by the number of divisions just performed and
// propagates the new count to the node container.
void SceCells::updateActiveCellCount_M() {
	allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount
			+ divAuxData.toBeDivideCount;
	nodes->setActiveCellCount(allocPara_m.currentActiveCellCount);
}

//AAMIRI
// NOTE(review): the disabled version below *adds* toBeDivideCount even though its
// name says "AfterRemoval" — looks like an unfinished copy of updateActiveCellCount_M.
/*
void SceCells::updateActiveCellCountAfterRemoval_M() {
	allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount
			+ divAuxData.toBeDivideCount;
	nodes->setActiveCellCount(allocPara_m.currentActiveCellCount);
}
*/

// Clears the isDividing flags of all active cells after division handling.
void SceCells::markIsDivideFalse_M() {
	thrust::fill(cellInfoVecs.isDividing.begin(),
			cellInfoVecs.isDividing.begin()
					+ allocPara_m.currentActiveCellCount, false);
}

// Zeroes node velocities (boundary + active cell nodes) via the ForceZero functor.
void SceCells::adjustNodeVel_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin()))
					+ allocPara_m.bdryNodeCount + totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			ForceZero());
}

// Explicit position update: loc += f(vel, dt) with a single global damping
// coefficient (SaxpyFunctorDim2_Damp), applied to boundary + active cell nodes.
void SceCells::moveNodes_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin()))
					+ totalNodeCountForActiveCells + allocPara_m.bdryNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			//Ali SaxpyFunctorDim2(dt));
			SaxpyFunctorDim2_Damp(dt,Damp_Coef)); //Ali
}

//Ali
// This function is written to assigned different damping coefficients to cells, therefore the boundary cells can have more damping
void SceCells::moveNodes_BC_M() {
	thrust::counting_iterator<uint> iBegin2(0);
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							// Per-cell damping, broadcast to each of the cell's nodes.
							thrust::make_permutation_iterator(
									cellInfoVecs.Cell_Damp.begin(),
									make_transform_iterator(iBegin2,
											DivideFunctor(maxAllNodePerCell))),
							nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.Cell_Damp.begin(),
									make_transform_iterator(iBegin2,
											DivideFunctor(maxAllNodePerCell))),
							nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin()))
					+ totalNodeCountForActiveCells + allocPara_m.bdryNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			SaxpyFunctorDim2_BC_Damp(dt));
}

//Ali
// Computes membrane forces (tension + bending) for every membrane node of all
// active cells and accumulates them into the node velocity arrays.
void SceCells::applyMemForce_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	thrust::counting_iterator<uint> iBegin(0), iBegin1(0); //Ali
	// Stamp every active cell with the current simulation time.
	thrust::fill(cellInfoVecs.Cell_Time.begin(),
			cellInfoVecs.Cell_Time.begin() + allocPara_m.currentActiveCellCount,
			curTime); //Ali
	/* thrust::device_vector<double>::iterator
	 MinX_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount,
	 nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
	 thrust::device_vector<double>::iterator
	 MaxX_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount,
	 nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
	 thrust::device_vector<double>::iterator
	 MinY_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount,
	 nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
	 thrust::device_vector<double>::iterator
	 MaxY_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount,
	 nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ;
	 Tisu_MinX= *MinX_Itr ;
	 Tisu_MaxX= *MaxX_Itr ;
	 Tisu_MinY= *MinY_Itr ;
	 Tisu_MaxY= *MaxY_Itr ;
	 */
	//cout<< "# of boundary nodes"<< allocPara_m.bdryNodeCount<<endl ;
	//cout<< "# of total active nodes"<<totalNodeCountForActiveCells <<endl ;
	//cout<<"The minimum location in X in applyMemForce_M is="<<Tisu_MinX<< endl;
	//cout<<"The maximum location in X in applyMemForce_M is="<<Tisu_MaxX<< endl;
	//cout<<"The minimum location in Y in applyMemForce_M is="<<Tisu_MinY<< endl;
	//cout<<"The maximum location in Y in applyMemForce_M is="<<Tisu_MaxY<< endl;
	//Ali
	// Raw device pointers handed to the AddMembrForce functor so it can access
	// neighboring nodes by index inside the device code.
	double* nodeLocXAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[0]));
	double* nodeLocYAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[0]));
	bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[0]));
	double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; // for now constant
	//growthAuxData.grthProgrEndCPU
	//		- growthAuxData.prolifDecay
	//				* (growthAuxData.grthProgrEndCPU
	//						- growthAuxData.grthPrgrCriVal_M_Ori);
	// Per-node transform: each node sees its cell's growth progress, active
	// membrane node count, center x and cell time (broadcast via permutation
	// iterators), plus its own cell rank / in-cell index / position / velocity.
	// Outputs accumulated velocity plus membrane tension and bending diagnostics.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.growthProgress.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.Cell_Time.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.growthProgress.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							thrust::make_permutation_iterator(
									cellInfoVecs.Cell_Time.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin(),
							nodes->getInfoVecs().membrTensionMag.begin(),
							nodes->getInfoVecs().membrTenMagRi.begin(),
							nodes->getInfoVecs().membrLinkRiMidX.begin(),
							nodes->getInfoVecs().membrLinkRiMidY.begin(),
							nodes->getInfoVecs().membrBendLeftX.begin(),
							nodes->getInfoVecs().membrBendLeftY.begin(),
							nodes->getInfoVecs().membrBendRightX.begin(),
							nodes->getInfoVecs().membrBendRightY.begin()))
					+ allocPara_m.bdryNodeCount,
			AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell,
					nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr,
					grthPrgrCriVal_M));
	/**Ali Comment start
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin(),
							nodes->getInfoVecs().membrTensionMag.begin(),
							nodes->getInfoVecs().membrTenMagRi.begin(),
							nodes->getInfoVecs().membrLinkRiMidX.begin(),
							nodes->getInfoVecs().membrLinkRiMidY.begin(),
							nodes->getInfoVecs().membrBendLeftX.begin(),
							nodes->getInfoVecs().membrBendLeftY.begin(),
							nodes->getInfoVecs().membrBendRightX.begin(),
							nodes->getInfoVecs().membrBendRightY.begin()))
					+ allocPara_m.bdryNodeCount,
			AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell,
					nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr));
	**/ // Ali comment end
	//Ali
	//Ali
	// Second pass: distribute the bending contributions (computed above into the
	// membrBend* arrays) onto each node's left/right neighbors.
	double* bendLeftXAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().membrBendLeftX[0]));
	double* bendLeftYAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().membrBendLeftY[0]));
	double* bendRightXAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().membrBendRightX[0]));
	double* bendRightYAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().membrBendRightY[0]));
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin1,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin1,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin1,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin1,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin1,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin1,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin()))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			AddMembrBend(maxAllNodePerCell, nodeIsActiveAddr, bendLeftXAddr,
					bendLeftYAddr, bendRightXAddr, bendRightYAddr));
}

//AAMIRI
// Computes, per membrane node, the tangential/normal decomposition of the
// membrane-internal force, curvature, and tangential/normal external force
// components (CalCurvatures functor).
void SceCells::findTangentAndNormal_M() {
	uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	// NOTE(review): iBegin is used in the first zip and iBegin1 in the second;
	// both start at 0 so the ranges agree, but the mix looks accidental.
	thrust::counting_iterator<uint> iBegin(0), iBegin1(0);
	double* nodeLocXAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[0]));
	double* nodeLocYAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[0]));
	bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[0]));
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin1,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE
							nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE
							nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE
							nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE
							nodes->getInfoVecs().nodeCurvature.begin(),
							nodes->getInfoVecs().nodeExtForceX.begin(),
							nodes->getInfoVecs().nodeExtForceY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.activeMembrNodeCounts.begin(),
									make_transform_iterator(iBegin1,
											DivideFunctor(maxAllNodePerCell))),
							make_transform_iterator(iBegin1,
									DivideFunctor(maxAllNodePerCell)),
							make_transform_iterator(iBegin1,
									ModuloFunctor(maxAllNodePerCell)),
							nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE
							nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE
							nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE
							nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE
							nodes->getInfoVecs().nodeCurvature.begin(),
							nodes->getInfoVecs().nodeExtForceX.begin(),
nodes->getInfoVecs().nodeExtForceY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeF_MI_M_T.begin(), nodes->getInfoVecs().nodeF_MI_M_N.begin(), nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeExtForceTangent.begin(), nodes->getInfoVecs().nodeExtForceNormal.begin(), nodes->getInfoVecs().membrDistToRi.begin())), CalCurvatures(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)); } void SceCells::runAblationTest(AblationEvent& ablEvent) { for (uint i = 0; i < ablEvent.ablationCells.size(); i++) { int cellRank = ablEvent.ablationCells[i].cellNum; std::vector<uint> removeSeq = ablEvent.ablationCells[i].nodeNums; cellInfoVecs.activeNodeCountOfThisCell[cellRank] = cellInfoVecs.activeNodeCountOfThisCell[cellRank] - removeSeq.size(); nodes->removeNodes(cellRank, removeSeq); } } void SceCells::computeCenterPos_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); //uint totalMembrActiveNodeCount = thrust::reduce( // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.activeMembrNodeCounts.begin() // + allocPara_m.currentActiveCellCount); uint totalIntnlActiveNodeCount = thrust::reduce( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, 
nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin())) + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), ActiveAndIntnl()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::equal_to<uint>(), CVec2Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.activeIntnlNodeCounts.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), CVec2Divide()); } void SceCells::BC_Imp_M() { /* thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element( 
// (continuation of SceCells::BC_Imp_M — still inside the commented-out
// min/max-element block)
    cellInfoVecs.centerCoordY.begin(),
    cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ;
    MinX= *MinX_Itr ;
    MaxX= *MaxX_Itr ;
    MinY= *MinY_Itr ;
    MaxY= *MaxY_Itr ;
    */
    //cout<<"The minimum location of cell cetners in Y in BC_Imp_M is="<<Tisu_MinX<< endl;
    //cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxX<< endl;
    //cout<<"The minimum location of cell centers in Y in BC_Imp_M is="<<Tisu_MinY<< endl;
    //cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxY<< endl;
    /**
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin()) ),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin()))
                    + allocPara_m.currentActiveCellCount,
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin())),
            BC_Tissue_Damp(Damp_Coef)) ;
    **/
    int NumActCells=allocPara_m.currentActiveCellCount ; //Ali
    // Damp cell centers against the tissue extents via BC_Tissue_Damp.
    // NOTE(review): the output zip pairs centerCoordX with Cell_Damp (no Y
    // component) while the input zip carries (X, Y, damp) — confirm that
    // BC_Tissue_Damp's result tuple is really meant to update only these two.
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin(),
                            cellInfoVecs.Cell_Damp.begin()) ),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin(),
                            cellInfoVecs.Cell_Damp.begin()))
                    + allocPara_m.currentActiveCellCount,
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.Cell_Damp.begin())),
            BC_Tissue_Damp(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,Damp_Coef,NumActCells)) ;
    /**void SceCells::randomizeGrowth() {
    thrust::counting_iterator<uint> countingBegin(0);
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.growthXDir.begin(),
                            cellInfoVecs.growthYDir.begin(),
                            cellInfoVecs.isRandGrowInited.begin(),
                            countingBegin)),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
// (continuation of SceCells::BC_Imp_M — inside the commented-out legacy
// randomizeGrowth() block)
    cellInfoVecs.growthXDir.begin(),
    cellInfoVecs.growthYDir.begin(),
    cellInfoVecs.isRandGrowInited.begin(),
    countingBegin)) + allocPara.currentActiveCellCount,
    thrust::make_zip_iterator(
            thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
                    cellInfoVecs.growthXDir.begin(),
                    cellInfoVecs.growthYDir.begin(),
                    cellInfoVecs.isRandGrowInited.begin())),
    AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin,
            growthAuxData.randomGrowthSpeedMax,
            allocPara.currentActiveCellCount,
            growthAuxData.randGenAuxPara));
    }
    **/
}

// One growth step: randomize growth speeds, advance progress, decide which
// cells are scheduled to grow, add internal points where scheduled, then
// re-adjust growth bookkeeping.  Several legacy sub-steps remain commented.
void SceCells::growAtRandom_M(double dt) {
    totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
            * allocPara_m.maxAllNodePerCell;
    randomizeGrowth_M();
    updateGrowthProgress_M();
    decideIsScheduleToGrow_M();
    //computeCellTargetLength_M();
    //computeDistToCellCenter_M();
    //findMinAndMaxDistToCenter_M();
    //computeLenDiffExpCur_M();
    //stretchCellGivenLenDiff_M();
    addPointIfScheduledToGrow_M();
    //decideIsScheduleToShrink_M();// AAMIRI May5
    //delPointIfScheduledToGrow_M();//AAMIRI - commented out on June20
    adjustGrowthInfo_M();
}

// 2D division driver: first handles cells entering mitosis (copies them and
// finds the Hertwig axis), then, if any cell is flagged for division, builds
// the two daughter cell arrays and updates the active cell count.
void SceCells::divide2D_M() {
    bool isDivisionPresent = decideIfGoingToDivide_M();
    bool isEnteringMitotic = decideIfAnyCellEnteringMitotic() ; //A&A
    //A&A
    if (isEnteringMitotic){
        std::cout<< "I am in EnteringMitotic"<< std::endl;
        copyCellsEnterMitotic();
        findHertwigAxis();
    }
    //A&A
    if (!isDivisionPresent) {
        return;
    }
    //aniDebug = true;
    copyCellsPreDivision_M();
    createTwoNewCellArr_M();
    copyFirstCellArr_M();
    copySecondCellArr_M();
    updateActiveCellCount_M();
    markIsDivideFalse_M();
    //divDebug();
}

// Broadcasts each cell's growthProgress value to all of that cell's nodes
// (nodeGrowPro), using a permutation iterator keyed by flat-index / nodes-per-
// cell.  Continues in the next chunk.
void SceCells::distributeCellGrowthProgress_M() {
    totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
            * allocPara_m.maxAllNodePerCell;
    thrust::counting_iterator<uint> countingBegin(0);
    thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
    thrust::copy(
            thrust::make_permutation_iterator(
                    cellInfoVecs.growthProgress.begin(),
                    make_transform_iterator(countingBegin,
                            DivideFunctor(allocPara_m.maxAllNodePerCell))),
// (continuation of SceCells::distributeCellGrowthProgress_M)
thrust::make_permutation_iterator(
                    cellInfoVecs.growthProgress.begin(),
                    make_transform_iterator(countingEnd,
                            DivideFunctor(allocPara_m.maxAllNodePerCell))),
            nodes->getInfoVecs().nodeGrowPro.begin()
                    + allocPara_m.bdryNodeCount);
    std::cout << "the vlaue of init time stage in distributeCellGrowthProgress_M is"<< InitTimeStage << std:: endl ;
    // On the first step after initialization, seed lastCheckPoint from the
    // current growth progress.
    if (curTime <= InitTimeStage+dt)//AAMIRI /A & A
        thrust::copy(
                cellInfoVecs.growthProgress.begin(),
                cellInfoVecs.growthProgress.end(),
                cellInfoVecs.lastCheckPoint.begin() );
}

// Moves all nodes this step; the boundary-condition-aware variant is used.
void SceCells::allComponentsMove_M() {
    //moveNodes_M();    //Ali
    moveNodes_BC_M();   //Ali
}

//Ali modified this function to introduce differential proliferation rates
// Assigns randomized growth speeds per cell, depending on the cell center's
// position relative to the tissue (RandomizeGrow_M functor).  Continues in
// the next chunk.
void SceCells::randomizeGrowth_M() {
    double CntrTisuX=0.5*(Tisu_MaxX-Tisu_MinX) ;
    double CntrTisuY=0.5*(Tisu_MaxY-Tisu_MinY) ;
    //cout<<"The minimum location of cell cetners in Y in randomizeGrowth_M is="<<Tisu_MinX<< endl;
    //cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxX<< endl;
    //cout<<"The minimum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MinY<< endl;
    //cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxY<< endl;
    // NOTE(review): CntrTisu* is the half-extent (Max-Min)/2, not the
    // midpoint (Max+Min)/2 — confirm this is the intended "tissue center".
    uint seed = time(NULL);
    thrust::counting_iterator<uint> countingBegin(0);
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
                            cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin(),
                            cellInfoVecs.isRandGrowInited.begin(),
                            countingBegin)),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
                            cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin(),
                            cellInfoVecs.isRandGrowInited.begin(),
                            countingBegin))
                    + allocPara_m.currentActiveCellCount,
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
                            cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin(),
                            cellInfoVecs.isRandGrowInited.begin())),
// (continuation of SceCells::randomizeGrowth_M) Functor assigning the
// randomized, position-dependent growth speed.
RandomizeGrow_M(CntrTisuX,CntrTisuY,Tisu_R,growthAuxData.randomGrowthSpeedMin,
                    growthAuxData.randomGrowthSpeedMax, seed));
}

// Advances growthProgress of every active cell: snapshots the old progress
// into growthProgressOld, then applies the Dpp-regulated growth functor.
void SceCells::updateGrowthProgress_M() {
    thrust::copy(cellInfoVecs.growthProgress.begin(),
            cellInfoVecs.growthProgress.begin()
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.growthProgressOld.begin());
    /*
    thrust::transform(cellInfoVecs.growthSpeed.begin(),
            cellInfoVecs.growthSpeed.begin()
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.growthProgress.begin(),
            cellInfoVecs.growthProgress.begin(),
            SaxpyFunctorWithMaxOfOne(dt));
    */
    cout << " I am trying to update growth progress" << endl ;
    //double dummy=0 ;
    double mitoticCheckPoint=growthAuxData.grthPrgrCriVal_M_Ori ;
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
                            cellInfoVecs.cell_DppOld.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.growthSpeed.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
                            cellInfoVecs.cell_DppOld.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.growthSpeed.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.growthProgress.begin(),
            DppGrowRegulator(dt,mitoticCheckPoint));
}

// Marks a cell as scheduled to grow based on its growthProgress relative to
// lastCheckPoint and the growth threshold (PtCondiOp functor).
void SceCells::decideIsScheduleToGrow_M() {
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.lastCheckPoint.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.lastCheckPoint.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.isScheduledToGrow.begin(),
            PtCondiOp(miscPara.growThreshold));
}

//AAMIRI May5
// Flags cells whose center lies inside a hard-coded circular "laser" region
// for shrinking (isDelOp predicate).  Continues in the next chunk.
void SceCells::decideIsScheduleToShrink_M() {
    double laserCenterY = 25.0;
    double laserCenterX = 25.0;
    double laserRadius = 4.0;
    thrust::counting_iterator<uint> iBegin(0);
    thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount);
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(iBegin,
                            cellInfoVecs.centerCoordX.begin(),
// (continuation of SceCells::decideIsScheduleToShrink_M)
cellInfoVecs.centerCoordY.begin(),
                            cellInfoVecs.isScheduledToShrink.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(iEnd,
                            cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount,
                            cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount,
                            cellInfoVecs.isScheduledToShrink.begin()+allocPara_m.currentActiveCellCount)),
            cellInfoVecs.isScheduledToShrink.begin(),
            isDelOp(laserCenterX, laserCenterY, laserRadius));
}

// Maps each cell's growthProgress to an expected (target) length between the
// initial and final cell lengths (CompuTarLen functor).
void SceCells::computeCellTargetLength_M() {
    thrust::transform(cellInfoVecs.growthProgress.begin(),
            cellInfoVecs.growthProgress.begin()
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.expectedLength.begin(),
            CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength));
}

// For every node, feeds its cell's center and growth direction plus the node
// position/active flag into CompuDist, writing distToCenterAlongGrowDir.
// Continues in the next chunk.
void SceCells::computeDistToCellCenter_M() {
    thrust::counting_iterator<uint> iBegin(0);
    thrust::counting_iterator<uint> iEnd(totalNodeCountForActiveCells);
    uint endIndx = allocPara_m.bdryNodeCount + totalNodeCountForActiveCells;
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            make_permutation_iterator(
                                    cellInfoVecs.centerCoordX.begin(),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(
                                                    allocPara_m.maxAllNodePerCell))),
                            make_permutation_iterator(
                                    cellInfoVecs.centerCoordY.begin(),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(
                                                    allocPara_m.maxAllNodePerCell))),
                            make_permutation_iterator(
                                    cellInfoVecs.growthXDir.begin(),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(
                                                    allocPara_m.maxAllNodePerCell))),
                            make_permutation_iterator(
                                    cellInfoVecs.growthYDir.begin(),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(
                                                    allocPara_m.maxAllNodePerCell))),
                            nodes->getInfoVecs().nodeLocX.begin()
                                    + allocPara_m.bdryNodeCount,
                            nodes->getInfoVecs().nodeLocY.begin()
                                    + allocPara_m.bdryNodeCount,
                            nodes->getInfoVecs().nodeIsActive.begin()
                                    + allocPara_m.bdryNodeCount)),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            make_permutation_iterator(
                                    cellInfoVecs.centerCoordX.begin(),
                                    make_transform_iterator(iEnd,
                                            DivideFunctor(
                                                    allocPara_m.maxAllNodePerCell))),
// (continuation of SceCells::computeDistToCellCenter_M — end-iterator side of
// the zip range, then the output and functor)
make_permutation_iterator(
                                    cellInfoVecs.centerCoordY.begin(),
                                    make_transform_iterator(iEnd,
                                            DivideFunctor(
                                                    allocPara_m.maxAllNodePerCell))),
                            make_permutation_iterator(
                                    cellInfoVecs.growthXDir.begin(),
                                    make_transform_iterator(iEnd,
                                            DivideFunctor(
                                                    allocPara_m.maxAllNodePerCell))),
                            make_permutation_iterator(
                                    cellInfoVecs.growthYDir.begin(),
                                    make_transform_iterator(iEnd,
                                            DivideFunctor(
                                                    allocPara_m.maxAllNodePerCell))),
                            nodes->getInfoVecs().nodeLocX.begin() + endIndx,
                            nodes->getInfoVecs().nodeLocY.begin() + endIndx,
                            nodes->getInfoVecs().nodeIsActive.begin()
                                    + endIndx)),
            cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
            CompuDist());
}

// Per cell, reduces distToCenterAlongGrowDir to its minimum (keys are cell
// ranks derived from the flat node index).  The matching maximum reduction
// continues in the next chunk.
// NOTE(review): countingBegin is not declared in this function — presumably
// a class member or file-scope object; verify.
void SceCells::findMinAndMaxDistToCenter_M() {
    thrust::reduce_by_key(
            make_transform_iterator(countingBegin,
                    DivideFunctor(allocPara_m.maxAllNodePerCell)),
            make_transform_iterator(countingBegin,
                    DivideFunctor(allocPara_m.maxAllNodePerCell))
                    + totalNodeCountForActiveCells,
            cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
            cellInfoVecs.cellRanksTmpStorage.begin(),
            cellInfoVecs.smallestDistance.begin(),
            thrust::equal_to<uint>(), thrust::minimum<double>());

    // for nodes of each cell, find the maximum distance from the node to the corresponding
    // cell center along the pre-defined growth direction.
// (continuation of SceCells::findMinAndMaxDistToCenter_M)
thrust::reduce_by_key(
            make_transform_iterator(countingBegin,
                    DivideFunctor(allocPara_m.maxAllNodePerCell)),
            make_transform_iterator(countingBegin,
                    DivideFunctor(allocPara_m.maxAllNodePerCell))
                    + totalNodeCountForActiveCells,
            cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
            cellInfoVecs.cellRanksTmpStorage.begin(),
            cellInfoVecs.biggestDistance.begin(),
            thrust::equal_to<uint>(), thrust::maximum<double>());
}

// lengthDifference = CompuDiff(expectedLength, smallestDistance,
// biggestDistance), computed per active cell.
void SceCells::computeLenDiffExpCur_M() {
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
                            cellInfoVecs.smallestDistance.begin(),
                            cellInfoVecs.biggestDistance.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
                            cellInfoVecs.smallestDistance.begin(),
                            cellInfoVecs.biggestDistance.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.lengthDifference.begin(),
            CompuDiff());
}

// Updates node velocities with a stretch force derived from the cell's
// length difference and growth direction (ApplyStretchForce_M).  Continues
// in the next chunk.
void SceCells::stretchCellGivenLenDiff_M() {
    uint count = allocPara_m.maxAllNodePerCell;
    uint bdry = allocPara_m.bdryNodeCount;
    uint actCount = totalNodeCountForActiveCells;
    uint all = bdry + actCount;
    thrust::counting_iterator<uint> iBegin(0);
    thrust::counting_iterator<uint> iEnd(actCount);
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
                            make_permutation_iterator(
                                    cellInfoVecs.lengthDifference.begin(),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(count))),
                            make_permutation_iterator(
                                    cellInfoVecs.growthXDir.begin(),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(count))),
                            make_permutation_iterator(
                                    cellInfoVecs.growthYDir.begin(),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(count))),
                            nodes->getInfoVecs().nodeVelX.begin() + bdry,
                            nodes->getInfoVecs().nodeVelY.begin() + bdry,
                            make_transform_iterator(iBegin,
                                    ModuloFunctor(count)))),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellNodeInfoVecs.distToCenterAlongGrowDir.begin()
                                    + actCount,
                            make_permutation_iterator(
                                    cellInfoVecs.lengthDifference.begin(),
// (continuation of SceCells::stretchCellGivenLenDiff_M)
make_transform_iterator(iEnd, DivideFunctor(count))),
                            make_permutation_iterator(
                                    cellInfoVecs.growthXDir.begin(),
                                    make_transform_iterator(iEnd,
                                            DivideFunctor(count))),
                            make_permutation_iterator(
                                    cellInfoVecs.growthYDir.begin(),
                                    make_transform_iterator(iEnd,
                                            DivideFunctor(count))),
                            nodes->getInfoVecs().nodeVelX.begin() + all,
                            nodes->getInfoVecs().nodeVelY.begin() + all,
                            make_transform_iterator(iEnd,
                                    ModuloFunctor(count)))),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            nodes->getInfoVecs().nodeVelX.begin() + bdry,
                            nodes->getInfoVecs().nodeVelY.begin() + bdry)),
            ApplyStretchForce_M(bioPara.elongationCoefficient,
                    allocPara_m.maxMembrNodePerCell));
}

// For each cell scheduled to grow, AddPtOp_M (seeded RNG, writes directly
// through raw node-array pointers) updates lastCheckPoint and the active
// internal node count.
void SceCells::addPointIfScheduledToGrow_M() {
    uint seed = time(NULL);
    uint activeCellCount = allocPara_m.currentActiveCellCount;
    thrust::counting_iterator<uint> iBegin(0);
    thrust::counting_iterator<uint> iEnd(activeCellCount);
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin(),
                            cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin(), iBegin,
                            cellInfoVecs.lastCheckPoint.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.isScheduledToGrow.begin()
                                    + activeCellCount,
                            cellInfoVecs.activeIntnlNodeCounts.begin()
                                    + activeCellCount,
                            cellInfoVecs.centerCoordX.begin()
                                    + activeCellCount,
                            cellInfoVecs.centerCoordY.begin()
                                    + activeCellCount, iEnd,
                            cellInfoVecs.lastCheckPoint.begin()
                                    + activeCellCount)),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.lastCheckPoint.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin())),
            AddPtOp_M(seed, miscPara.addNodeDistance, miscPara.growThreshold,
                    growthAuxData.nodeXPosAddress,
                    growthAuxData.nodeYPosAddress,
                    growthAuxData.nodeIsActiveAddress));
}

//AAMIRI
// Deletes points from cells scheduled to shrink; only active after simulated
// time 70000 (laser-ablation window).  Continues in the next chunk.
void SceCells::delPointIfScheduledToGrow_M() {
    uint seed = time(NULL);
    uint activeCellCount = allocPara_m.currentActiveCellCount;
    thrust::counting_iterator<uint> iBegin(0);
    thrust::counting_iterator<uint>
// (continuation of SceCells::delPointIfScheduledToGrow_M — completes the
// counting_iterator declaration started in the previous chunk)
iEnd(activeCellCount);
    int timeStep = curTime/dt;  // NOTE(review): integer truncation — intended?
    // Re-evaluate the shrink schedule exactly once, in a narrow time window.
    if (curTime>70000.0 && curTime<70000.1){
        decideIsScheduleToShrink_M();// AAMIRI
    }
    if (curTime > 70000.0)
        thrust::transform(
                thrust::make_zip_iterator(
                        thrust::make_tuple(
                                cellInfoVecs.isScheduledToShrink.begin(),
                                cellInfoVecs.activeIntnlNodeCounts.begin(),
                                cellInfoVecs.centerCoordX.begin(),
                                cellInfoVecs.centerCoordY.begin(), iBegin,
                                cellInfoVecs.activeMembrNodeCounts.begin(),
                                cellInfoVecs.isCellActive.begin(),
                                cellInfoVecs.growthSpeed.begin())),
                thrust::make_zip_iterator(
                        thrust::make_tuple(
                                cellInfoVecs.isScheduledToShrink.begin()
                                        + activeCellCount,
                                cellInfoVecs.activeIntnlNodeCounts.begin()
                                        + activeCellCount,
                                cellInfoVecs.centerCoordX.begin()
                                        + activeCellCount,
                                cellInfoVecs.centerCoordY.begin()
                                        + activeCellCount, iEnd,
                                cellInfoVecs.activeMembrNodeCounts.begin()
                                        + activeCellCount,
                                cellInfoVecs.isCellActive.begin()
                                        + activeCellCount,
                                cellInfoVecs.growthSpeed.begin()
                                        + activeCellCount)),
                thrust::make_zip_iterator(
                        thrust::make_tuple(
                                cellInfoVecs.activeMembrNodeCounts.begin(),
                                cellInfoVecs.activeIntnlNodeCounts.begin(),
                                cellInfoVecs.isCellActive.begin(),
                                cellInfoVecs.growthSpeed.begin())),
                DelPtOp_M(seed, timeStep, growthAuxData.adhIndxAddr,
                        growthAuxData.nodeXPosAddress,
                        growthAuxData.nodeYPosAddress,
                        growthAuxData.nodeIsActiveAddress));
}

// Flags cells for division via CompuIsDivide_M (growthProgress + internal
// node count vs. the maximum), then counts the flags.  Returns true when at
// least one cell will divide.  Continues in the next chunk.
bool SceCells::decideIfGoingToDivide_M() {
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.isDividing.begin(),
            CompuIsDivide_M(allocPara_m.maxIntnlNodePerCell));
    // sum all bool values which indicate whether the cell is going to divide.
    // toBeDivideCount is the total number of cells going to divide.
// (continuation of SceCells::decideIfGoingToDivide_M)
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(),
            cellInfoVecs.isDividing.begin()
                    + allocPara_m.currentActiveCellCount, (uint) (0));
    if (divAuxData.toBeDivideCount > 0) {
        return true;
    } else {
        return false;
    }
}

//A&A
// Flags cells entering mitosis by comparing the new and old growthProgress
// against the mitotic criterion value (CompuIsEnteringMitotic_M); returns
// true when any cell is flagged.
bool SceCells::decideIfAnyCellEnteringMitotic() {
    double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant
    //growthAuxData.grthProgrEndCPU
    //        - growthAuxData.prolifDecay
    //                * (growthAuxData.grthProgrEndCPU
    //                        - growthAuxData.grthPrgrCriVal_M_Ori);
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.growthProgressOld.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.growthProgressOld.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.isEnteringMitotic.begin(),
            CompuIsEnteringMitotic_M(grthPrgrCriVal_M));
    // sum all bool values which indicate whether the cell is going to divide.
    // toBeDivideCount is the total number of cells going to divide.
    divAuxData.toEnterMitoticCount = thrust::reduce(
            cellInfoVecs.isEnteringMitotic.begin(),
            cellInfoVecs.isEnteringMitotic.begin()
                    + allocPara_m.currentActiveCellCount, (uint) (0));
    if (divAuxData.toEnterMitoticCount > 0) {
        return true;
    } else {
        return false;
    }
}

//AAMIRI
/*
bool SceCells::decideIfGoingToRemove_M() {
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.isRemoving.begin(),
            CompuIsRemoving_M(allocPara_m.maxIntnlNodePerCell));
    // sum all bool values which indicate whether the cell is going to divide.
    // toBeDivideCount is the total number of cells going to divide.
// (continuation of the commented-out SceCells::decideIfGoingToRemove_M)
divAuxData.toBeRemovingCount = thrust::reduce(cellInfoVecs.isRemoving.begin(),
            cellInfoVecs.isRemoving.begin()
                    + allocPara_m.currentActiveCellCount, (uint) (0));
    if (divAuxData.toBeRemovingCount > 0) {
        return true;
    } else {
        return false;
    }
}
*/

// Collects animation raw data: node positions (valued by membrane tension
// magnitude), adhesion bonds, membrane polyline links and qualifying internal
// links.  Continues in the next chunks.
AniRawData SceCells::obtainAniRawData(AnimationCriteria& aniCri) {
    uint activeCellCount = allocPara_m.currentActiveCellCount;
    uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
    uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
    uint beginIndx = allocPara_m.bdryNodeCount;

    AniRawData rawAniData;
    //cout << "size of potential pairs = " << pairs.size() << endl;

    // unordered_map is more efficient than map, but it is a c++ 11 feature
    // and c++ 11 seems to be incompatible with Thrust.
    IndexMap locIndexToAniIndexMap;

    uint maxActiveNode = activeCellCount * maxNodePerCell;
    // Host-side staging copies of the device node arrays.
    thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode);
    thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode);
    thrust::host_vector<bool> hostIsActiveVec(maxActiveNode);
    thrust::host_vector<int> hostBondVec(maxActiveNode);
    thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode);
    thrust::copy(
            thrust::make_zip_iterator(
                    thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
                            nodes->getInfoVecs().nodeLocY.begin(),
                            nodes->getInfoVecs().nodeIsActive.begin(),
                            nodes->getInfoVecs().nodeAdhereIndex.begin(),
                            nodes->getInfoVecs().membrTensionMag.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
                            nodes->getInfoVecs().nodeLocY.begin(),
                            nodes->getInfoVecs().nodeIsActive.begin(),
                            nodes->getInfoVecs().nodeAdhereIndex.begin(),
                            nodes->getInfoVecs().membrTensionMag.begin()))
                    + maxActiveNode,
            thrust::make_zip_iterator(
                    thrust::make_tuple(hostTmpVectorLocX.begin(),
                            hostTmpVectorLocY.begin(), hostIsActiveVec.begin(),
                            hostBondVec.begin(),
                            hostTmpVectorTenMag.begin())));
    thrust::host_vector<uint> curActiveMemNodeCounts =
            cellInfoVecs.activeMembrNodeCounts;

    CVector tmpPos;
    uint index1;
    int index2;
// (continuation of SceCells::obtainAniRawData)
std::vector<BondInfo> bondInfoVec;
    double node1X, node1Y;
    double node2X, node2Y;
    double aniVal;

    // Collect inter-cell adhesion bonds; each bond is recorded once
    // (index2 > index1) and -1 marks "no adhesion partner".
    // NOTE(review): index2 is int, index1 is uint — the comparison promotes
    // index2 to unsigned, so a -1 passes the first test and is only filtered
    // by the explicit != -1 check; confirm this ordering is intentional.
    for (uint i = 0; i < activeCellCount; i++) {
        for (uint j = 0; j < maxMemNodePerCell; j++) {
            index1 = beginIndx + i * maxNodePerCell + j;
            if (hostIsActiveVec[index1] == true) {
                index2 = hostBondVec[index1];
                if (index2 > index1 && index2 != -1) {
                    BondInfo bond;
                    bond.cellRank1 = i;
                    bond.pos1 = CVector(hostTmpVectorLocX[index1],
                            hostTmpVectorLocY[index1], 0);
                    bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
                    bond.pos2 = CVector(hostTmpVectorLocX[index2],
                            hostTmpVectorLocY[index2], 0);
                    bondInfoVec.push_back(bond);
                }
            }
        }
    }
    rawAniData.bondsArr = bondInfoVec;

    // Build membrane polyline links (wrapping the last active node back to
    // the first); each node gets an animation index on first use.
    uint curIndex = 0;
    for (uint i = 0; i < activeCellCount; i++) {
        for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
            index1 = beginIndx + i * maxNodePerCell + j;
            if (j == curActiveMemNodeCounts[i] - 1) {
                index2 = beginIndx + i * maxNodePerCell;
            } else {
                index2 = beginIndx + i * maxNodePerCell + j + 1;
            }
            if (hostIsActiveVec[index1] == true
                    && hostIsActiveVec[index2] == true) {
                node1X = hostTmpVectorLocX[index1];
                node1Y = hostTmpVectorLocY[index1];
                node2X = hostTmpVectorLocX[index2];
                node2Y = hostTmpVectorLocY[index2];
                IndexMap::iterator it = locIndexToAniIndexMap.find(index1);
                if (it == locIndexToAniIndexMap.end()) {
                    locIndexToAniIndexMap.insert(
                            std::pair<uint, uint>(index1, curIndex));
                    curIndex++;
                    tmpPos = CVector(node1X, node1Y, 0);
                    //aniVal = hostTmpVectorNodeType[index1];
                    aniVal = hostTmpVectorTenMag[index1];
                    rawAniData.aniNodePosArr.push_back(tmpPos);
                    rawAniData.aniNodeVal.push_back(aniVal);
                }
                it = locIndexToAniIndexMap.find(index2);
                if (it == locIndexToAniIndexMap.end()) {
                    locIndexToAniIndexMap.insert(
                            std::pair<uint, uint>(index2, curIndex));
                    curIndex++;
                    tmpPos = CVector(node2X, node2Y, 0);
                    //aniVal = hostTmpVectorNodeType[index2];
                    aniVal = hostTmpVectorTenMag[index2];
                    rawAniData.aniNodePosArr.push_back(tmpPos);
                    rawAniData.aniNodeVal.push_back(aniVal);
                }
                it = locIndexToAniIndexMap.find(index1);
                uint aniIndex1
// (continuation of SceCells::obtainAniRawData — completes the aniIndex1
// declaration started in the previous chunk)
= it->second;
                it = locIndexToAniIndexMap.find(index2);
                uint aniIndex2 = it->second;
                LinkAniData linkData;
                linkData.node1Index = aniIndex1;
                linkData.node2Index = aniIndex2;
                rawAniData.memLinks.push_back(linkData);
            }
        }
    }

    // Internal-internal links within each cell, kept only when the pair
    // passes the animation pairing criterion.
    for (uint i = 0; i < activeCellCount; i++) {
        for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) {
            for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) {
                index1 = i * maxNodePerCell + maxMemNodePerCell + j;
                index2 = i * maxNodePerCell + maxMemNodePerCell + k;
                if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) {
                    node1X = hostTmpVectorLocX[index1];
                    node1Y = hostTmpVectorLocY[index1];
                    node2X = hostTmpVectorLocX[index2];
                    node2Y = hostTmpVectorLocY[index2];
                    if (aniCri.isPairQualify_M(node1X, node1Y, node2X,
                            node2Y)) {
                        IndexMap::iterator it = locIndexToAniIndexMap.find(
                                index1);
                        if (it == locIndexToAniIndexMap.end()) {
                            locIndexToAniIndexMap.insert(
                                    std::pair<uint, uint>(index1, curIndex));
                            curIndex++;
                            tmpPos = CVector(node1X, node1Y, 0);
                            //aniVal = hostTmpVectorNodeType[index1];
                            aniVal = -1;
                            rawAniData.aniNodePosArr.push_back(tmpPos);
                            rawAniData.aniNodeVal.push_back(aniVal);
                        }
                        it = locIndexToAniIndexMap.find(index2);
                        if (it == locIndexToAniIndexMap.end()) {
                            locIndexToAniIndexMap.insert(
                                    std::pair<uint, uint>(index2, curIndex));
                            curIndex++;
                            tmpPos = CVector(node2X, node2Y, 0);
                            //aniVal = hostTmpVectorNodeType[index1];
                            aniVal = -1;
                            rawAniData.aniNodePosArr.push_back(tmpPos);
                            rawAniData.aniNodeVal.push_back(aniVal);
                        }
                        it = locIndexToAniIndexMap.find(index1);
                        uint aniIndex1 = it->second;
                        it = locIndexToAniIndexMap.find(index2);
                        uint aniIndex2 = it->second;
                        LinkAniData linkData;
                        linkData.node1Index = aniIndex1;
                        linkData.node2Index = aniIndex2;
                        rawAniData.internalLinks.push_back(linkData);
                    }
                }
            }
        }
    }
    return rawAniData;
}

// Same collection as obtainAniRawData, but nodes are valued per cell
// (cellColors) and per-cell Dpp level plus the perimeter-normalized membrane
// normal-force sum are attached.  Continues in the next chunks.  //AliE
AniRawData SceCells::obtainAniRawDataGivenCellColor(vector<double>& cellColors,
        AnimationCriteria& aniCri, vector<double>& cellsPerimeter,
        vector <double> & cellsDppLevel) { //AliE

    cout << "I am in obtainAniRawDataGivenCellColor start"<<endl;
    uint activeCellCount = allocPara_m.currentActiveCellCount;
    uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
    uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
    uint beginIndx = allocPara_m.bdryNodeCount;

    assert(cellColors.size() >= activeCellCount);
    assert(cellsPerimeter.size() == activeCellCount); //AliE

    AniRawData rawAniData;
    //cout << "size of potential pairs = " << pairs.size() << endl;

    // unordered_map is more efficient than map, but it is a c++ 11 feature
    // and c++ 11 seems to be incompatible with Thrust.
    IndexMap locIndexToAniIndexMap;

    uint maxActiveNode = activeCellCount * maxNodePerCell;

    // Host-side staging copies of the device node arrays.
    thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode);
    thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode);
    thrust::host_vector<bool> hostIsActiveVec(maxActiveNode);
    thrust::host_vector<int> hostBondVec(maxActiveNode);
    thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode);
    thrust::host_vector<double> hostTmpVectorF_MI_M_x(maxActiveNode);//AAMIRI //AliE
    thrust::host_vector<double> hostTmpVectorF_MI_M_y(maxActiveNode);//AAMIRI //AliE
    thrust::host_vector<double> hostTmpVectorF_MI_M_T(maxActiveNode); //AliE
    thrust::host_vector<double> hostTmpVectorF_MI_M_N(maxActiveNode);//AliE
    thrust::host_vector<double> hostTmpVectorNodeCurvature(maxActiveNode);//AAMIRI
    thrust::host_vector<double> hostTmpVectorExtForceTangent(maxActiveNode);//AAMIRI
    thrust::host_vector<double> hostTmpVectorExtForceNormal(maxActiveNode);//AAMIRI
    // First staging copy (a zip iterator carries at most 10 streams, so the
    // remaining two vectors are copied separately below).
    thrust::copy(
            thrust::make_zip_iterator(
                    thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
                            nodes->getInfoVecs().nodeLocY.begin(),
                            nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE
                            nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE
                            nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI
                            nodes->getInfoVecs().nodeIsActive.begin(),
                            nodes->getInfoVecs().nodeAdhereIndex.begin(),
                            nodes->getInfoVecs().membrTensionMag.begin(),
// (continuation of SceCells::obtainAniRawDataGivenCellColor — first staging
// copy, then the tangential/normal force components copied separately)
nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI
                            nodes->getInfoVecs().nodeExtForceNormal.begin())),//AAMIRI
            thrust::make_zip_iterator(
                    thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
                            nodes->getInfoVecs().nodeLocY.begin(),
                            nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE
                            nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE
                            nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI
                            nodes->getInfoVecs().nodeIsActive.begin(),
                            nodes->getInfoVecs().nodeAdhereIndex.begin(),
                            nodes->getInfoVecs().membrTensionMag.begin(),
                            nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI
                            nodes->getInfoVecs().nodeExtForceNormal.begin()))//AAMIRI
                    + maxActiveNode,
            thrust::make_zip_iterator(
                    thrust::make_tuple(hostTmpVectorLocX.begin(),
                            hostTmpVectorLocY.begin(),
                            hostTmpVectorF_MI_M_x.begin(),
                            hostTmpVectorF_MI_M_y.begin(),//AAMIRI
                            hostTmpVectorNodeCurvature.begin(), //AAMIRI
                            hostIsActiveVec.begin(), hostBondVec.begin(),
                            hostTmpVectorTenMag.begin(),
                            hostTmpVectorExtForceTangent.begin(),
                            hostTmpVectorExtForceNormal.begin())));//AAMIRI

    //Copy more than 10 elements is not allowed so, I separate it
    thrust::copy(
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            nodes->getInfoVecs().nodeF_MI_M_T.begin(), //Ali
                            nodes->getInfoVecs().nodeF_MI_M_N.begin()  //Ali
                            )),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            nodes->getInfoVecs().nodeF_MI_M_T.begin(),//AliE
                            nodes->getInfoVecs().nodeF_MI_M_N.begin() //AliE
                            )) + maxActiveNode,
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            hostTmpVectorF_MI_M_T.begin(),
                            hostTmpVectorF_MI_M_N.begin()
                            )));

    thrust::host_vector<uint> curActiveMemNodeCounts =
            cellInfoVecs.activeMembrNodeCounts;
    thrust::host_vector<uint> curActiveIntnlNodeCounts =
            cellInfoVecs.activeIntnlNodeCounts;

    CVector tmpPos;
    CVector tmpF_MI_M ;//AAmiri
    CVector tmpExtForce;//AAMIRI
    double tmpCurv;
    uint index1;
    int index2;
    std::vector<BondInfo> bondInfoVec;
    double node1X, node1Y;
    double node2X, node2Y;
    double node1F_MI_M_x, node1F_MI_M_y;//AAMIRI //AliE
    double nodeExtForceT,
// (continuation of SceCells::obtainAniRawDataGivenCellColor — completes the
// declaration started in the previous chunk)
nodeExtForceN;//AAMIRI
    double aniVal;
    double aniVal2;
    // Per-cell running sum of |normal membrane-interaction force|.
    // NOTE(review): this VLA is sized activeCellCount-1 but the loops below
    // index 0..activeCellCount-1, so the last cell writes one element past
    // the end — looks like it should be [activeCellCount]; confirm and fix.
    double tmpF_MI_M_MagN_Int[activeCellCount-1] ; //AliE

    //This is how the VTK file is intended to be written. First the membrane nodes are going to be written and then internal nodes.
    //loop on membrane nodes
    for (uint i = 0; i < activeCellCount; i++) {
        tmpF_MI_M_MagN_Int[i]=0.0 ;
        for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
            index1 = beginIndx + i * maxNodePerCell + j;
            if ( hostIsActiveVec[index1]==true) {
                tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI
                rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI
                node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE
                node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE
                tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE
                rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); //AliE
                // tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+sqrt(pow(hostTmpVectorF_MI_M_x[index1],2)+pow(hostTmpVectorF_MI_M_y[index1],2)) ; //AliE
                tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+abs(hostTmpVectorF_MI_M_N[index1]) ; //AliE
                nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI
                nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI
                tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI
                rawAniData.aniNodeExtForceArr.push_back(tmpExtForce);
                rawAniData.aniNodeRank.push_back(i);//AAMIRI
            }
        }
    }

    //loop on internal nodes
    for (uint i=0; i<activeCellCount; i++){
        for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) {
            index1 = beginIndx + i * maxNodePerCell + j;
            if ( hostIsActiveVec[index1]==true ) {
                tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI
                rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI
                node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE
                node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE
                tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE
                rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M);
                nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI
                nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI
                tmpExtForce =
// (continuation of SceCells::obtainAniRawDataGivenCellColor — internal node
// loop, then adhesion bonds and membrane links)
CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI
                rawAniData.aniNodeExtForceArr.push_back(tmpExtForce);
                rawAniData.aniNodeRank.push_back(i);//AAMIRI
            }
        }
    }

    // Adhesion bonds, recorded once per pair (same logic as obtainAniRawData).
    for (uint i = 0; i < activeCellCount; i++) {
        for (uint j = 0; j < maxMemNodePerCell; j++) {
            index1 = beginIndx + i * maxNodePerCell + j;
            if (hostIsActiveVec[index1] == true) {
                index2 = hostBondVec[index1];
                if (index2 > index1 && index2 != -1) {
                    BondInfo bond;
                    bond.cellRank1 = i;
                    bond.pos1 = CVector(hostTmpVectorLocX[index1],
                            hostTmpVectorLocY[index1], 0);
                    bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
                    bond.pos2 = CVector(hostTmpVectorLocX[index2],
                            hostTmpVectorLocY[index2], 0);
                    bondInfoVec.push_back(bond);
                }
            }
        }
    }
    rawAniData.bondsArr = bondInfoVec;

    uint curIndex = 0;
    //loop on membrane nodes
    for (uint i = 0; i < activeCellCount; i++) {
        for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
            index1 = beginIndx + i * maxNodePerCell + j;
            if (j == curActiveMemNodeCounts[i] - 1) {
                index2 = beginIndx + i * maxNodePerCell;
            } else {
                index2 = beginIndx + i * maxNodePerCell + j + 1;
            }
            if (hostIsActiveVec[index1] == true
                    && hostIsActiveVec[index2] == true) {
                node1X = hostTmpVectorLocX[index1];
                node1Y = hostTmpVectorLocY[index1];
                node2X = hostTmpVectorLocX[index2];
                node2Y = hostTmpVectorLocY[index2];
                IndexMap::iterator it = locIndexToAniIndexMap.find(index1);
                if (it == locIndexToAniIndexMap.end()) {
                    locIndexToAniIndexMap.insert(
                            std::pair<uint, uint>(index1, curIndex));
                    curIndex++;
                    tmpPos = CVector(node1X, node1Y, 0);
                    //aniVal = hostTmpVectorNodeType[index1];
                    aniVal = cellColors[i];
                    rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali added
                    //aniVal2=dppLevels_Cell[i] ;
                    aniVal2=cellsDppLevel[i] ;
                    rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
                    rawAniData.aniNodePosArr.push_back(tmpPos);
                    rawAniData.aniNodeVal.push_back(aniVal);
                }
                it = locIndexToAniIndexMap.find(index2);
                if (it == locIndexToAniIndexMap.end()) {
                    locIndexToAniIndexMap.insert(
                            std::pair<uint, uint>(index2,
curIndex));
                curIndex++;
                tmpPos = CVector(node2X, node2Y, 0);
                //aniVal = hostTmpVectorNodeType[index2];
                aniVal = cellColors[i];
                rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added
                //aniVal2=dppLevels_Cell[i];
                aniVal2=cellsDppLevel[i];
                rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
                rawAniData.aniNodePosArr.push_back(tmpPos);
                rawAniData.aniNodeVal.push_back(aniVal);
            }
            // Both endpoints are mapped by now; emit the membrane link.
            it = locIndexToAniIndexMap.find(index1);
            uint aniIndex1 = it->second;
            it = locIndexToAniIndexMap.find(index2);
            uint aniIndex2 = it->second;
            LinkAniData linkData;
            linkData.node1Index = aniIndex1;
            linkData.node2Index = aniIndex2;
            rawAniData.memLinks.push_back(linkData);
        }
    }
}
// Loop on internal nodes: link each internal node to any other node of the
// same cell (membrane or internal) that passes the pairing criterion.
for (uint i = 0; i < activeCellCount; i++) {
    // for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) {
    for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) {
        for (uint k = 0; k < allocPara_m.maxAllNodePerCell; k++) { //Ali
        //for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { //Ali comment
            // NOTE(review): unlike the loops above, these indices omit
            // beginIndx -- confirm beginIndx is zero here or this is a bug.
            index1 = i * maxNodePerCell + maxMemNodePerCell + j;
            index2 = i * maxNodePerCell + k; //Ali
            // index2 = i * maxNodePerCell + maxMemNodePerCell + k; //Ali comment
            // if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) {
            if (hostIsActiveVec[index1] && hostIsActiveVec[index2]&& index1 !=index2 ) {
                node1X = hostTmpVectorLocX[index1];
                node1Y = hostTmpVectorLocY[index1];
                node2X = hostTmpVectorLocX[index2];
                node2Y = hostTmpVectorLocY[index2];
                if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) {
                    IndexMap::iterator it = locIndexToAniIndexMap.find(
                            index1);
                    if (it == locIndexToAniIndexMap.end()) {
                        locIndexToAniIndexMap.insert(
                                std::pair<uint, uint>(index1, curIndex));
                        curIndex++;
                        tmpPos = CVector(node1X, node1Y, 0);
                        //aniVal = hostTmpVectorNodeType[index1];
                        aniVal = cellColors[i];
                        rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added
                        //aniVal2=dppLevels_Cell[i];
                        aniVal2=cellsDppLevel[i];
rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
                        rawAniData.aniNodePosArr.push_back(tmpPos);
                        rawAniData.aniNodeVal.push_back(aniVal);
                    }
                    it = locIndexToAniIndexMap.find(index2);
                    if (it == locIndexToAniIndexMap.end()) {
                        locIndexToAniIndexMap.insert(
                                std::pair<uint, uint>(index2, curIndex));
                        curIndex++;
                        tmpPos = CVector(node2X, node2Y, 0);
                        //aniVal = hostTmpVectorNodeType[index1];
                        aniVal = cellColors[i];
                        rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added
                        //aniVal2=dppLevels_Cell[i];
                        aniVal2=cellsDppLevel[i];
                        rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added
                        rawAniData.aniNodePosArr.push_back(tmpPos);
                        rawAniData.aniNodeVal.push_back(aniVal);
                    }
                    // Both endpoints mapped; emit the internal link.
                    it = locIndexToAniIndexMap.find(index1);
                    uint aniIndex1 = it->second;
                    it = locIndexToAniIndexMap.find(index2);
                    uint aniIndex2 = it->second;
                    LinkAniData linkData;
                    linkData.node1Index = aniIndex1;
                    linkData.node2Index = aniIndex2;
                    rawAniData.internalLinks.push_back(linkData);
                }
            }
        }
    }
}
cout << "I am in obtainAniRawDataGivenCellColor end"<<endl;
return rawAniData;
}

// Copies host-side initial per-cell counts (active membrane nodes, active
// internal nodes) and growth progress into the device-side cellInfoVecs,
// and recomputes totalNodeCountForActiveCells from the number of cells.
// Precondition (asserted): both count vectors have the same length.
void SceCells::copyInitActiveNodeCount_M(
        std::vector<uint>& initMembrActiveNodeCounts,
        std::vector<uint>& initIntnlActiveNodeCounts,
        std::vector<double> &initGrowProgVec) {
    assert(
            initMembrActiveNodeCounts.size()
                    == initIntnlActiveNodeCounts.size());
    totalNodeCountForActiveCells = initMembrActiveNodeCounts.size()
            * allocPara_m.maxAllNodePerCell;
    thrust::copy(initMembrActiveNodeCounts.begin(),
            initMembrActiveNodeCounts.end(),
            cellInfoVecs.activeMembrNodeCounts.begin());
    thrust::copy(initIntnlActiveNodeCounts.begin(),
            initIntnlActiveNodeCounts.end(),
            cellInfoVecs.activeIntnlNodeCounts.begin());
    thrust::copy(initGrowProgVec.begin(), initGrowProgVec.end(),
            cellInfoVecs.growthProgress.begin());
}

// Debug dump: prints allocation parameters and a number of per-cell /
// per-node vectors to stdout, then blocks on std::cin as a manual pause.
void SceCells::myDebugFunction() {
    uint maxActiveNodeCount = allocPara_m.currentActiveCellCount
            * allocPara_m.maxAllNodePerCell;
    uint maxActiveCellCount = allocPara_m.currentActiveCellCount;
    std::cout <<
"totalNodeCountforActiveCells: " << totalNodeCountForActiveCells << std::endl; std::cout << "maxAllNodePerCell: " << allocPara_m.maxAllNodePerCell << std::endl; std::cout << "maxActiveCellCount: " << maxActiveCellCount << std::endl; std::cout << "bdryNodeCount: " << allocPara_m.bdryNodeCount << std::endl; std::cout << "grow threshold: " << miscPara.growThreshold << std::endl; std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthProgress[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.isScheduledToGrow[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lastCheckPoint[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeCount; i++) { if (nodes->getInfoVecs().nodeIsActive[i] && nodes->getInfoVecs().nodeCellType[i] == CellIntnl) { std::cout << nodes->getInfoVecs().nodeVelX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.activeIntnlNodeCounts[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.expectedLength[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.smallestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.biggestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lengthDifference[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthXDir[i] << " "; } 
std::cout << std::endl;
for (uint i = 0; i < maxActiveCellCount; i++) {
    std::cout << cellInfoVecs.growthYDir[i] << " ";
}
std::cout << std::endl;
// manual pause: wait for any console input
int jj;
std::cin >> jj;
}

// Debug dump of the division scratch data (divAuxData): prints every
// temporary vector used while splitting a mother cell into two daughters,
// then blocks on std::cin as a manual breakpoint.
void SceCells::divDebug() {
    std::cout << "tmpIsActive_M: ";
    for (uint i = 0; i < divAuxData.tmpIsActive_M.size(); i++) {
        std::cout << divAuxData.tmpIsActive_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpNodePosX_M: ";
    for (uint i = 0; i < divAuxData.tmpNodePosX_M.size(); i++) {
        std::cout << divAuxData.tmpNodePosX_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpNodePosY_M : ";
    for (uint i = 0; i < divAuxData.tmpNodePosY_M.size(); i++) {
        std::cout << divAuxData.tmpNodePosY_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpCellRank_M : ";
    for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) {
        std::cout << divAuxData.tmpCellRank_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpDivDirX_M : ";
    for (uint i = 0; i < divAuxData.tmpDivDirX_M.size(); i++) {
        std::cout << divAuxData.tmpDivDirX_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpDivDirY_M : ";
    for (uint i = 0; i < divAuxData.tmpDivDirY_M.size(); i++) {
        std::cout << divAuxData.tmpDivDirY_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpCenterPosX_M : ";
    for (uint i = 0; i < divAuxData.tmpCenterPosX_M.size(); i++) {
        std::cout << divAuxData.tmpCenterPosX_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpCenterPosY_M : ";
    for (uint i = 0; i < divAuxData.tmpCenterPosY_M.size(); i++) {
        std::cout << divAuxData.tmpCenterPosY_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpIsActive1_M : ";
    for (uint i = 0; i < divAuxData.tmpIsActive1_M.size(); i++) {
        std::cout << divAuxData.tmpIsActive1_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpXPos1_M : ";
    for (uint i = 0; i < divAuxData.tmpXPos1_M.size(); i++) {
        std::cout << divAuxData.tmpXPos1_M[i] << " ";
        // flag a large x-jump between two consecutive active membrane nodes
        if (i > 0 && i < allocPara_m.maxMembrNodePerCell
                && divAuxData.tmpIsActive1_M[i]
                && divAuxData.tmpIsActive1_M[i - 1] &&
fabs(divAuxData.tmpXPos1_M[i] - divAuxData.tmpXPos1_M[i - 1]) > 0.1) {
            std::cout << "11111111111111111111111, " << i << std::endl;
            int jj;
            cin >> jj;
        }
    }
    std::cout << std::endl;
    // re-read the same positions straight from the device for comparison
    std::cout << "XPos1_onDevice : ";
    for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) {
        for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) {
            uint index = divAuxData.tmpCellRank_M[i]
                    * allocPara_m.maxAllNodePerCell + j;
            std::cout << nodes->getInfoVecs().nodeLocX[index] << " ";
        }
    }
    std::cout << std::endl;
    std::cout << "tmpYPos1_M : ";
    for (uint i = 0; i < divAuxData.tmpYPos1_M.size(); i++) {
        std::cout << divAuxData.tmpYPos1_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpIsActive2_M: ";
    for (uint i = 0; i < divAuxData.tmpIsActive2_M.size(); i++) {
        std::cout << divAuxData.tmpIsActive2_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmpXPos2_M : ";
    for (uint i = 0; i < divAuxData.tmpXPos2_M.size(); i++) {
        std::cout << divAuxData.tmpXPos2_M[i] << " ";
        // same large-jump check for daughter cell 2
        if (i > 0 && i < allocPara_m.maxMembrNodePerCell
                && divAuxData.tmpIsActive2_M[i]
                && divAuxData.tmpIsActive2_M[i - 1]
                && fabs(divAuxData.tmpXPos2_M[i] - divAuxData.tmpXPos2_M[i - 1])
                        > 0.1) {
            std::cout << "2222222222222222222, " << i << std::endl;
            int jj;
            cin >> jj;
        }
    }
    std::cout << std::endl;
    std::cout << "tmpYPos2_M : ";
    for (uint i = 0; i < divAuxData.tmpYPos2_M.size(); i++) {
        std::cout << divAuxData.tmpYPos2_M[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmp1InternalActiveCounts: ";
    for (uint i = 0; i < divAuxData.tmp1InternalActiveCounts.size(); i++) {
        std::cout << divAuxData.tmp1InternalActiveCounts[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmp2InternalActiveCounts: ";
    for (uint i = 0; i < divAuxData.tmp2InternalActiveCounts.size(); i++) {
        std::cout << divAuxData.tmp2InternalActiveCounts[i] << " ";
    }
    std::cout << std::endl;
    std::cout << "tmp1MemActiveCounts: ";
    for (uint i = 0; i < divAuxData.tmp1MemActiveCounts.size(); i++) {
        std::cout << divAuxData.tmp1MemActiveCounts[i] << " ";
    }
    std::cout <<
std::endl;
    std::cout << "tmp2MemActiveCounts: ";
    for (uint i = 0; i < divAuxData.tmp2MemActiveCounts.size(); i++) {
        std::cout << divAuxData.tmp2MemActiveCounts[i] << " ";
    }
    std::cout << std::endl;
    // manual pause: wait for any console input
    int jj;
    std::cin >> jj;
}

// For every active cell, runs AdjustGrowth on the tuple (active internal
// node count, growth progress, last checkpoint), gated by the
// isScheduledToGrow flag; halfMax (half the internal-node capacity) is the
// threshold handed to the functor.
void SceCells::adjustGrowthInfo_M() {
    uint halfMax = allocPara_m.maxIntnlNodePerCell / 2;
    thrust::transform_if(
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeIntnlNodeCounts.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.lastCheckPoint.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeIntnlNodeCounts.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.lastCheckPoint.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.isScheduledToGrow.begin(),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.lastCheckPoint.begin())),
            AdjustGrowth(halfMax), thrust::identity<bool>());
}

// Converts raw animation data into a VtkAnimationData record: one point
// entry per animation node, followed by internal links and membrane links
// merged into a single link list. aniCri is currently unused here.
VtkAnimationData SceCells::outputVtkData(AniRawData& rawAniData,
        AnimationCriteria& aniCri) {
    VtkAnimationData vtkData;
    for (uint i = 0; i < rawAniData.aniNodePosArr.size(); i++) {
        PointAniData ptAniData;
        ptAniData.pos = rawAniData.aniNodePosArr[i];
        ptAniData.F_MI_M_MagN_Int= rawAniData.aniNodeF_MI_M_MagN_Int[i]; //AliE
        ptAniData.dppLevel1= rawAniData.dppLevel[i]; //AliE
        ptAniData.F_MI_M = rawAniData.aniNodeF_MI_M[i];//AAMIRI
        ptAniData.colorScale = rawAniData.aniNodeVal[i];
        ptAniData.colorScale2 = rawAniData.aniNodeCurvature[i];//AAMIRI
        ptAniData.rankScale = rawAniData.aniNodeRank[i];//AAMIRI
        ptAniData.extForce = rawAniData.aniNodeExtForceArr[i];//AAMIRI
        vtkData.pointsAniData.push_back(ptAniData);
    }
    for (uint i = 0; i < rawAniData.internalLinks.size(); i++) {
        LinkAniData linkData = rawAniData.internalLinks[i];
        vtkData.linksAniData.push_back(linkData);
    }
    for (uint i = 0; i < rawAniData.memLinks.size(); i++) {
        LinkAniData linkData = rawAniData.memLinks[i];
        vtkData.linksAniData.push_back(linkData);
    }
    vtkData.isArrowIncluded =
false;
    return vtkData;
}

// Uploads model parameters from the global configuration into device
// constant-memory symbols via hipMemcpyToSymbol (membrane stiffness/bending
// coefficients, node-count limits, and the 5-element SCE potential
// parameter sets).
// NOTE(review): none of the hipMemcpyToSymbol return codes are checked; a
// failed upload would go unnoticed until the kernels misbehave.
void SceCells::copyToGPUConstMem() {
    double pI_CPU = acos(-1.0);
    double minLengthCPU =
            globalConfigVars.getConfigValue("MinLength").toDouble();
    hipMemcpyToSymbol(minLength, &minLengthCPU, sizeof(double));
    double minDivisorCPU =
            globalConfigVars.getConfigValue("MinDivisor").toDouble();
    hipMemcpyToSymbol(minDivisor, &minDivisorCPU, sizeof(double));
    hipMemcpyToSymbol(membrEquLen, &membrPara.membrEquLenCPU, sizeof(double));
    hipMemcpyToSymbol(membrStiff, &membrPara.membrStiffCPU, sizeof(double));
    hipMemcpyToSymbol(membrStiff_Mitotic, &membrPara.membrStiff_Mitotic,
            sizeof(double)); // Ali June 30
    hipMemcpyToSymbol(pI, &pI_CPU, sizeof(double));
    hipMemcpyToSymbol(bendCoeff, &membrPara.membrBendCoeff, sizeof(double));
    hipMemcpyToSymbol(bendCoeff_Mitotic, &membrPara.membrBendCoeff_Mitotic,
            sizeof(double));//AAMIRI
    hipMemcpyToSymbol(F_Ext_Incline_M2, &membrPara.F_Ext_Incline,
            sizeof(double)); //Ali
    uint maxAllNodePerCellCPU = globalConfigVars.getConfigValue(
            "MaxAllNodeCountPerCell").toInt();
    uint maxMembrNodePerCellCPU = globalConfigVars.getConfigValue(
            "MaxMembrNodeCountPerCell").toInt();
    uint maxIntnlNodePerCellCPU = globalConfigVars.getConfigValue(
            "MaxIntnlNodeCountPerCell").toInt();
    hipMemcpyToSymbol(maxAllNodePerCell, &maxAllNodePerCellCPU, sizeof(uint));
    hipMemcpyToSymbol(maxMembrPerCell, &maxMembrNodePerCellCPU, sizeof(uint));
    hipMemcpyToSymbol(maxIntnlPerCell, &maxIntnlNodePerCellCPU, sizeof(uint));
    // 5-element parameter sets: {U0, V0, k1, k2, effective range}
    double sceIntnlBParaCPU_M[5];
    double sceIntraParaCPU_M[5];
    double sceIntraParaDivCPU_M[5];
    double U0_IntnlB =
            globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble();
    double V0_IntnlB =
            globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble();
    double k1_IntnlB =
            globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble();
    double k2_IntnlB =
            globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble();
    double intnlBEffectiveRange = globalConfigVars.getConfigValue(
            "IntnlBEffectRange").toDouble();
    sceIntnlBParaCPU_M[0] = U0_IntnlB;
sceIntnlBParaCPU_M[1] = V0_IntnlB;
    sceIntnlBParaCPU_M[2] = k1_IntnlB;
    sceIntnlBParaCPU_M[3] = k2_IntnlB;
    sceIntnlBParaCPU_M[4] = intnlBEffectiveRange;
    //////////////////////
    //// Block 3 /////////
    //////////////////////
    // intra-cell (internal-internal) potential parameters
    double U0_Intra =
            globalConfigVars.getConfigValue("IntraCell_U0").toDouble();
    double V0_Intra =
            globalConfigVars.getConfigValue("IntraCell_V0").toDouble();
    double k1_Intra =
            globalConfigVars.getConfigValue("IntraCell_k1").toDouble();
    double k2_Intra =
            globalConfigVars.getConfigValue("IntraCell_k2").toDouble();
    double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
            "IntraEffectRange").toDouble();
    sceIntraParaCPU_M[0] = U0_Intra;
    sceIntraParaCPU_M[1] = V0_Intra;
    sceIntraParaCPU_M[2] = k1_Intra;
    sceIntraParaCPU_M[3] = k2_Intra;
    sceIntraParaCPU_M[4] = intraLinkEffectiveRange;
    //////////////////////
    //// Block 4 /////////
    //////////////////////
    // intra-cell potential parameters used during division
    double U0_Intra_Div =
            globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble();
    double V0_Intra_Div =
            globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble();
    double k1_Intra_Div =
            globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble();
    double k2_Intra_Div =
            globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble();
    double intraDivEffectiveRange = globalConfigVars.getConfigValue(
            "IntraDivEffectRange").toDouble();
    sceIntraParaDivCPU_M[0] = U0_Intra_Div;
    sceIntraParaDivCPU_M[1] = V0_Intra_Div;
    sceIntraParaDivCPU_M[2] = k1_Intra_Div;
    sceIntraParaDivCPU_M[3] = k2_Intra_Div;
    sceIntraParaDivCPU_M[4] = intraDivEffectiveRange;
    hipMemcpyToSymbol(grthPrgrCriEnd_M, &growthAuxData.grthProgrEndCPU,
            sizeof(double));
    //hipMemcpyToSymbol(grthPrgrCriVal_M, &growthPrgrCriVal, sizeof(double));
    hipMemcpyToSymbol(sceIB_M, sceIntnlBParaCPU_M, 5 * sizeof(double));
    hipMemcpyToSymbol(sceII_M, sceIntraParaCPU_M, 5 * sizeof(double));
    hipMemcpyToSymbol(sceIIDiv_M, sceIntraParaDivCPU_M, 5 * sizeof(double));
    // internal-boundary potential parameters used during division
    double IBDivHost[5];
    IBDivHost[0] =
            globalConfigVars.getConfigValue("SceIntnlB_U0_Div").toDouble();
IBDivHost[1] =
            globalConfigVars.getConfigValue("SceIntnlB_V0_Div").toDouble();
    IBDivHost[2] =
            globalConfigVars.getConfigValue("SceIntnlB_k1_Div").toDouble();
    IBDivHost[3] =
            globalConfigVars.getConfigValue("SceIntnlB_k2_Div").toDouble();
    IBDivHost[4] =
            globalConfigVars.getConfigValue("IntnlBDivEffectRange").toDouble();
    hipMemcpyToSymbol(sceIBDiv_M, IBDivHost, 5 * sizeof(double));
}

// Top-level driver for membrane growth: compute growth speed, adjust it,
// decide which cells add a membrane node this step, and add the nodes.
void SceCells::handleMembrGrowth_M() {
    // figure out membr growth speed
    calMembrGrowSpeed_M();
    // figure out which cells will add new point
    adjustMembrGrowSpeed_M();
    decideIfAddMembrNode_M();
    // add membr nodes
    addMembrNodes_M();
    //membrDebug();
}

// Computes per-cell membrane growth speed. First reduce_by_key finds, per
// cell, the membrane element with maximum tension (plus its index, midpoint
// and distance); second reduce_by_key sums tension magnitudes, which are
// then averaged over active membrane nodes and mapped to a capped growth
// speed via MultiWithLimit.
void SceCells::calMembrGrowSpeed_M() {
    membrPara.membrGrowCoeff = growthAuxData.prolifDecay
            * membrPara.membrGrowCoeff_Ori;
    membrPara.membrGrowLimit = growthAuxData.prolifDecay
            * membrPara.membrGrowLimit_Ori;
    // reduce_by_key, find value of max tension and their index
    thrust::counting_iterator<uint> iBegin(0);
    uint maxNPerCell = allocPara_m.maxAllNodePerCell;
    thrust::reduce_by_key(
            make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)),
            make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))
                    + totalNodeCountForActiveCells,
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            nodes->getInfoVecs().membrTenMagRi.begin(),
                            make_transform_iterator(iBegin,
                                    ModuloFunctor(maxNPerCell)),
                            nodes->getInfoVecs().membrLinkRiMidX.begin(),
                            nodes->getInfoVecs().membrLinkRiMidY.begin(),
                            nodes->getInfoVecs().membrDistToRi.begin())),
            cellInfoVecs.cellRanksTmpStorage.begin(),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(),
                            cellInfoVecs.maxTenIndxVec.begin(),
                            cellInfoVecs.maxTenRiMidXVec.begin(),
                            cellInfoVecs.maxTenRiMidYVec.begin(),
                            cellInfoVecs.maxDistToRiVec.begin())),
            thrust::equal_to<uint>(), MaxWInfo());
    // sum of tension magnitudes per cell
    thrust::reduce_by_key(
            make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)),
            make_transform_iterator(iBegin, DivideFunctor(maxNPerCell))
                    + totalNodeCountForActiveCells,
            nodes->getInfoVecs().membrTensionMag.begin(),
cellInfoVecs.cellRanksTmpStorage.begin(),
            cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(),
            thrust::plus<double>());
    // total tension -> average tension per active membrane node
    thrust::transform(cellInfoVecs.aveTension.begin(),
            cellInfoVecs.aveTension.begin()
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.activeMembrNodeCounts.begin(),
            cellInfoVecs.aveTension.begin(), thrust::divides<double>());
    // linear relationship with highest tension; capped by a given value
    thrust::transform(cellInfoVecs.aveTension.begin(),
            cellInfoVecs.aveTension.begin()
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.membrGrowSpeed.begin(),
            MultiWithLimit(membrPara.membrGrowCoeff,
                    membrPara.membrGrowLimit));
}

// Rescales each cell's membrane growth speed based on its current membrane
// and internal node counts (AdjustMembrGrow functor).
void SceCells::adjustMembrGrowSpeed_M() {
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.activeIntnlNodeCounts.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.membrGrowSpeed.begin(),
            AdjustMembrGrow(membrPara.growthConst_N, membrPara.initMembrCt_N,
                    membrPara.initIntnlCt_N));
}

// Advances membrane growth progress by dt * growth speed (saxpy), then lets
// MemGrowFunc decide per cell whether a membrane node should be added.
void SceCells::decideIfAddMembrNode_M() {
    // decide if add membrane node given current active node count and
    // membr growth progress
    uint curActCellCt = allocPara_m.currentActiveCellCount;
    thrust::transform(cellInfoVecs.membrGrowSpeed.begin(),
            cellInfoVecs.membrGrowSpeed.begin() + curActCellCt,
            cellInfoVecs.membrGrowProgress.begin(),
            cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt));
    uint maxMembrNode = allocPara_m.maxMembrNodePerCell;
    /**Ali
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(),
                            cellInfoVecs.activeMembrNodeCounts.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(),
                            cellInfoVecs.activeMembrNodeCounts.begin()))
                    + curActCellCt,
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(),
cellInfoVecs.membrGrowProgress.begin())),
            MemGrowFunc(maxMembrNode));
    */
    // current version also feeds maxDistToRiVec into MemGrowFunc
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.membrGrowProgress.begin(),
                            cellInfoVecs.maxDistToRiVec.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.membrGrowProgress.begin(),
                            cellInfoVecs.maxDistToRiVec.begin()))
                    + curActCellCt,
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(),
                            cellInfoVecs.membrGrowProgress.begin())),
            MemGrowFunc(maxMembrNode));
}

/**
 * Add new membrane elements to cells.
 * This operation is relatively expensive because of memory rearrangement.
 */
void SceCells::addMembrNodes_M() {
    thrust::counting_iterator<uint> iBegin(0);
    uint curAcCCount = allocPara_m.currentActiveCellCount;
    uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
    // For each cell flagged by isMembrAddingNode, AddMemNode inserts a node
    // at the recorded max-tension midpoint and bumps the active count.
    thrust::transform_if(
            thrust::make_zip_iterator(
                    thrust::make_tuple(iBegin,
                            cellInfoVecs.maxTenIndxVec.begin(),
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.maxTenRiMidXVec.begin(),
                            cellInfoVecs.maxTenRiMidYVec.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(iBegin,
                            cellInfoVecs.maxTenIndxVec.begin(),
                            cellInfoVecs.activeMembrNodeCounts.begin(),
                            cellInfoVecs.maxTenRiMidXVec.begin(),
                            cellInfoVecs.maxTenRiMidYVec.begin()))
                    + curAcCCount,
            cellInfoVecs.isMembrAddingNode.begin(),
            cellInfoVecs.activeMembrNodeCounts.begin(),
            AddMemNode(maxNodePerCell, growthAuxData.nodeIsActiveAddress,
                    growthAuxData.nodeXPosAddress,
                    growthAuxData.nodeYPosAddress, growthAuxData.adhIndxAddr),
            thrust::identity<bool>());
}

// Debug dump of membrane mechanics vectors (tension, link midpoints,
// bending terms) to stdout; blocks on std::cin as a manual breakpoint.
void SceCells::membrDebug() {
    uint curAcCCount = allocPara_m.currentActiveCellCount;
    uint maxActiveNodeC = curAcCCount * allocPara_m.maxAllNodePerCell;
    uint maxNodePC = allocPara_m.maxAllNodePerCell;
    //uint tmp = 0;
    //for (uint i = 0; i < curAcCCount; i++) {
    //    tmp += cellInfoVecs.isMembrAddingNode[i];
    //}
    //if (tmp != 0) {
    //    tmpDebug = true;
    //}
    //if (!tmpDebug) {
// return;
    //}
    // tension magnitude at nodes 0, 199 and 200 of every cell
    for (uint i = 0; i < maxActiveNodeC; i++) {
        if (i % maxNodePC == 0 || i % maxNodePC == 199
                || i % maxNodePC == 200) {
            std::cout << nodes->getInfoVecs().membrTensionMag[i] << " ";
        }
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        if (i % maxNodePC == 0 || i % maxNodePC == 199
                || i % maxNodePC == 200) {
            std::cout << nodes->getInfoVecs().membrTenMagRi[i] << " ";
        }
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        if (i % maxNodePC == 0 || i % maxNodePC == 199
                || i % maxNodePC == 200) {
            std::cout << nodes->getInfoVecs().membrLinkRiMidX[i] << " ";
        }
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        if (i % maxNodePC == 0 || i % maxNodePC == 199
                || i % maxNodePC == 200) {
            std::cout << nodes->getInfoVecs().membrLinkRiMidY[i] << " ";
        }
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        std::cout << nodes->getInfoVecs().membrBendLeftX[i] << " ";
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        std::cout << nodes->getInfoVecs().membrBendLeftY[i] << " ";
    }
    std::cout << std::endl;
    for (uint i = 0; i < maxActiveNodeC; i++) {
        std::cout << nodes->getInfoVecs().membrBendRightX[i] << " ";
    }
    std::cout << std::endl;
    // NOTE(review): membrBendRightX is printed twice; this second loop was
    // probably meant to print membrBendRightY -- confirm before relying on
    // this output.
    for (uint i = 0; i < maxActiveNodeC; i++) {
        std::cout << nodes->getInfoVecs().membrBendRightX[i] << " ";
    }
    std::cout << std::endl;
    // per-cell (max-tension index, active count, max-tension midpoint)
    for (uint i = 0; i < curAcCCount; i++) {
        std::cout << "(" << cellInfoVecs.maxTenIndxVec[i] << ","
                << cellInfoVecs.activeMembrNodeCounts[i] << ","
                << cellInfoVecs.maxTenRiMidXVec[i] << ","
                << cellInfoVecs.maxTenRiMidYVec[i] << ")" << std::endl;
    }
    int jj;
    std::cin >> jj;
}

// Packs the two daughter cells' node sets (membrane first, then internal)
// into the flat tmp*Pos*/tmpIsActive* arrays at cell slot i, and records
// the active counts. Slots beyond each daughter's node count are marked
// inactive.
void SceCells::assembleVecForTwoCells(uint i) {
    uint membThreshold = allocPara_m.maxMembrNodePerCell;
    uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
    uint index;
    // daughter 1: membrane nodes
    for (uint j = 0; j < membThreshold; j++) {
        index = i * maxAllNodePerCell + j;
        if (j < divAuxData.tmp1VecMem.size()) {
            divAuxData.tmpXPos1_M[index] = divAuxData.tmp1VecMem[j].x;
divAuxData.tmpYPos1_M[index] = divAuxData.tmp1VecMem[j].y;
            divAuxData.tmpIsActive1_M[index] = true;
        } else {
            divAuxData.tmpIsActive1_M[index] = false;
        }
    }
    // daughter 2: membrane nodes
    for (uint j = 0; j < membThreshold; j++) {
        index = i * maxAllNodePerCell + j;
        if (j < divAuxData.tmp2VecMem.size()) {
            divAuxData.tmpXPos2_M[index] = divAuxData.tmp2VecMem[j].x;
            divAuxData.tmpYPos2_M[index] = divAuxData.tmp2VecMem[j].y;
            divAuxData.tmpIsActive2_M[index] = true;
        } else {
            divAuxData.tmpIsActive2_M[index] = false;
        }
    }
    divAuxData.tmp1MemActiveCounts.push_back(divAuxData.tmp1VecMem.size());
    divAuxData.tmp2MemActiveCounts.push_back(divAuxData.tmp2VecMem.size());
    // both daughters: internal nodes occupy slots after the membrane range
    for (uint j = membThreshold; j < maxAllNodePerCell; j++) {
        index = i * maxAllNodePerCell + j;
        uint shift_j = j - membThreshold;
        if (shift_j < divAuxData.tmp1IntnlVec.size()) {
            divAuxData.tmpXPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].x;
            divAuxData.tmpYPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].y;
            divAuxData.tmpIsActive1_M[index] = true;
        } else {
            divAuxData.tmpIsActive1_M[index] = false;
        }
        if (shift_j < divAuxData.tmp2IntnlVec.size()) {
            divAuxData.tmpXPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].x;
            divAuxData.tmpYPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].y;
            divAuxData.tmpIsActive2_M[index] = true;
        } else {
            divAuxData.tmpIsActive2_M[index] = false;
        }
    }
    divAuxData.tmp1InternalActiveCounts.push_back(
            divAuxData.tmp1IntnlVec.size());
    divAuxData.tmp2InternalActiveCounts.push_back(
            divAuxData.tmp2IntnlVec.size());
}

// Recenters the two daughters' internal-node clouds: each cloud is shifted
// so that its centroid coincides with the given daughter center.
// NOTE(review): divides by tmp*IntnlVec.size() without guarding against an
// empty vector -- confirm callers guarantee at least one internal node.
void SceCells::shiftIntnlNodesByCellCenter(CVector cell1Center,
        CVector cell2Center) {
    CVector tmpCell1Center(0, 0, 0);
    for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) {
        tmpCell1Center = tmpCell1Center + divAuxData.tmp1IntnlVec[j];
    }
    tmpCell1Center = tmpCell1Center / divAuxData.tmp1IntnlVec.size();
    CVector shiftVec1 = cell1Center - tmpCell1Center;
    for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) {
        divAuxData.tmp1IntnlVec[j] = divAuxData.tmp1IntnlVec[j] + shiftVec1;
    }
    CVector tmpCell2Center(0, 0, 0);
for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) {
        tmpCell2Center = tmpCell2Center + divAuxData.tmp2IntnlVec[j];
    }
    tmpCell2Center = tmpCell2Center / divAuxData.tmp2IntnlVec.size();
    CVector shiftVec2 = cell2Center - tmpCell2Center;
    for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) {
        divAuxData.tmp2IntnlVec[j] = divAuxData.tmp2IntnlVec[j] + shiftVec2;
    }
}

// Builds the two daughter membranes: sorts each daughter's inherited
// membrane nodes, generates evenly spaced points to close the gap between
// the last and first node (obtainPtsBetween), and stores the combined node
// lists in divAuxData.tmp1VecMem / tmp2VecMem (asserted to fit within the
// per-cell membrane capacity).
void SceCells::processMemVec(std::vector<VecVal>& tmp1,
        std::vector<VecVal>& tmp2) {
    divAuxData.tmp1VecMem.clear();
    divAuxData.tmp2VecMem.clear();
    uint membThreshold = allocPara_m.maxMembrNodePerCell;
    std::sort(tmp1.begin(), tmp1.end());
    std::sort(tmp2.begin(), tmp2.end());
    //assert(tmp1.size() < allocPara_m.maxMembrNodePerCell);
    //assert(tmp2.size() < allocPara_m.maxMembrNodePerCell);
    // remaining capacity available for the generated gap-filling points
    uint maxDivMembrNodeCount1 = allocPara_m.maxMembrNodePerCell - tmp1.size();
    uint maxDivMembrNodeCount2 = allocPara_m.maxMembrNodePerCell - tmp2.size();
    std::vector<CVector> ptsBetween1, ptsBetween2;
    // if size is less than 1, the situation would have already been very bad.
    // Just keep this statement so no seg fault would happen.
    if (tmp1.size() >= 1) {
        ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec,
                memNewSpacing, maxDivMembrNodeCount1);
    }
    // if size is less than 1, the situation would have already been very bad.
    // Just keep this statement so no seg fault would happen.
if (tmp2.size() >= 1) {
        ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec,
                memNewSpacing, maxDivMembrNodeCount2);
    }
    // inherited nodes first, then the generated gap-filling points
    for (uint j = 0; j < tmp1.size(); j++) {
        divAuxData.tmp1VecMem.push_back(tmp1[j].vec);
    }
    for (uint j = 0; j < tmp2.size(); j++) {
        divAuxData.tmp2VecMem.push_back(tmp2[j].vec);
    }
    for (uint j = 0; j < ptsBetween1.size(); j++) {
        divAuxData.tmp1VecMem.push_back(ptsBetween1[j]);
    }
    for (uint j = 0; j < ptsBetween2.size(); j++) {
        divAuxData.tmp2VecMem.push_back(ptsBetween2[j]);
    }
    assert(divAuxData.tmp1VecMem.size() <= membThreshold);
    assert(divAuxData.tmp2VecMem.size() <= membThreshold);
}

// Splits cell i's active nodes (read from divAuxData snapshots) into
// membrane and internal position lists; slots below membThreshold are
// membrane nodes, the rest are internal.
void SceCells::obtainMembrAndIntnlNodes(uint i, vector<CVector>& membrNodes,
        vector<CVector>& intnlNodes) {
    membrNodes.clear();
    intnlNodes.clear();
    uint membThreshold = allocPara_m.maxMembrNodePerCell;
    uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
    uint index;
    for (uint j = 0; j < maxAllNodePerCell; j++) {
        index = i * maxAllNodePerCell + j;
        if (divAuxData.tmpIsActive_M[index] != true) {
            continue;
        }
        double posX = divAuxData.tmpNodePosX_M[index];
        double posY = divAuxData.tmpNodePosY_M[index];
        if (j < membThreshold) {
            // means node type is membrane
            CVector memPos(posX, posY, 0);
            membrNodes.push_back(memPos);
        } else {
            CVector intnlPos(posX, posY, 0);
            intnlNodes.push_back(intnlPos);
        }
    }
}

// Returns cell i's stored center position (z fixed at 0).
CVector SceCells::obtainCenter(uint i) {
    double oldCenterX = divAuxData.tmpCenterPosX_M[i];
    double oldCenterY = divAuxData.tmpCenterPosY_M[i];
    CVector centerPos(oldCenterX, oldCenterY, 0);
    return centerPos;
}

// O(n^2) search for the membrane direction that maximizes the projected
// extent of the membrane nodes around center; also reports that extent via
// lenAlongMajorAxis.
CVector SceCells::calDivDir_MajorAxis(CVector center,
        vector<CVector>& membrNodes, double& lenAlongMajorAxis) {
    // not the optimal algorithm but easy to code
    double maxDiff = 0;
    CVector majorAxisDir;
    for (uint i = 0; i < membrNodes.size(); i++) {
        CVector tmpDir = membrNodes[i] - center;
        CVector tmpUnitDir = tmpDir.getUnitVector();
        double min = 0, max = 0;
        for (uint j = 0; j < membrNodes.size(); j++) {
            CVector tmpDir2 = membrNodes[j] - center;
            double
tmpVecProduct = tmpDir2 * tmpUnitDir;
            if (tmpVecProduct < min) {
                min = tmpVecProduct;
            }
            if (tmpVecProduct > max) {
                max = tmpVecProduct;
            }
        }
        double diff = max - min;
        if (diff > maxDiff) {
            maxDiff = diff;
            majorAxisDir = tmpUnitDir;
        }
    }
    lenAlongMajorAxis = maxDiff;
    return majorAxisDir;
}

//A&A
// Projected extent of the membrane along a given division direction:
// tracks the extreme projections of the unit direction to each node and
// returns the difference of the corresponding full-length projections.
double SceCells::calLengthAlongHertwigAxis(CVector divDir, CVector center,
        vector<CVector>& membrNodes) {
    CVector divDirUnit = divDir.getUnitVector();
    double minUnit = 0, maxUnit = 0;
    double minOveral = 0, maxOveral = 0;
    for (uint i = 0; i < membrNodes.size(); i++) {
        CVector tmpDir = membrNodes[i] - center;
        CVector tmpUnitDir = tmpDir.getUnitVector();
        double tmpVecProductUnit = divDirUnit * tmpUnitDir;
        double tmpVecProductOveral = divDirUnit * tmpDir;
        if (tmpVecProductUnit < minUnit) {
            minUnit = tmpVecProductUnit;
            minOveral = tmpVecProductOveral;
        }
        if (tmpVecProductUnit > maxUnit) {
            maxUnit = tmpVecProductUnit;
            maxOveral = tmpVecProductOveral;
        }
    }
    double lenAlongHertwigAxis = maxOveral - minOveral;
    return lenAlongHertwigAxis;
}

// Places the two daughter centers symmetrically about oldCenter along
// divDir, separated by len_MajorAxis * centerShiftRatio.
void SceCells::obtainTwoNewCenters(CVector& oldCenter, CVector& divDir,
        double len_MajorAxis, CVector& centerNew1, CVector& centerNew2) {
    CVector divDirUnit = divDir.getUnitVector();
    double lenChange = len_MajorAxis / 2.0 * centerShiftRatio;
    centerNew1 = oldCenter + lenChange * divDirUnit;
    centerNew2 = oldCenter - lenChange * divDirUnit;
}

// Partitions cell i's nodes into the two daughter sets. Membrane nodes are
// assigned by the sign of the cross product with the split line (divDir
// rotated 90 degrees); internal nodes are shrunk toward the old center and
// assigned by the sign of their projection onto divDir.
void SceCells::prepareTmpVec(uint i, CVector divDir, CVector oldCenter,
        std::vector<VecVal>& tmp1, std::vector<VecVal>& tmp2) {
    tmp1.clear();
    tmp2.clear();
    uint membThreshold = allocPara_m.maxMembrNodePerCell;
    uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
    uint index;
    VecVal tmpData;
    CVector splitDir = divDir.rotateNintyDeg_XY_CC();
    for (uint j = 0; j < maxAllNodePerCell; j++) {
        index = i * maxAllNodePerCell + j;
        if (j < membThreshold) {
            // means node type is membrane
            if (divAuxData.tmpIsActive_M[index] == true) {
                CVector memPos(divAuxData.tmpNodePosX_M[index],
                        divAuxData.tmpNodePosY_M[index], 0);
                CVector
centerToPosDir = memPos - oldCenter;
                CVector centerToPosUnit = centerToPosDir.getUnitVector();
                CVector crossProduct = Cross(centerToPosDir, splitDir);
                double dotProduct = centerToPosUnit * splitDir;
                tmpData.val = dotProduct;
                tmpData.vec = memPos;
                if (crossProduct.z >= 0) {
                    // counter-clockwise side of the split line
                    tmp1.push_back(tmpData);
                } else {
                    // clockwise side of the split line
                    tmp2.push_back(tmpData);
                }
            }
        } else {
            if (divAuxData.tmpIsActive_M[index] == true) {
                CVector internalPos(divAuxData.tmpNodePosX_M[index],
                        divAuxData.tmpNodePosY_M[index], 0);
                CVector centerToPosDir = internalPos - oldCenter;
                // pull internal nodes toward the old center by shrinkRatio
                CVector shrinkedPos = centerToPosDir * shrinkRatio + oldCenter;
                double dotProduct = centerToPosDir * divDir;
                if (dotProduct > 0) {
                    divAuxData.tmp1IntnlVec.push_back(shrinkedPos);
                } else {
                    divAuxData.tmp2IntnlVec.push_back(shrinkedPos);
                }
            }
        }
    }
}

// Computes each cell's area on the device by summing signed triangle areas
// (CalTriArea over node pairs and the cell center) with reduce_by_key,
// writing the result into cellInfoVecs.cellAreaVec.
void SceCells::calCellArea() {
    thrust::counting_iterator<uint> iBegin(0), iBegin2(0);
    totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
            * allocPara_m.maxAllNodePerCell;
    uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
    // raw device pointers consumed by the CalTriArea functor
    double* nodeLocXAddr = thrust::raw_pointer_cast(
            &(nodes->getInfoVecs().nodeLocX[0]));
    double* nodeLocYAddr = thrust::raw_pointer_cast(
            &(nodes->getInfoVecs().nodeLocY[0]));
    bool* nodeIsActiveAddr = thrust::raw_pointer_cast(
            &(nodes->getInfoVecs().nodeIsActive[0]));
    thrust::reduce_by_key(
            make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)),
            make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))
                    + totalNodeCountForActiveCells,
            thrust::make_transform_iterator(
                    thrust::make_zip_iterator(
                            thrust::make_tuple(
                                    thrust::make_permutation_iterator(
                                            cellInfoVecs.activeMembrNodeCounts.begin(),
                                            make_transform_iterator(iBegin,
                                                    DivideFunctor(
                                                            maxAllNodePerCell))),
                                    make_transform_iterator(iBegin,
                                            DivideFunctor(maxAllNodePerCell)),
                                    make_transform_iterator(iBegin,
                                            ModuloFunctor(maxAllNodePerCell)),
                                    make_permutation_iterator(
                                            cellInfoVecs.centerCoordX.begin(),
                                            make_transform_iterator(iBegin,
                                                    DivideFunctor(
maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalTriArea(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellAreaVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } //AAMIRI added to calculate Perimeter of each cell void SceCells::calCellPerim() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalPerim(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellPerimVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } CellsStatsData SceCells::outputPolyCountData() { cout << 
" I am at begining of outpolycount"<< std::flush ; std::cout.flush(); double sumX,sumY,cntr_X_Domain,cntr_Y_Domain ; int BdryApproach ; BdryApproach=1 ; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; cout << " I am before cells area"<< endl ; calCellArea(); cout << " I am after cells area" << endl ; calCellPerim();//AAMIRI CellsStatsData result; cout << " I am after result" << endl ; uint bdryCriteria = globalConfigVars.getConfigValue("BdryCellCriteria").toInt(); // already on host; no need to call thrust::copy thrust::host_vector<int> adhIndxHost = nodes->getInfoVecs().nodeAdhIndxHostCopy; thrust::host_vector<double> growthProVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, growthProVecHost.begin()); thrust::host_vector<double> growthProMembrVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin() + allocPara_m.currentActiveCellCount, growthProMembrVecHost.begin()); thrust::host_vector<uint> activeMembrNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeMembrNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeMembrNodeCountHost.begin()); thrust::host_vector<uint> activeIntnlNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeIntnlNodeCountHost.begin()); thrust::host_vector<double> centerCoordXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> centerCoordYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount, centerCoordXHost.begin()); 
thrust::copy(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount, centerCoordYHost.begin()); thrust::host_vector<double> cellAreaHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> cellPerimHost( allocPara_m.currentActiveCellCount);//AAMIRI thrust::host_vector<double> cellDppHost( allocPara_m.currentActiveCellCount);//Ali thrust::copy(cellInfoVecs.cellAreaVec.begin(), cellInfoVecs.cellAreaVec.begin() + allocPara_m.currentActiveCellCount, cellAreaHost.begin()); thrust::copy(cellInfoVecs.cellPerimVec.begin(), cellInfoVecs.cellPerimVec.begin() + allocPara_m.currentActiveCellCount, cellPerimHost.begin());//AAMIRI thrust::copy(cellInfoVecs.cell_Dpp.begin(), cellInfoVecs.cell_Dpp.begin() + allocPara_m.currentActiveCellCount, cellDppHost.begin());//Ali sumX=0 ; sumY=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { CellStats cellStatsData; cellStatsData.cellGrowthProgress = growthProVecHost[i]; cellStatsData.cellRank = i; bool isBdry = false; std::set<int> neighbors; std::vector<int> neighborsV; //Ali int neighborStrength[10]; //Ali int continousNoAdh = 0; map <int, int> cellAndNeighborRank ; //Ali //std::cout << "printing adhesion indicies "; //for (int ii=0 ; ii<neighborStrength.size() ; ii++){ for (int ii=0 ; ii< 10; ii++){ //Ali neighborStrength[ii]=0 ; } cellAndNeighborRank.clear(); //Ali for (uint j = 0; j < activeMembrNodeCountHost[i]; j++) { uint index = i * allocPara_m.maxAllNodePerCell + j; //std::cout << adhIndxHost[index] << ","; if (adhIndxHost[index] != -1) { uint adhCellRank = adhIndxHost[index] / allocPara_m.maxAllNodePerCell; //std::cout << adhCellRank << " "; neighbors.insert(adhCellRank); map <int, int>:: iterator iteratorMap=cellAndNeighborRank.find(adhCellRank); //Ali if (iteratorMap==cellAndNeighborRank.end()) { //Ali int NewneighborRank= neighbors.size()-1; //Ali cellAndNeighborRank[adhCellRank]=NewneighborRank; //Ali 
neighborStrength[NewneighborRank]=neighborStrength[NewneighborRank]+1 ; //Ali neighborsV.push_back(adhCellRank); //Ali } else { //Ali int oldNeighborRank=(*iteratorMap).second ; neighborStrength[oldNeighborRank]=neighborStrength[oldNeighborRank]+1 ; //Ali } continousNoAdh = 0; } else { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } } if (j == activeMembrNodeCountHost[i] - 1 && adhIndxHost[index] == -1) { int k = 0; uint indexNew; while (k < activeMembrNodeCountHost[i] - 1) { indexNew = i * allocPara_m.maxAllNodePerCell + k; if (adhIndxHost[indexNew] == -1) { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } k++; } else { break; } } } } cellStatsData.isBdryCell = isBdry; cellStatsData.numNeighbors = neighbors.size(); cellStatsData.currentActiveMembrNodes = activeMembrNodeCountHost[i]; cellStatsData.currentActiveIntnlNodes = activeIntnlNodeCountHost[i]; cellStatsData.neighborVec = neighbors; cellStatsData.neighborVecV = neighborsV; //Ali for (int iiii=0; iiii<10 ; iiii++){ cellStatsData.cellNeighborStrength[iiii] = neighborStrength[iiii]; } //Ali cellStatsData.membrGrowthProgress = growthProMembrVecHost[i]; cellStatsData.cellCenter = CVector(centerCoordXHost[i], centerCoordYHost[i], 0); cellStatsData.cellArea = cellAreaHost[i]; cellStatsData.cellPerim = cellPerimHost[i];//AAMIRI cellStatsData.cellDpp = cellDppHost[i];//Ali result.cellsStats.push_back(cellStatsData); sumX=sumX+cellStatsData.cellCenter.x ; sumY=sumY+cellStatsData.cellCenter.y ; } //Ali if (BdryApproach==2) { cout << "sumX=" << sumX << endl ; cout << "sumY=" << sumY << endl ; cntr_X_Domain=sumX/result.cellsStats.size() ; cntr_Y_Domain=sumY/result.cellsStats.size() ; cout << "cntr_X=" << cntr_X_Domain << endl ; cout << "cntr_Y=" << cntr_Y_Domain << endl ; double R_Max ; double Distance ; R_Max=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) 
+pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > R_Max) { R_Max=Distance ; } } cout << "R_Max=" << R_Max << endl ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > 0.9* R_Max) { result.cellsStats[i].isBdryCell = true; cout << "isBdryCell"<< i<< endl ; } else { result.cellsStats[i].isBdryCell = false; cout << "isNormalCell"<< i << endl ; } } } //Ali cout << "I want to write data" << endl ; // ofstream Stress_Strain_Single ; //Stress_Strain_Single.open("Stress_Strain_Single.txt"); //Stress_Strain_Single.close() ; //Ali result.MaxDistanceX=abs(centerCoordXHost[1]-centerCoordXHost[0]); //Ali result.Cells_Extrem_Loc[0]=Tisu_MinX; result.Cells_Extrem_Loc[1]=Tisu_MaxX; result.Cells_Extrem_Loc[2]=Tisu_MinY; result.Cells_Extrem_Loc[3]=Tisu_MaxY ; result.F_Ext_Out=membrPara.F_Ext_Incline*curTime ; //if (dt==curTime) { //result.Init_Displace=MaxX-MinX ; // } //Ali return result; } __device__ bool bigEnough(double& num) { if (num > minDivisor) { return true; } else { return false; } } __device__ double cross_Z(double vecA_X, double vecA_Y, double vecB_X, double vecB_Y) { return vecA_X * vecB_Y - vecA_Y * vecB_X; } /* __device__ double calBendMulti(double& angle, uint activeMembrCt) { double equAngle = PI - PI / activeMembrCt; return bendCoeff * (angle - equAngle); } */ //AAMIRI __device__ double calBendMulti_Mitotic(double& angle, uint activeMembrCt, double& progress, double mitoticCri) { double equAngle = PI - PI / activeMembrCt; if (progress <= mitoticCri){ return bendCoeff * (angle - equAngle);} else{ return (angle - equAngle)*(bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri)/(1.0 - mitoticCri)); } } void SceCells::applySceCellDisc_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell 
= allocPara_m.maxMembrNodePerCell; thrust::counting_iterator<uint> iBegin(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + 
totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //Ali added for cell pressure calculation nodes->getInfoVecs().nodeF_MI_M_y.begin())),// ALi added for cell pressure calculation AddSceCellForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M)); } __device__ void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } //Ali function added for eventually computing pressure for each cells __device__ void calAndAddIB_M2(double& xPos, 
double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength; F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void calAndAddII_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIIDiv_M[4]) { forceValue = -sceIIDiv_M[0] / sceIIDiv_M[2] * exp(-linkLength / sceIIDiv_M[2]) + sceIIDiv_M[1] / sceIIDiv_M[3] * exp(-linkLength / 
sceIIDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIIDiv_M[4]) + (1.0 - percent) * sceII_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceIIDiv_M[0]) + (1.0 - percent) * sceII_M[0]; double intraPara1 = percent * (sceIIDiv_M[1]) + (1.0 - percent) * sceII_M[1]; double intraPara2 = percent * (sceIIDiv_M[2]) + (1.0 - percent) * sceII_M[2]; double intraPara3 = percent * (sceIIDiv_M[3]) + (1.0 - percent) * sceII_M[3]; forceValue = -intraPara0 / intraPara2 * exp(-linkLength / intraPara2) + intraPara1 / intraPara3 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceII_M[4]) { forceValue = -sceII_M[0] / sceII_M[2] * exp(-linkLength / sceII_M[2]) + sceII_M[1] / sceII_M[3] * exp(-linkLength / sceII_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; }
7ae59d141c2700a5abb802a9c2cb17c4aeffbf70.cu
#include "SceCells.h" #include <cmath> double epsilon = 1.0e-12; __constant__ double membrEquLen; __constant__ double membrStiff; __constant__ double membrStiff_Mitotic; //Ali June 30 __constant__ double pI; __constant__ double minLength; __constant__ double minDivisor; __constant__ uint maxAllNodePerCell; __constant__ uint maxMembrPerCell; __constant__ uint maxIntnlPerCell; __constant__ double bendCoeff; __constant__ double bendCoeff_Mitotic;//AAMIRI __constant__ double sceIB_M[5]; __constant__ double sceIBDiv_M[5]; __constant__ double sceII_M[5]; __constant__ double sceIIDiv_M[5]; __constant__ double grthPrgrCriEnd_M; __constant__ double F_Ext_Incline_M2 ; //Ali //Ali & Abu June 30th __device__ double calMembrForce_Mitotic(double& length, double& progress, double mitoticCri) { if (progress <= mitoticCri) { return (length - membrEquLen) * membrStiff; } else { return (length - membrEquLen) *(membrStiff+ (membrStiff_Mitotic-membrStiff)* (progress-mitoticCri)/(1.0-mitoticCri)); } } // //Ali __device__ double calExtForce(double& curTime) { return curTime * F_Ext_Incline_M2; } //Ali __device__ double obtainRandAngle(uint& cellRank, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(cellRank); thrust::uniform_real_distribution<double> u0Pi(0, 2.0 * pI); double randomAngle = u0Pi(rng); return randomAngle; } __device__ uint obtainNewIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount); } //AAMIRI __device__ uint obtainLastIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount ); } //AAMIRI __device__ uint obtainMembEndNode(uint& cellRank, uint& activeMembrNodeThis) { return (cellRank * maxAllNodePerCell + activeMembrNodeThis - 1 ); } __device__ bool isAllIntnlFilled(uint& currentIntnlCount) { if (currentIntnlCount < maxIntnlPerCell) { return false; } else { return true; 
} } //AAMIRI __device__ int obtainRemovingMembrNodeID(uint &cellRank, uint& activeMembrNodes, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(activeMembrNodes); thrust::uniform_int_distribution<double> dist(0, activeMembrNodes-1); int randomNode = dist(rng); return (cellRank * maxAllNodePerCell + randomNode); } //AAMIRI __device__ bool isAllIntnlEmptied(uint& currentIntnlCount) { if (currentIntnlCount > 0) { return false; } else { return true; } } //AAMIRI __device__ bool isAllMembrEmptied(uint& currentMembrCount) { if (currentMembrCount > 0) { return false; } else { return true; } } __device__ bool longEnough(double& length) { if (length > minLength) { return true; } else { return false; } } __device__ double compDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) { return sqrt( (xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)); } void SceCells::distributeBdryIsActiveInfo() { thrust::fill(nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, true); } void SceCells::distributeProfileIsActiveInfo() { thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile + nodes->getAllocPara().currentActiveProfileNodeCount, true); } void SceCells::distributeECMIsActiveInfo() { uint totalNodeCountForActiveECM = allocPara.currentActiveECM * allocPara.maxNodePerECM; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveECM); thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosECM, nodes->getInfoVecs().nodeIsActive.begin() + totalNodeCountForActiveECM + allocPara.startPosECM, true); } void SceCells::distributeCellIsActiveInfo() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> 
countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::transform( thrust::make_transform_iterator(countingBegin, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_transform_iterator(countingEnd, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_permutation_iterator( cellInfoVecs.activeNodeCountOfThisCell.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::less<uint>()); } void SceCells::distributeCellGrowthProgress() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::copy( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingEnd, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeGrowPro.begin() + allocPara.startPosCells); } void MembrPara::initFromConfig() { membrEquLenCPU = globalConfigVars.getConfigValue("MembrEquLen").toDouble(); membrStiffCPU = globalConfigVars.getConfigValue("MembrStiff").toDouble(); membrStiff_Mitotic = globalConfigVars.getConfigValue("MembrStiff_Mitotic").toDouble(); //Ali June30 membrGrowCoeff_Ori = globalConfigVars.getConfigValue("MembrGrowCoeff").toDouble(); membrGrowLimit_Ori = globalConfigVars.getConfigValue("MembrGrowLimit").toDouble(); membrGrowCoeff = membrGrowCoeff_Ori; membrGrowLimit = membrGrowLimit_Ori; //Ali F_Ext_Incline = globalConfigVars.getConfigValue("FExtIncline").toDouble(); //Ali membrBendCoeff = globalConfigVars.getConfigValue("MembrBenCoeff").toDouble(); //AAMIRI membrBendCoeff_Mitotic = 
globalConfigVars.getConfigValue("MembrBenCoeff_Mitotic").toDouble(); adjustLimit = globalConfigVars.getConfigValue("MembrAdjustLimit").toDouble(); adjustCoeff = globalConfigVars.getConfigValue("MembrAdjustCoeff").toDouble(); growthConst_N = globalConfigVars.getConfigValue("MembrGrowthConst").toDouble(); initMembrCt_N = globalConfigVars.getConfigValue("InitMembrNodeCount").toInt(); initIntnlCt_N = globalConfigVars.getConfigValue("InitCellNodeCount").toInt(); } SceCells::SceCells() { //curTime = 0 + 55800.0;//AAMIRI // Ali I comment that our safely on 04/04/2017 std ::cout << "I am in SceCells constructor with zero element "<<InitTimeStage<<std::endl ; } void SceCells::growAtRandom(double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; // randomly select growth direction and speed. randomizeGrowth(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decode os schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); //std::cout << "after apply stretch force" << std::endl; cellChemotaxis(); //std::cout << "after apply cell chemotaxis" << std::endl; addPointIfScheduledToGrow(); //std::cout << "after adding node" << std::endl; } /** * Use the growth magnitude and dt to update growthProgress. 
*/ void SceCells::updateGrowthProgress() { thrust::transform(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); } /** * Decide if the cells are going to add a node or not. * Use lastCheckPoint and growthProgress to decide whether add point or not */ void SceCells::decideIsScheduleToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } /** * Calculate target length of cell given the cell growth progress. * length is along the growth direction. */ void SceCells::computeCellTargetLength() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } /** * Compute distance of each node to its corresponding cell center. * The distantce could be either positive or negative, depending on the pre-defined * growth direction. 
*/ void SceCells::computeDistToCellCenter() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } /** * For nodes of each cell, find the maximum and minimum distance to the center. 
* We will then calculate the current length of a cell along its growth direction * using max and min distance to the center. */ void SceCells::findMinAndMaxDistToCenter() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } /** * Compute the difference for cells between their expected length and current length. 
*/ void SceCells::computeLenDiffExpCur() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } /** * Use the difference that just computed and growthXDir&growthYDir * to apply stretching force (velocity) on nodes of all cells */ void SceCells::stretchCellGivenLenDiff() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + 
allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyStretchForce(bioPara.elongationCoefficient)); } /** * This is just an attempt. Cells move according to chemicals. */ void SceCells::cellChemotaxis() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyChemoVel(bioPara.chemoCoefficient)); } /** * Adjust the velocities of nodes. * For example, velocity of boundary nodes must be zero. 
*/ void SceCells::adjustNodeVel() { thrust::counting_iterator<uint> countingIterBegin(0); thrust::counting_iterator<uint> countingIterEnd( totalNodeCountForActiveCells + allocPara.startPosCells); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), VelocityModifier(allocPara.startPosProfile, allocPara.currentActiveProfileNodeCount)); } /** * Move nodes according to the velocity we just adjusted. */ void SceCells::moveNodes() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2(dt)); } /** * Add a point to a cell if it is scheduled to grow. 
 * This step does not guarantee success; if adding a new point failed, it will
 * not change isScheduleToGrow and activeNodeCount.
 */
void SceCells::addPointIfScheduledToGrow() {
	// AddPtOp works directly on raw device pointers (nodeIsActiveAddress etc.)
	// captured in growthAuxData; time(NULL) seeds its random placement.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
							cellInfoVecs.activeNodeCountOfThisCell.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(), countingBegin,
							cellInfoVecs.lastCheckPoint.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
							cellInfoVecs.activeNodeCountOfThisCell.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(), countingBegin,
							cellInfoVecs.lastCheckPoint.begin()))
					+ allocPara.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
							cellInfoVecs.activeNodeCountOfThisCell.begin(),
							cellInfoVecs.lastCheckPoint.begin())),
			AddPtOp(allocPara.maxNodeOfOneCell, miscPara.addNodeDistance,
					miscPara.minDistanceToOtherNode,
					growthAuxData.nodeIsActiveAddress,
					growthAuxData.nodeXPosAddress,
					growthAuxData.nodeYPosAddress, time(NULL),
					miscPara.growThreshold));
}

// Legacy (non-membrane) constructor. //Ali commented this constructor in 04/04/2017
// NOTE(review): the message below prints InitTimeStage, which is not set by
// this constructor — verify against callers.
SceCells::SceCells(SceNodes* nodesInput,
		std::vector<uint>& numOfInitActiveNodesOfCells,
		std::vector<SceNodeType>& cellTypes) :
		countingBegin(0), initIntnlNodeCount(
				nodesInput->getAllocPara().maxNodeOfOneCell / 2), initGrowthProgress(
				0.0) {
	curTime = 0.0 + 55800.0; //AAMIRI
	std::cout << "I am in SceCells constructor with polymorphism shape "<<InitTimeStage<<std::endl ;
	initialize(nodesInput);
	copyInitActiveNodeCount(numOfInitActiveNodesOfCells);
	thrust::device_vector<SceNodeType> cellTypesToPass = cellTypes;
	setCellTypes(cellTypesToPass);
	distributeIsActiveInfo();
}

// Membrane-model ("_M") constructor: reads membrane parameters from the global
// config, initializes the _M vectors and copies the initial per-cell node
// counts and growth progress.
SceCells::SceCells(SceNodes* nodesInput,
		std::vector<uint>& initActiveMembrNodeCounts,
		std::vector<uint>& initActiveIntnlNodeCounts,
		std::vector<double> &initGrowProgVec, double InitTimeStage) {
	// curTime = 0.0 + 55800.0; //AAMIRI
	curTime=InitTimeStage ;
	std::cout << "I am in SceCells constructor with number of inputs "<<InitTimeStage<<std::endl ;
	lastTimeExchange=0 ;      // accumulator driving periodic signal exchange
	firstTimeReadDpp=true ;   // forces one-time signaling init in exchSignal()
	//currentActiveCellCountOld=1 ; // small number
	tmpDebug = false;
	aniDebug = false;
	membrPara.initFromConfig();
	shrinkRatio = globalConfigVars.getConfigValue("ShrinkRatio").toDouble();
	centerShiftRatio =
			globalConfigVars.getConfigValue("CenterShiftRatio").toDouble();
	memNewSpacing = globalConfigVars.getConfigValue("MembrLenDiv").toDouble();
	initialize_M(nodesInput);
	cout<< "size of dpp in constructor is "<< cellInfoVecs.cell_Dpp.size() << endl ;
	copyToGPUConstMem();
	copyInitActiveNodeCount_M(initActiveMembrNodeCounts,
			initActiveIntnlNodeCounts, initGrowProgVec);
}

// Sizes every per-cell vector (legacy layout) to maxCellCount, with sensible
// defaults where a fill value is given.
void SceCells::initCellInfoVecs() {
	cellInfoVecs.growthProgress.resize(allocPara.maxCellCount, 0.0);
	cellInfoVecs.expectedLength.resize(allocPara.maxCellCount,
			bioPara.cellInitLength);
	cellInfoVecs.lengthDifference.resize(allocPara.maxCellCount, 0.0);
	cellInfoVecs.smallestDistance.resize(allocPara.maxCellCount);
	cellInfoVecs.biggestDistance.resize(allocPara.maxCellCount);
	cellInfoVecs.activeNodeCountOfThisCell.resize(allocPara.maxCellCount);
	cellInfoVecs.lastCheckPoint.resize(allocPara.maxCellCount, 0.0);
	cellInfoVecs.isDividing.resize(allocPara.maxCellCount);
	cellInfoVecs.cellTypes.resize(allocPara.maxCellCount, MX);
	cellInfoVecs.isScheduledToGrow.resize(allocPara.maxCellCount, false);
	cellInfoVecs.centerCoordX.resize(allocPara.maxCellCount);
	cellInfoVecs.centerCoordY.resize(allocPara.maxCellCount);
	cellInfoVecs.centerCoordZ.resize(allocPara.maxCellCount);
	cellInfoVecs.cellRanksTmpStorage.resize(allocPara.maxCellCount);
	cellInfoVecs.growthSpeed.resize(allocPara.maxCellCount, 0.0);
	cellInfoVecs.growthXDir.resize(allocPara.maxCellCount);
	cellInfoVecs.growthYDir.resize(allocPara.maxCellCount);
	cellInfoVecs.isRandGrowInited.resize(allocPara.maxCellCount, false);
}

// Sizes every per-cell vector of the membrane model to allocPara_m.maxCellCount.
void SceCells::initCellInfoVecs_M() {
	//std::cout << "max cell count = " << allocPara_m.maxCellCount << std::endl;
	cellInfoVecs.Cell_Damp.resize(allocPara_m.maxCellCount, 36.0); //Ali
	cellInfoVecs.cell_Dpp.resize(allocPara_m.maxCellCount, 0.0); //Ali
	cellInfoVecs.cell_DppOld.resize(allocPara_m.maxCellCount, 0.0); //Ali
	//cout<< "size of dpp in init is "<< cellInfoVecs.cell_Dpp.size() << endl ;
	cellInfoVecs.growthProgress.resize(allocPara_m.maxCellCount, 0.0); //A&A
	cellInfoVecs.growthProgressOld.resize(allocPara_m.maxCellCount, 0.0); //Ali
	cellInfoVecs.Cell_Time.resize(allocPara_m.maxCellCount, 0.0); //Ali
	cellInfoVecs.expectedLength.resize(allocPara_m.maxCellCount,
			bioPara.cellInitLength);
	cellInfoVecs.lengthDifference.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.smallestDistance.resize(allocPara_m.maxCellCount);
	cellInfoVecs.biggestDistance.resize(allocPara_m.maxCellCount);
	cellInfoVecs.activeMembrNodeCounts.resize(allocPara_m.maxCellCount);
	cellInfoVecs.activeIntnlNodeCounts.resize(allocPara_m.maxCellCount);
	cellInfoVecs.lastCheckPoint.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.isDividing.resize(allocPara_m.maxCellCount);
	cellInfoVecs.isEnteringMitotic.resize(allocPara_m.maxCellCount, false); //A&A
	//cellInfoVecs.isRemoving.resize(allocPara.maxCellCount); //AAMIRI
	cellInfoVecs.isScheduledToGrow.resize(allocPara_m.maxCellCount, false);
	cellInfoVecs.isScheduledToShrink.resize(allocPara_m.maxCellCount, false); //AAMIRI
	cellInfoVecs.isCellActive.resize(allocPara_m.maxCellCount, false); //AAMIRI
	cellInfoVecs.centerCoordX.resize(allocPara_m.maxCellCount);
	cellInfoVecs.centerCoordY.resize(allocPara_m.maxCellCount);
	cellInfoVecs.centerCoordZ.resize(allocPara_m.maxCellCount);
	cellInfoVecs.HertwigXdir.resize(allocPara_m.maxCellCount,0.0); //A&A
	cellInfoVecs.HertwigYdir.resize(allocPara_m.maxCellCount,0.0); //A&A
	cellInfoVecs.cellRanksTmpStorage.resize(allocPara_m.maxCellCount);
	cellInfoVecs.growthSpeed.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.growthXDir.resize(allocPara_m.maxCellCount);
	cellInfoVecs.growthYDir.resize(allocPara_m.maxCellCount);
	cellInfoVecs.isRandGrowInited.resize(allocPara_m.maxCellCount, false);
	cellInfoVecs.isMembrAddingNode.resize(allocPara_m.maxCellCount, false);
	cellInfoVecs.maxTenIndxVec.resize(allocPara_m.maxCellCount);
	cellInfoVecs.maxTenRiVec.resize(allocPara_m.maxCellCount);
	cellInfoVecs.maxDistToRiVec.resize(allocPara_m.maxCellCount); //Ali
	cellInfoVecs.maxTenRiMidXVec.resize(allocPara_m.maxCellCount);
	cellInfoVecs.maxTenRiMidYVec.resize(allocPara_m.maxCellCount);
	cellInfoVecs.aveTension.resize(allocPara_m.maxCellCount);
	cellInfoVecs.membrGrowProgress.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.membrGrowSpeed.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.cellAreaVec.resize(allocPara_m.maxCellCount, 0.0);
	cellInfoVecs.cellPerimVec.resize(allocPara_m.maxCellCount, 0.0); //AAMIRI
	std::cout << "finished " << std::endl;
}

// Sizes the per-node scratch vectors (legacy layout).
void SceCells::initCellNodeInfoVecs() {
	cellNodeInfoVecs.cellRanks.resize(allocPara.maxTotalCellNodeCount);
	cellNodeInfoVecs.activeXPoss.resize(allocPara.maxTotalCellNodeCount);
	cellNodeInfoVecs.activeYPoss.resize(allocPara.maxTotalCellNodeCount);
	cellNodeInfoVecs.activeZPoss.resize(allocPara.maxTotalCellNodeCount);
	cellNodeInfoVecs.distToCenterAlongGrowDir.resize(
			allocPara.maxTotalCellNodeCount);
}

// Sizes the per-node scratch vectors for the membrane model.
void SceCells::initCellNodeInfoVecs_M() {
	std::cout << "max total node count = " << allocPara_m.maxTotalNodeCount
			<< std::endl;
	cellNodeInfoVecs.cellRanks.resize(allocPara_m.maxTotalNodeCount);
	cellNodeInfoVecs.activeXPoss.resize(allocPara_m.maxTotalNodeCount);
	cellNodeInfoVecs.activeYPoss.resize(allocPara_m.maxTotalNodeCount);
	cellNodeInfoVecs.activeZPoss.resize(allocPara_m.maxTotalNodeCount);
	cellNodeInfoVecs.distToCenterAlongGrowDir.resize(
			allocPara_m.maxTotalNodeCount);
}

// Caches raw device pointers into the cell-node section of the node vectors
// and reads growth-speed parameters from the global config (legacy layout).
// The raw pointers are consumed by device functors such as AddPtOp.
void SceCells::initGrowthAuxData() {
	growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[allocPara.startPosCells]));
	growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[allocPara.startPosCells]));
	growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[allocPara.startPosCells]));
	growthAuxData.randomGrowthSpeedMin = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMin").toDouble();
	growthAuxData.randomGrowthSpeedMax = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMax").toDouble();
	growthAuxData.randGenAuxPara = globalConfigVars.getConfigValue(
			"RandomGenerationAuxPara").toDouble();
	if (controlPara.simuType == SingleCellTest) {
		growthAuxData.fixedGrowthSpeed = globalConfigVars.getConfigValue(
				"FixedGrowthSpeed").toDouble();
	}
}

// Membrane-model counterpart: node section starts at bdryNodeCount here, and
// the *_Ori copies preserve the config values so they can later be rescaled
// (see runAllCellLogicsDisc_M's prolifDecay).
void SceCells::initGrowthAuxData_M() {
	growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeIsActive[allocPara_m.bdryNodeCount]));
	growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocX[allocPara_m.bdryNodeCount]));
	growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeLocY[allocPara_m.bdryNodeCount]));
	growthAuxData.adhIndxAddr = thrust::raw_pointer_cast(
			&(nodes->getInfoVecs().nodeAdhereIndex[allocPara_m.bdryNodeCount]));
	growthAuxData.randomGrowthSpeedMin_Ori = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMin").toDouble();
	growthAuxData.randomGrowthSpeedMax_Ori = globalConfigVars.getConfigValue(
			"RandomGrowthSpeedMax").toDouble();
	growthAuxData.randomGrowthSpeedMin = growthAuxData.randomGrowthSpeedMin_Ori;
	growthAuxData.randomGrowthSpeedMax = growthAuxData.randomGrowthSpeedMax_Ori;
	growthAuxData.grthPrgrCriVal_M_Ori = globalConfigVars.getConfigValue(
			"GrowthPrgrCriVal").toDouble();
	growthAuxData.grthProgrEndCPU = globalConfigVars.getConfigValue(
			"GrowthPrgrValEnd").toDouble();
}

// Legacy initialization path: wires the node container, reads parameters and
// sizes all vectors.
void SceCells::initialize(SceNodes* nodesInput) {
	nodes = nodesInput;
	controlPara = nodes->getControlPara();
	readMiscPara();
	readBioPara();
	allocPara = nodesInput->getAllocPara();
	// max internal node count must be even number.
	// NOTE(review): this asserts on allocPara_m, but this non-_M path only
	// populates allocPara — presumably allocPara_m is default-initialized
	// here; verify the intended field (possibly a copy/paste from
	// initialize_M).
	assert(allocPara_m.maxIntnlNodePerCell % 2 == 0);
	initCellInfoVecs();
	initCellNodeInfoVecs();
	initGrowthAuxData();
	distributeIsCellRank();
}

// Membrane-model initialization path.
void SceCells::initialize_M(SceNodes* nodesInput) {
	std::cout << "Initializing cells ...... " << std::endl;
	//std::cout.flush();
	nodes = nodesInput;
	allocPara_m = nodesInput->getAllocParaM();
	// max internal node count must be even number.
	assert(allocPara_m.maxIntnlNodePerCell % 2 == 0);
	//std::cout << "break point 1 " << std::endl; //std::cout.flush();
	controlPara = nodes->getControlPara();
	//std::cout << "break point 2 " << std::endl; //std::cout.flush();
	readMiscPara_M();
	//std::cout << "break point 3 " << std::endl; //std::cout.flush();
	initCellInfoVecs_M();
	cout<< "size of dpp initilizie is "<< cellInfoVecs.cell_Dpp.size() << endl ;
	//std::cout << "break point 4 " << std::endl; //std::cout.flush();
	readBioPara();
	//std::cout << "break point 5 " << std::endl; //std::cout.flush();
	//std::cout << "break point 6 " << std::endl; //std::cout.flush();
	initCellNodeInfoVecs_M();
	//std::cout << "break point 7 " << std::endl; //std::cout.flush();
	initGrowthAuxData_M();
	//std::cout << "break point 8 " << std::endl; //std::cout.flush();
}

// Host -> device copy of the initial active node count per cell.
void SceCells::copyInitActiveNodeCount(
		std::vector<uint>& numOfInitActiveNodesOfCells) {
	thrust::copy(numOfInitActiveNodesOfCells.begin(),
			numOfInitActiveNodesOfCells.end(),
			cellInfoVecs.activeNodeCountOfThisCell.begin());
}

// One integration step: clamp/adjust velocities, then advect positions.
void SceCells::allComponentsMove() {
	adjustNodeVel();
	moveNodes();
}

/**
 * Mark cell node as either active or inactive.
 * left part of the node array will be active and right part will be inactive.
 * the threshold is defined by array activeNodeCountOfThisCell.
 * e.g.
activeNodeCountOfThisCell = {2,3} and maxNodeOfOneCell = 5 */ void SceCells::distributeIsActiveInfo() { //std::cout << "before distribute bdry isActive" << std::endl; distributeBdryIsActiveInfo(); //std::cout << "before distribute profile isActive" << std::endl; distributeProfileIsActiveInfo(); //std::cout << "before distribute ecm isActive" << std::endl; distributeECMIsActiveInfo(); //std::cout << "before distribute cells isActive" << std::endl; distributeCellIsActiveInfo(); } void SceCells::distributeIsCellRank() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingCellEnd( totalNodeCountForActiveCells); std::cerr << "totalNodeCount for active cells " << totalNodeCountForActiveCells << std::endl; //thrust::counting_iterator<uint> countingECMEnd(countingECMEnd); // only computes the cell ranks of cells. the rest remain unchanged. thrust::transform(countingBegin, countingCellEnd, nodes->getInfoVecs().nodeCellRank.begin() + allocPara.startPosCells, DivideFunctor(allocPara.maxNodeOfOneCell)); std::cerr << "finished cellRank transformation" << std::endl; } /** * This method computes center of all cells. * more efficient then simply iterating the cell because of parallel reducing. 
*/ void SceCells::computeCenterPos() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); uint totalNumberOfActiveNodes = thrust::reduce( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), isTrue()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalNumberOfActiveNodes, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::equal_to<uint>(), CVec3Add()); thrust::transform( thrust::make_zip_iterator( 
thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), CVec3Divide()); } /** * 2D version of cell division. * Division process is done by creating two temporary vectors to hold the node information * that are going to divide. * * step 1: based on lengthDifference, expectedLength and growthProgress, * this process determines whether a certain cell is ready to divide and then assign * a boolean value to isDivided. * * step 2. copy those cells that will divide in to the temp vectors created * * step 3. For each cell in the temp vectors, we sort its nodes by its distance to the * corresponding cell center. * This step is not very effcient when the number of cells going to divide is big. * but this is unlikely to happen because cells will divide according to external chemical signaling * and each will have different divide progress. * * step 4. copy the right part of each cell of the sorted array (temp1) to left part of each cell of * another array * * step 5. transform isActive vector of both temp1 and temp2, making only left part of each cell active. * * step 6. insert temp2 to the end of the cell array * * step 7. copy temp1 to the previous position of the cell array. * * step 8. add activeCellCount of the system. * * step 9. mark isDivide of all cells to false. 
*/ void SceCells::divide2DSimplified() { bool isDivisionPresent = decideIfGoingToDivide(); if (!isDivisionPresent) { return; } copyCellsPreDivision(); sortNodesAccordingToDist(); copyLeftAndRightToSeperateArrays(); transformIsActiveArrayOfBothArrays(); addSecondArrayToCellArray(); copyFirstArrayToPreviousPos(); updateActiveCellCount(); markIsDivideFalse(); } bool SceCells::decideIfGoingToDivide() { // step 1 thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isDividing.begin(), cellInfoVecs.growthProgress.begin())), CompuIsDivide(miscPara.isDivideCriticalRatio, allocPara.maxNodeOfOneCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. 
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } void SceCells::copyCellsPreDivision() { // step 2 : copy all cell rank and distance to its corresponding center with divide flag = 1 totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara.maxNodeOfOneCell; divAuxData.tmpIsActiveHold1 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpDistToCenter1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRankHold1 = thrust::device_vector<uint>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellTypes = thrust::device_vector<SceNodeType>( divAuxData.nodeStorageCount); divAuxData.tmpIsActiveHold2 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpDistToCenter2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, 
nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRankHold1.begin(), divAuxData.tmpDistToCenter1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin(), divAuxData.tmpCellTypes.begin())), isTrue()); } /** * performance wise, this implementation is not the best because I can use only one sort_by_key * with speciialized comparision operator. However, This implementation is more robust and won't * compromise performance too much. */ void SceCells::sortNodesAccordingToDist() { //step 3 for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { thrust::sort_by_key( divAuxData.tmpDistToCenter1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpDistToCenter1.begin() + (i + 1) * allocPara.maxNodeOfOneCell, thrust::make_zip_iterator( thrust::make_tuple( divAuxData.tmpXValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpYValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpZValueHold1.begin() + i * allocPara.maxNodeOfOneCell))); } } /** * scatter_if() is a thrust function. 
* inputIter1 first, * inputIter1 last, * inputIter2 map, * inputIter3 stencil * randomAccessIter output */ void SceCells::copyLeftAndRightToSeperateArrays() { //step 4. thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), make_transform_iterator(countingBegin, LeftShiftFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, IsRightSide(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold2.begin(), divAuxData.tmpYValueHold2.begin(), divAuxData.tmpZValueHold2.begin()))); } void SceCells::transformIsActiveArrayOfBothArrays() { thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold1.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold2.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); if (divAuxData.toBeDivideCount != 0) { std::cout << "before insert, active cell count in nodes:" << nodes->getAllocPara().currentActiveCellCount << std::endl; } } void SceCells::addSecondArrayToCellArray() { /// step 6. 
call SceNodes function to add newly divided cells nodes->addNewlyDividedCells(divAuxData.tmpXValueHold2, divAuxData.tmpYValueHold2, divAuxData.tmpZValueHold2, divAuxData.tmpIsActiveHold2, divAuxData.tmpCellTypes); } void SceCells::copyFirstArrayToPreviousPos() { thrust::scatter( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.end(), divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple(countingBegin, divAuxData.tmpCellRankHold1.begin())), CompuPos(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells))); /** * after dividing, the cell should resume the initial * (1) node count, which defaults to be half size of max node count * (2) growth progress, which defaults to 0 * (3) last check point, which defaults to 0 */ thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)), thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)) + allocPara.currentActiveCellCount, countingBegin, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), isTrue()); // TODO: combine this one with the previous scatter_if to improve efficiency. 
thrust::fill( cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount + divAuxData.toBeDivideCount, allocPara.maxNodeOfOneCell / 2); } void SceCells::updateActiveCellCount() { allocPara.currentActiveCellCount = allocPara.currentActiveCellCount + divAuxData.toBeDivideCount; NodeAllocPara para = nodes->getAllocPara(); para.currentActiveCellCount = allocPara.currentActiveCellCount; nodes->setAllocPara(para); } void SceCells::markIsDivideFalse() { thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, false); } void SceCells::readMiscPara() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. int maxNodeOfOneCell = globalConfigVars.getConfigValue("MaxNodePerCell").toInt(); miscPara.growThreshold = 1.0 / (maxNodeOfOneCell - maxNodeOfOneCell / 2) + epsilon; } void SceCells::readMiscPara_M() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. 
If we don't include // this small term we might risk adding one more node. int maxIntnlNodePerCell = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); miscPara.growThreshold = 1.0 / (maxIntnlNodePerCell - maxIntnlNodePerCell / 2) + epsilon; miscPara.prolifDecayCoeff = globalConfigVars.getConfigValue( "ProlifDecayCoeff").toDouble(); } void SceCells::readBioPara() { if (controlPara.simuType != Disc_M) { bioPara.cellInitLength = globalConfigVars.getConfigValue( "CellInitLength").toDouble(); std::cout << "break point 1 " << bioPara.cellInitLength << std::endl; std::cout.flush(); bioPara.cellFinalLength = globalConfigVars.getConfigValue( "CellFinalLength").toDouble(); std::cout << "break point 2 " << bioPara.cellFinalLength << std::endl; std::cout.flush(); bioPara.elongationCoefficient = globalConfigVars.getConfigValue( "ElongateCoefficient").toDouble(); std::cout << "break point 3 " << bioPara.elongationCoefficient << std::endl; std::cout.flush(); } if (controlPara.simuType == Beak) { std::cout << "break point 4 " << std::endl; std::cout.flush(); bioPara.chemoCoefficient = globalConfigVars.getConfigValue( "ChemoCoefficient").toDouble(); } //std::cin >> jj; } void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, 
growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } /** * To run all the cell level logics. * First step we got center positions of cells. * Grow. */ void SceCells::runAllCellLevelLogicsDisc(double dt) { this->dt = dt; //std::cerr << "enter run all cell level logics" << std::endl; computeCenterPos(); //std::cerr << "after compute center position." << std::endl; if (nodes->getControlPara().controlSwitchs.stab == OFF) { growAtRandom(dt); //grow2DTwoRegions(dt, region1, region2); //std::cerr << "after grow cells" << std::endl; //distributeIsActiveInfo(); //std::cerr << "after distribute is active info." << std::endl; divide2DSimplified(); //std::cerr << "after divide 2D simplified." << std::endl; distributeIsActiveInfo(); //std::cerr << "after distribute is active info." << std::endl; distributeCellGrowthProgress(); } allComponentsMove(); //std::cerr << "after all components move." << std::endl; } //Ali void SceCells::runAllCellLogicsDisc_M(double dt) { void SceCells::runAllCellLogicsDisc_M(double dt, double Damp_Coef, double InitTimeStage) { //Ali std::cout << " *** 1 ***" << endl; std::cout.flush(); this->dt = dt; this->Damp_Coef=Damp_Coef ; //Ali this->InitTimeStage=InitTimeStage ; //A & A growthAuxData.prolifDecay =1.0 ; // no decay for right now exp(-curTime * miscPara.prolifDecayCoeff); cout<< "The important curTime used in simulation is here which is"<<curTime <<endl; growthAuxData.randomGrowthSpeedMin = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMax_Ori; curTime = curTime + dt; std::cout << " *** 2 ***" << endl; std::cout.flush(); applySceCellDisc_M(); std::cout << " *** 3 ***" << endl; std::cout.flush(); //Ali computeCenterPos_M(); exchSignal(); BC_Imp_M() ; std::cout << " *** 3.5 ***" << endl; std::cout.flush(); //Ali applyMemForce_M(); std::cout << " *** 4 ***" << endl; 
std::cout.flush();
	//Ali cmment //
	// computeCenterPos_M();
	std::cout << " *** 5 ***" << endl;
	std::cout.flush();
	//Ali cmment //
	growAtRandom_M(dt);
	std::cout << " *** 6 ***" << endl;
	std::cout.flush();
	//if (curTime<3300.0)
	divide2D_M();
	std::cout << " *** 7 ***" << endl;
	std::cout.flush();
	distributeCellGrowthProgress_M();
	std::cout << " *** 8 ***" << endl;
	std::cout.flush();
	findTangentAndNormal_M();//AAMIRI ADDED May29
	allComponentsMove_M();
	std::cout << " *** 9 ***" << endl;
	std::cout.flush();
	handleMembrGrowth_M();
	std::cout << " *** 10 ***" << endl;
	std::cout.flush();
}

/**
 * Exchanges data with the external signaling module and refreshes the
 * per-cell Dpp level.
 *
 * - On the very first call (firstTimeReadDpp), sizes the signaling module's
 *   buffers from the node/cell allocation parameters.
 * - Every exchPeriod (360) time units: recomputes the tissue bounding box
 *   from active cell centers, copies node activity/positions and cell
 *   centers to the signaling module's host buffers, calls updateSignal, and
 *   copies the resulting dppLevel back into cellInfoVecs.cell_Dpp.
 * - Also seeds cell_DppOld once, immediately after the first exchange.
 */
void SceCells::exchSignal(){
	if (firstTimeReadDpp) {
		uint maxTotalNodes=nodes->getInfoVecs().nodeLocX.size() ;
		signal.Initialize(allocPara_m.maxAllNodePerCell,allocPara_m.maxMembrNodePerCell,maxTotalNodes, allocPara_m.maxCellCount) ;
		cout << " I passed the initializtion for signaling module" << endl ;
	}
	// Accumulate elapsed time since the last exchange with the signal module.
	lastTimeExchange=lastTimeExchange+dt ;
	cout << "last time exchange is " << lastTimeExchange << endl ;
	cout << "dt is " << dt << endl ;
	double exchPeriod=360 ;
	if ( lastTimeExchange>exchPeriod) {
		lastTimeExchange=0 ;
		//vector<CVector> cellCentersHost ;
		//cellCentersHost=getAllCellCenters(); //Ali
		cout << "I entered the function to update dpp" << endl ;
		// Tissue bounding box over active cell centers (device-side extrema).
		thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(cellInfoVecs.centerCoordX.begin(),
				cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount) ;
		thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(cellInfoVecs.centerCoordX.begin(),
				cellInfoVecs.centerCoordX.begin()+ allocPara_m.currentActiveCellCount) ;
		thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(cellInfoVecs.centerCoordY.begin(),
				cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ;
		thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(cellInfoVecs.centerCoordY.begin(),
				cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ;
		Tisu_MinX= *MinX_Itr ;
		Tisu_MaxX= *MaxX_Itr ;
		Tisu_MinY= *MinY_Itr ;
		Tisu_MaxY= *MaxY_Itr ;
		// Effective tissue radius: mean of the x and y half-extents.
		Tisu_R=0.5*(0.5*(Tisu_MaxX-Tisu_MinX)+0.5*(Tisu_MaxY-Tisu_MinY)) ;

		totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
				* allocPara_m.maxAllNodePerCell;
		// Device -> signaling-module host buffers.
		thrust:: copy (nodes->getInfoVecs().nodeIsActive.begin(),nodes->getInfoVecs().nodeIsActive.begin()+ totalNodeCountForActiveCells,
				signal.nodeIsActiveHost.begin());
		thrust:: copy (nodes->getInfoVecs().nodeLocX.begin(),nodes->getInfoVecs().nodeLocX.begin()+ totalNodeCountForActiveCells,
				signal.nodeLocXHost.begin());
		thrust:: copy (nodes->getInfoVecs().nodeLocY.begin(),nodes->getInfoVecs().nodeLocY.begin()+ totalNodeCountForActiveCells,
				signal.nodeLocYHost.begin());
		thrust:: copy (cellInfoVecs.centerCoordX.begin(),cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount,
				signal.cellCenterX.begin());
		thrust:: copy (cellInfoVecs.centerCoordY.begin(),cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount,
				signal.cellCenterY.begin());
		signal.updateSignal(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,curTime,totalNodeCountForActiveCells,allocPara_m.currentActiveCellCount) ; //Ali
		assert(cellInfoVecs.cell_Dpp.size()==signal.dppLevel.size());
		// Copy the freshly computed Dpp level back to the per-cell vector.
		thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_Dpp.begin()) ;
		//currentActiveCellCountOld=allocPara_m.currentActiveCellCount;
	}
	if (firstTimeReadDpp) {
		thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_DppOld.begin()) ;
		firstTimeReadDpp=false ;
	}
}

// Driver used by the stretch test: recompute centers, grow along the x axis
// (without adding points), then move nodes.
void SceCells::runStretchTest(double dt) {
	this->dt = dt;
	computeCenterPos();
	growAlongX(false, dt);
	moveNodes();
}

// Grows all cells along the +x direction; optionally schedules new points.
void SceCells::growAlongX(bool isAddPt, double d_t) {
	totalNodeCountForActiveCells = allocPara.currentActiveCellCount
			* allocPara.maxNodeOfOneCell;

	setGrowthDirXAxis();
	//std::cout << "after copy grow info" << std::endl;
	updateGrowthProgress();
	//std::cout << "after update growth progress" << std::endl;
	decideIsScheduleToGrow();
	//std::cout << "after decode os schedule to grow" << std::endl;
computeCellTargetLength();
	//std::cout << "after compute cell target length" << std::endl;
	computeDistToCellCenter();
	//std::cout << "after compute dist to center" << std::endl;
	findMinAndMaxDistToCenter();
	//std::cout << "after find min and max dist" << std::endl;
	computeLenDiffExpCur();
	//std::cout << "after compute diff " << std::endl;
	stretchCellGivenLenDiff();
	if (isAddPt) {
		addPointIfScheduledToGrow();
	}
}

// Intentionally empty placeholder: stress-driven growth is not implemented.
void SceCells::growWithStress(double d_t) {
}

/**
 * Copies the active cells' center coordinates from device to host and
 * returns them as a vector of CVector positions (one per active cell).
 */
std::vector<CVector> SceCells::getAllCellCenters() {
//void SceCells::getAllCellCenters() {
	//thrust::host_vector<double> centerX = cellInfoVecs.centerCoordX;
	//thrust::host_vector<double> centerY = cellInfoVecs.centerCoordY;
	//thrust::host_vector<double> centerZ = cellInfoVecs.centerCoordZ;
	// Copy only the active prefix of each coordinate vector to the host.
	thrust::host_vector<double> centerX( allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.centerCoordX.begin(),
			cellInfoVecs.centerCoordX.begin()
					+ allocPara_m.currentActiveCellCount, centerX.begin());
	thrust::host_vector<double> centerY( allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.centerCoordY.begin(),
			cellInfoVecs.centerCoordY.begin()
					+ allocPara_m.currentActiveCellCount, centerY.begin());
	thrust::host_vector<double> centerZ( allocPara_m.currentActiveCellCount);
	thrust::copy(cellInfoVecs.centerCoordZ.begin(),
			cellInfoVecs.centerCoordZ.begin()
					+ allocPara_m.currentActiveCellCount, centerZ.begin());
	//infoForSignal.sCenterX=centerX[4] ;
	//infoForSignal.sCenterY=centerY[4] ;
	//infoForSignal.sCenterZ=centerZ[4] ;
	std::vector<CVector> result;
	for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) {
		CVector pos = CVector(centerX[i], centerY[i], centerZ[i]);
		//infoForSignal.sCenterX=centerX[i] ;
		//infoForSignal.sCenterY=centerY[i] ;
		//infoForSignal.sCenterZ=centerZ[i] ;
		result.push_back(pos);
	}
	return result;
}

// Forces every active cell to grow along +x at the configured fixed speed
// (used by the stretch test).
void SceCells::setGrowthDirXAxis() {
	thrust::fill(cellInfoVecs.growthXDir.begin(),
			cellInfoVecs.growthXDir.begin() + allocPara.currentActiveCellCount,
			1.0);
	thrust::fill(cellInfoVecs.growthYDir.begin(),
			cellInfoVecs.growthYDir.begin() + allocPara.currentActiveCellCount,
			0.0);
	thrust::fill(cellInfoVecs.growthSpeed.begin(),
			cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount,
			growthAuxData.fixedGrowthSpeed);
}

// Returns a host-side copy of the growth-progress value of each active cell.
std::vector<double> SceCells::getGrowthProgressVec() {
	thrust::host_vector<double> growthProVec = cellInfoVecs.growthProgress;
	std::vector<double> result;
	for (uint i = 0; i < allocPara.currentActiveCellCount; i++) {
		result.push_back(growthProVec[i]);
	}
	return result;
}

/**
 * Step 1 of cell division: (re)allocates the temporary device buffers used
 * by the division pipeline.
 * Node-granularity buffers are sized nodeStorageCount
 * (= toBeDivideCount * maxAllNodePerCell); cell-granularity buffers are
 * sized toBeDivideCount. Subsequent steps (below) fill them via copy_if
 * over the dividing cells.
 */
void SceCells::copyCellsPreDivision_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	divAuxData.nodeStorageCount = divAuxData.toBeDivideCount
			* allocPara_m.maxAllNodePerCell;

	divAuxData.tmpIsActive_M = thrust::device_vector<bool>(
			divAuxData.nodeStorageCount, true);
	divAuxData.tmpNodePosX_M = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpNodePosY_M = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);

	divAuxData.tmpCellRank_M = thrust::device_vector<uint>(
			divAuxData.toBeDivideCount, 0);
	divAuxData.tmpDivDirX_M = thrust::device_vector<double>(
			divAuxData.toBeDivideCount, 0);
	divAuxData.tmpDivDirY_M = thrust::device_vector<double>(
			divAuxData.toBeDivideCount, 0);
	divAuxData.tmpCenterPosX_M = thrust::device_vector<double>(
			divAuxData.toBeDivideCount, 0);
	divAuxData.tmpCenterPosY_M = thrust::device_vector<double>(
			divAuxData.toBeDivideCount, 0);

	// Buffers for the two daughter cells produced by each division.
	divAuxData.tmpIsActive1_M = thrust::device_vector<bool>(
			divAuxData.nodeStorageCount, false);
	divAuxData.tmpXPos1_M = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpYPos1_M = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);

	divAuxData.tmpIsActive2_M = thrust::device_vector<bool>(
			divAuxData.nodeStorageCount, false);
	divAuxData.tmpXPos2_M = thrust::device_vector<double>(
			divAuxData.nodeStorageCount, 0.0);
	divAuxData.tmpYPos2_M =
thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); //A&A divAuxData.tmpHertwigXdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpHertwigYdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); //A&A // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin())), isTrue()); // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), 
thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpHertwigXdir.begin(), divAuxData.tmpHertwigYdir.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin())), isTrue()); } void SceCells::copyCellsEnterMitotic() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; divAuxData.nodeStorageCount = divAuxData.toEnterMitoticCount * allocPara_m.maxAllNodePerCell; divAuxData.tmpIsActive_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpNodePosX_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodePosY_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRank_M = thrust::device_vector<uint>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpDivDirX_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpDivDirY_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( 
thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin())), isTrue()); // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isEnteringMitotic.begin(), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin())), isTrue()); } void SceCells::createTwoNewCellArr_M() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); //divDebug(); for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { divAuxData.tmp1IntnlVec.clear(); 
divAuxData.tmp2IntnlVec.clear(); vector<CVector> membrNodes; vector<CVector> intnlNodes; obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); CVector oldCenter = obtainCenter(i); //A&A commented //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // lenAlongMajorAxis); /*CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, lenAlongMajorAxis);*/ CVector divDir; divDir.x = divAuxData.tmpHertwigXdir[i] ; //A&A divDir.y = divAuxData.tmpHertwigYdir[i] ; //A&A double lenAlongHertwigAxis = calLengthAlongHertwigAxis(divDir, oldCenter, membrNodes);//A&A added // std::vector<VecVal> tmp1Membr, tmp2Membr; CVector cell1Center, cell2Center; obtainTwoNewCenters(oldCenter, divDir, lenAlongHertwigAxis, cell1Center, cell2Center); prepareTmpVec(i, divDir, oldCenter, tmp1Membr, tmp2Membr); processMemVec(tmp1Membr, tmp2Membr); shiftIntnlNodesByCellCenter(cell1Center, cell2Center); assembleVecForTwoCells(i); } //divDebug(); } //A&A void SceCells::findHertwigAxis() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); //divDebug(); for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; vector<CVector> membrNodes; vector<CVector> intnlNodes; obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); CVector oldCenter = obtainCenter(i); double lenAlongMajorAxis; CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, lenAlongMajorAxis); cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; //std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; //std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; //std::cout<<divDir.x<<"HertwigXdir " <<std::endl; //std::cout<<divDir.y<<"HertwigYdir " <<std::endl; } //divDebug(); } void SceCells::copyFirstCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint 
i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp1InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp1MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0.0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRank]; } } void SceCells::copySecondCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRankMother = divAuxData.tmpCellRank_M[i]; uint cellRank = allocPara_m.currentActiveCellCount + i; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), 
divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp2InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp2MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother]; cellInfoVecs.cell_Dpp[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother]; } } //AAMIRI /* void SceCells::removeCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), 
nodes->getInfoVecs().nodeLocY.begin(),
								nodes->getInfoVecs().nodeIsActive.begin(),
								nodes->getInfoVecs().nodeAdhereIndex.begin(),
								nodes->getInfoVecs().membrIntnlIndex.begin()))
						+ nodeStartIndx);

		cellInfoVecs.activeIntnlNodeCounts[cellRank] =
				divAuxData.tmp1InternalActiveCounts[i];
		cellInfoVecs.activeMembrNodeCounts[cellRank] =
				divAuxData.tmp1MemActiveCounts[i];
		cellInfoVecs.growthProgress[cellRank] = 0;
		cellInfoVecs.membrGrowProgress[cellRank] = 0.0;
		cellInfoVecs.isRandGrowInited[cellRank] = false;
		cellInfoVecs.lastCheckPoint[cellRank] = 0;
	}
}
*/

// Grows the active-cell count by the number of divisions just performed and
// propagates the new count to the node container.
void SceCells::updateActiveCellCount_M() {
	allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount
			+ divAuxData.toBeDivideCount;
	nodes->setActiveCellCount(allocPara_m.currentActiveCellCount);
}

//AAMIRI
// NOTE(review): this disabled "AfterRemoval" variant still *adds*
// divAuxData.toBeDivideCount — it looks like an unedited copy of
// updateActiveCellCount_M and would need to subtract the removed-cell
// count if it is ever re-enabled.
/*
void SceCells::updateActiveCellCountAfterRemoval_M() {
	allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount
			+ divAuxData.toBeDivideCount;
	nodes->setActiveCellCount(allocPara_m.currentActiveCellCount);
}
*/

// Clears the isDividing flag of every active cell after division handling.
void SceCells::markIsDivideFalse_M() {
	thrust::fill(cellInfoVecs.isDividing.begin(),
			cellInfoVecs.isDividing.begin()
					+ allocPara_m.currentActiveCellCount, false);
}

// Overwrites node velocities via the ForceZero functor (presumably zeroing
// them — confirm in the functor definition); covers boundary nodes plus all
// nodes of active cells.
void SceCells::adjustNodeVel_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin()))
					+ allocPara_m.bdryNodeCount + totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			ForceZero());
}

// Advances node positions from node velocities using the damped saxpy
// functor (replaces the older undamped SaxpyFunctorDim2, kept in comment).
void SceCells::moveNodes_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin()))
					+ totalNodeCountForActiveCells + allocPara_m.bdryNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			//Ali SaxpyFunctorDim2(dt));
			SaxpyFunctorDim2_Damp(dt,Damp_Coef)); //Ali
}

//Ali
// This function is written to assign a different damping coefficient to each
// cell (fetched per node from Cell_Damp via a permutation iterator), so the
// boundary cells can have more damping.
void SceCells::moveNodes_BC_M() {
	thrust::counting_iterator<uint> iBegin2(0);
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.Cell_Damp.begin(),
									make_transform_iterator(iBegin2,
											DivideFunctor(maxAllNodePerCell))),
							nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							thrust::make_permutation_iterator(
									cellInfoVecs.Cell_Damp.begin(),
									make_transform_iterator(iBegin2,
											DivideFunctor(maxAllNodePerCell))),
							nodes->getInfoVecs().nodeVelX.begin(),
							nodes->getInfoVecs().nodeVelY.begin()))
					+ totalNodeCountForActiveCells + allocPara_m.bdryNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin())),
			SaxpyFunctorDim2_BC_Damp(dt));
}

//Ali
// Computes membrane forces (tension and bending) for all membrane nodes of
// active cells and accumulates them into node velocities.
void SceCells::applyMemForce_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell;
	thrust::counting_iterator<uint> iBegin(0), iBegin1(0);
	//Ali
	// Stamp every active cell with the current simulation time.
	thrust::fill(cellInfoVecs.Cell_Time.begin(),cellInfoVecs.Cell_Time.begin() +allocPara_m.currentActiveCellCount,curTime);
	//Ali
	/* thrust::device_vector<double>::iterator
MinX_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; Tisu_MinX= *MinX_Itr ; Tisu_MaxX= *MaxX_Itr ; Tisu_MinY= *MinY_Itr ; Tisu_MaxY= *MaxY_Itr ; */ //cout<< "# of boundary nodes"<< allocPara_m.bdryNodeCount<<endl ; //cout<< "# of total active nodes"<<totalNodeCountForActiveCells <<endl ; //cout<<"The minimum location in X in applyMemForce_M is="<<Tisu_MinX<< endl; //cout<<"The maximum location in X in applyMemForce_M is="<<Tisu_MaxX<< endl; //cout<<"The minimum location in Y in applyMemForce_M is="<<Tisu_MinY<< endl; //cout<<"The maximum location in Y in applyMemForce_M is="<<Tisu_MaxY<< endl; //Ali double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; // for now constant //growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); thrust::transform( thrust::make_zip_iterator( 
thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.Cell_Time.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.Cell_Time.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)) + 
totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().membrTenMagRi.begin(), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrBendLeftX.begin(), nodes->getInfoVecs().membrBendLeftY.begin(), nodes->getInfoVecs().membrBendRightX.begin(), nodes->getInfoVecs().membrBendRightY.begin())) + allocPara_m.bdryNodeCount, AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M)); /**Ali Comment start thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( 
thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().membrTenMagRi.begin(), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrBendLeftX.begin(), nodes->getInfoVecs().membrBendLeftY.begin(), nodes->getInfoVecs().membrBendRightX.begin(), nodes->getInfoVecs().membrBendRightY.begin())) + allocPara_m.bdryNodeCount, AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr)); **/ // Ali comment end //Ali //Ali double* bendLeftXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftX[0])); double* bendLeftYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftY[0])); double* bendRightXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendRightX[0])); double* bendRightYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendRightY[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), 
nodes->getInfoVecs().nodeVelY.begin())), AddMembrBend(maxAllNodePerCell, nodeIsActiveAddr, bendLeftXAddr, bendLeftYAddr, bendRightXAddr, bendRightYAddr)); } //AAMIRI void SceCells::findTangentAndNormal_M() { uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0), iBegin1(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeExtForceX.begin(), nodes->getInfoVecs().nodeExtForceY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeExtForceX.begin(), 
nodes->getInfoVecs().nodeExtForceY.begin()))
					+ totalNodeCountForActiveCells,
			// Outputs written per node: tangential/normal force, curvature,
			// tangential/normal external force, and membrDistToRi (semantics
			// defined inside CalCurvatures).
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeF_MI_M_T.begin(),
							nodes->getInfoVecs().nodeF_MI_M_N.begin(),
							nodes->getInfoVecs().nodeCurvature.begin(),
							nodes->getInfoVecs().nodeExtForceTangent.begin(),
							nodes->getInfoVecs().nodeExtForceNormal.begin(),
							nodes->getInfoVecs().membrDistToRi.begin())),
			CalCurvatures(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr,
					nodeLocYAddr));
}

// Removes the listed nodes of each ablated cell and decrements that cell's
// active-node count by the number of removed nodes.
void SceCells::runAblationTest(AblationEvent& ablEvent) {
	for (uint i = 0; i < ablEvent.ablationCells.size(); i++) {
		int cellRank = ablEvent.ablationCells[i].cellNum;
		std::vector<uint> removeSeq = ablEvent.ablationCells[i].nodeNums;
		cellInfoVecs.activeNodeCountOfThisCell[cellRank] =
				cellInfoVecs.activeNodeCountOfThisCell[cellRank]
						- removeSeq.size();
		nodes->removeNodes(cellRank, removeSeq);
	}
}

// Recomputes each active cell's center as the average position of its active
// internal nodes (compact -> sum per cell -> divide by count).
// Definition continues on the next chunk line.
void SceCells::computeCenterPos_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
	//uint totalMembrActiveNodeCount = thrust::reduce(
	//		cellInfoVecs.activeMembrNodeCounts.begin(),
	//		cellInfoVecs.activeMembrNodeCounts.begin()
	//				+ allocPara_m.currentActiveCellCount);
	uint totalIntnlActiveNodeCount = thrust::reduce(
			cellInfoVecs.activeIntnlNodeCounts.begin(),
			cellInfoVecs.activeIntnlNodeCounts.begin()
					+ allocPara_m.currentActiveCellCount);
	// Compact (cellRank, x, y) of active internal nodes into the
	// cellNodeInfoVecs scratch arrays; the ActiveAndIntnl predicate reads
	// the (nodeIsActive, nodeCellType) stencil.
	thrust::copy_if(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(iBegin,
									DivideFunctor(
											allocPara_m.maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara_m.bdryNodeCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_transform_iterator(iBegin,
									DivideFunctor(
											allocPara_m.maxAllNodePerCell)),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara_m.bdryNodeCount,
nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara_m.bdryNodeCount))
					+ totalNodeCountForActiveCells,
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeCellType.begin()))
					+ allocPara_m.bdryNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(),
							cellNodeInfoVecs.activeXPoss.begin(),
							cellNodeInfoVecs.activeYPoss.begin())),
			ActiveAndIntnl());

	// Sum the compacted (x, y) coordinates per cell rank ...
	thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(),
			cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(),
							cellNodeInfoVecs.activeYPoss.begin())),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin())),
			thrust::equal_to<uint>(), CVec2Add());
	// ... then divide by each cell's active internal node count to get the mean.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.activeIntnlNodeCounts.begin(),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin())),
			CVec2Divide());
}

// Applies a damping boundary condition on cell centers via the
// BC_Tissue_Damp functor.  The commented-out code below used to compute the
// tissue bounding box (Min/Max of centers) on the fly; the active version
// uses the cached Tisu_Min/Max members instead.
// Definition continues on the following chunk lines.
void SceCells::BC_Imp_M() {
/*
	thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(
			cellInfoVecs.centerCoordX.begin(),
			cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ;
	thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(
			cellInfoVecs.centerCoordX.begin(),
			cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ;
	thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(
			cellInfoVecs.centerCoordY.begin(),
			cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ;
	thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(
cellInfoVecs.centerCoordY.begin(),
			cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ;

	MinX= *MinX_Itr ;
	MaxX= *MaxX_Itr ;
	MinY= *MinY_Itr ;
	MaxY= *MaxY_Itr ;
*/
	//cout<<"The minimum location of cell cetners in Y in BC_Imp_M is="<<Tisu_MinX<< endl;
	//cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxX<< endl;
	//cout<<"The minimum location of cell centers in Y in BC_Imp_M is="<<Tisu_MinY<< endl;
	//cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxY<< endl;
	/**
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin()) ),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin()))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin())),
			BC_Tissue_Damp(Damp_Coef)) ;
	**/
	int NumActCells=allocPara_m.currentActiveCellCount ; //Ali
	// Active version: reads (centerX, centerY, Cell_Damp) per cell.
	// NOTE(review): the output tuple is only (centerCoordX, Cell_Damp) — the
	// functor writes back just these two fields; confirm centerCoordY is
	// intentionally read-only here.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.Cell_Damp.begin()) ),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.Cell_Damp.begin()))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.Cell_Damp.begin())),
			BC_Tissue_Damp(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,Damp_Coef,NumActCells)) ;

	/**void SceCells::randomizeGrowth() {
	thrust::counting_iterator<uint> countingBegin(0);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.isRandGrowInited.begin(),
							countingBegin)),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.isRandGrowInited.begin(),
							countingBegin))
					+ allocPara.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
							cellInfoVecs.growthXDir.begin(),
							cellInfoVecs.growthYDir.begin(),
							cellInfoVecs.isRandGrowInited.begin())),
			AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin,
					growthAuxData.randomGrowthSpeedMax,
					allocPara.currentActiveCellCount,
					growthAuxData.randGenAuxPara));
}
	**/
}

// Growth pipeline for one step: randomize growth speed, advance growth
// progress, decide which cells may grow, add a node to those cells, then
// refresh bookkeeping.  Several intermediate stages are currently disabled.
void SceCells::growAtRandom_M(double dt) {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;

	randomizeGrowth_M();

	updateGrowthProgress_M();

	decideIsScheduleToGrow_M();

	//computeCellTargetLength_M();

	//computeDistToCellCenter_M();

	//findMinAndMaxDistToCenter_M();

	//computeLenDiffExpCur_M();

	//stretchCellGivenLenDiff_M();

	addPointIfScheduledToGrow_M();

	//decideIsScheduleToShrink_M();// AAMIRI May5

	//delPointIfScheduledToGrow_M();//AAMIRI - commented out on June20

	adjustGrowthInfo_M();
}

// Orchestrates 2D division: cells entering mitosis get their division axis
// computed (findHertwigAxis); cells flagged as dividing are copied out,
// split into two new node arrays, and copied back as two daughter cells.
void SceCells::divide2D_M() {
	bool isDivisionPresent = decideIfGoingToDivide_M();
	bool isEnteringMitotic = decideIfAnyCellEnteringMitotic() ; //A&A

	//A&A
	if (isEnteringMitotic){
		std::cout<< "I am in EnteringMitotic"<< std::endl;
		copyCellsEnterMitotic();
		findHertwigAxis();
	}
	//A&A
	if (!isDivisionPresent) {
		return;
	}
	//aniDebug = true;
	copyCellsPreDivision_M();
	createTwoNewCellArr_M();
	copyFirstCellArr_M();
	copySecondCellArr_M();
	updateActiveCellCount_M();
	markIsDivideFalse_M();
	//divDebug();
}

// Broadcasts each cell's growthProgress to all of its nodes (nodeGrowPro);
// on the very first step it also seeds lastCheckPoint from growthProgress.
// Definition continues on the next chunk line.
void SceCells::distributeCellGrowthProgress_M() {
	totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount
			* allocPara_m.maxAllNodePerCell;
	thrust::counting_iterator<uint> countingBegin(0);
	thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);

	// Permutation iterator maps flattened node index -> owning cell's
	// growthProgress value.
	thrust::copy(
			thrust::make_permutation_iterator(
					cellInfoVecs.growthProgress.begin(),
					make_transform_iterator(countingBegin,
							DivideFunctor(allocPara_m.maxAllNodePerCell))),
thrust::make_permutation_iterator(
					cellInfoVecs.growthProgress.begin(),
					make_transform_iterator(countingEnd,
							DivideFunctor(allocPara_m.maxAllNodePerCell))),
			nodes->getInfoVecs().nodeGrowPro.begin()
					+ allocPara_m.bdryNodeCount);

	std::cout << "the vlaue of init time stage in distributeCellGrowthProgress_M is"<< InitTimeStage << std:: endl ;
	// Seed the growth checkpoint once, on the first step after initialization.
	if (curTime <= InitTimeStage+dt)//AAMIRI /A & A
		thrust::copy(
				cellInfoVecs.growthProgress.begin(),
				cellInfoVecs.growthProgress.end(),
				cellInfoVecs.lastCheckPoint.begin() );
}

// Moves all nodes for this step; only the boundary-condition-aware variant
// is currently enabled.
void SceCells::allComponentsMove_M() {
	//moveNodes_M();   //Ali
	moveNodes_BC_M(); //Ali
}

//Ali modified this function to introduce differential proliferation rates
// Assigns a random growth speed to each cell that has not yet been
// initialized (RandomizeGrow_M functor); the tissue geometry parameters let
// the functor make the speed position-dependent.
// Definition continues on the next chunk line.
void SceCells::randomizeGrowth_M() {
	// NOTE(review): 0.5*(Max-Min) is the half-extent of the tissue, not the
	// midpoint 0.5*(Max+Min) — despite the "Cntr" name; confirm intended.
	double CntrTisuX=0.5*(Tisu_MaxX-Tisu_MinX) ;
	double CntrTisuY=0.5*(Tisu_MaxY-Tisu_MinY) ;

	//cout<<"The minimum location of cell cetners in Y in randomizeGrowth_M is="<<Tisu_MinX<< endl;
	//cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxX<< endl;
	//cout<<"The minimum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MinY<< endl;
	//cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxY<< endl;

	// NOTE(review): time(NULL) reseeds every call — values repeat if this is
	// called more than once within the same second.
	uint seed = time(NULL);
	thrust::counting_iterator<uint> countingBegin(0);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.isRandGrowInited.begin(),
							countingBegin)),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.isRandGrowInited.begin(),
							countingBegin))
					+ allocPara_m.currentActiveCellCount,
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.isRandGrowInited.begin())),
RandomizeGrow_M(CntrTisuX,CntrTisuY,Tisu_R,growthAuxData.randomGrowthSpeedMin,
					growthAuxData.randomGrowthSpeedMax, seed));
}

// Advances each cell's growth progress for this step using its Dpp level and
// growth speed (DppGrowRegulator); the previous progress is preserved in
// growthProgressOld so mitotic entry can be detected later.
void SceCells::updateGrowthProgress_M() {
	thrust::copy(cellInfoVecs.growthProgress.begin(),
			cellInfoVecs.growthProgress.begin()
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.growthProgressOld.begin());
	/*
	thrust::transform(cellInfoVecs.growthSpeed.begin(),
			cellInfoVecs.growthSpeed.begin()
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.growthProgress.begin(),
			cellInfoVecs.growthProgress.begin(),
			SaxpyFunctorWithMaxOfOne(dt));
	*/
	cout << " I am trying to update growth progress" << endl ;
	//double dummy=0 ;
	double mitoticCheckPoint=growthAuxData.grthPrgrCriVal_M_Ori ;
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
							cellInfoVecs.cell_DppOld.begin(),
							cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.growthSpeed.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
							cellInfoVecs.cell_DppOld.begin(),
							cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.growthSpeed.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.growthProgress.begin(),
			DppGrowRegulator(dt,mitoticCheckPoint));
}

// Flags cells scheduled to grow by comparing growthProgress against
// lastCheckPoint (exact condition defined by PtCondiOp with
// miscPara.growThreshold).
void SceCells::decideIsScheduleToGrow_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.lastCheckPoint.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.lastCheckPoint.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.isScheduledToGrow.begin(),
			PtCondiOp(miscPara.growThreshold));
}

//AAMIRI May5
// Marks cells for shrinking based on a hard-coded "laser" region
// (center/radius below, handed to the isDelOp predicate).
// Definition continues on the next chunk line.
void SceCells::decideIsScheduleToShrink_M() {
	double laserCenterY = 25.0;
	double laserCenterX = 25.0;
	double laserRadius = 4.0;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(iBegin,
							cellInfoVecs.centerCoordX.begin(),
cellInfoVecs.centerCoordY.begin(),
							cellInfoVecs.isScheduledToShrink.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(iEnd,
							cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount,
							cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount,
							cellInfoVecs.isScheduledToShrink.begin()+allocPara_m.currentActiveCellCount)),
			cellInfoVecs.isScheduledToShrink.begin(),
			isDelOp(laserCenterX, laserCenterY, laserRadius));
}

// Maps each cell's growth progress to an expected length, parameterized by
// the initial and final cell lengths (CompuTarLen).
void SceCells::computeCellTargetLength_M() {
	thrust::transform(cellInfoVecs.growthProgress.begin(),
			cellInfoVecs.growthProgress.begin()
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.expectedLength.begin(),
			CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength));
}

// For every active node, computes its distance from the owning cell's center
// along that cell's growth direction (CompuDist); result goes to
// cellNodeInfoVecs.distToCenterAlongGrowDir.
// Definition continues on the next chunk line.
void SceCells::computeDistToCellCenter_M() {
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(totalNodeCountForActiveCells);
	uint endIndx = allocPara_m.bdryNodeCount + totalNodeCountForActiveCells;
	// Per-cell values (center, growth direction) are broadcast to each of the
	// cell's nodes via permutation iterators keyed by index / maxAllNodePerCell.
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.centerCoordY.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							nodes->getInfoVecs().nodeLocX.begin()
									+ allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeLocY.begin()
									+ allocPara_m.bdryNodeCount,
							nodes->getInfoVecs().nodeIsActive.begin()
									+ allocPara_m.bdryNodeCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							make_permutation_iterator(
									cellInfoVecs.centerCoordX.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
make_permutation_iterator(
									cellInfoVecs.centerCoordY.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(
													allocPara_m.maxAllNodePerCell))),
							nodes->getInfoVecs().nodeLocX.begin() + endIndx,
							nodes->getInfoVecs().nodeLocY.begin() + endIndx,
							nodes->getInfoVecs().nodeIsActive.begin()
									+ endIndx)),
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist());
}

// Reduces distToCenterAlongGrowDir per cell to its per-cell minimum (here)
// and maximum (second reduce, on the next chunk line).
void SceCells::findMinAndMaxDistToCenter_M() {
	// For nodes of each cell, find the minimum distance from the node to the
	// corresponding cell center along the pre-defined growth direction.
	thrust::reduce_by_key(
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell)),
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell))
					+ totalNodeCountForActiveCells,
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(),
			thrust::minimum<double>());

	// for nodes of each cell, find the maximum distance from the node to the
	// corresponding cell center along the pre-defined growth direction.
thrust::reduce_by_key(
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell)),
			make_transform_iterator(countingBegin,
					DivideFunctor(allocPara_m.maxAllNodePerCell))
					+ totalNodeCountForActiveCells,
			cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
			cellInfoVecs.cellRanksTmpStorage.begin(),
			cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(),
			thrust::maximum<double>());
}

// Computes each cell's length difference from (expectedLength,
// smallestDistance, biggestDistance) via the CompuDiff functor.
void SceCells::computeLenDiffExpCur_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
							cellInfoVecs.smallestDistance.begin(),
							cellInfoVecs.biggestDistance.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.expectedLength.begin(),
							cellInfoVecs.smallestDistance.begin(),
							cellInfoVecs.biggestDistance.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.lengthDifference.begin(), CompuDiff());
}

// Adds a stretch contribution to node velocities based on each cell's length
// difference along its growth direction (ApplyStretchForce_M).
// Definition continues on the next chunk line.
void SceCells::stretchCellGivenLenDiff_M() {
	uint count = allocPara_m.maxAllNodePerCell;
	uint bdry = allocPara_m.bdryNodeCount;
	uint actCount = totalNodeCountForActiveCells;
	uint all = bdry + actCount;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(actCount);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin(),
							make_permutation_iterator(
									cellInfoVecs.lengthDifference.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iBegin,
											DivideFunctor(count))),
							nodes->getInfoVecs().nodeVelX.begin() + bdry,
							nodes->getInfoVecs().nodeVelY.begin() + bdry,
							make_transform_iterator(iBegin,
									ModuloFunctor(count)))),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellNodeInfoVecs.distToCenterAlongGrowDir.begin()
									+ actCount,
							make_permutation_iterator(
									cellInfoVecs.lengthDifference.begin(),
make_transform_iterator(iEnd,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthXDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(count))),
							make_permutation_iterator(
									cellInfoVecs.growthYDir.begin(),
									make_transform_iterator(iEnd,
											DivideFunctor(count))),
							nodes->getInfoVecs().nodeVelX.begin() + all,
							nodes->getInfoVecs().nodeVelY.begin() + all,
							make_transform_iterator(iEnd,
									ModuloFunctor(count)))),
			// Writes back node velocities in place.
			thrust::make_zip_iterator(
					thrust::make_tuple(
							nodes->getInfoVecs().nodeVelX.begin() + bdry,
							nodes->getInfoVecs().nodeVelY.begin() + bdry)),
			ApplyStretchForce_M(bioPara.elongationCoefficient,
					allocPara_m.maxMembrNodePerCell));
}

// For every cell scheduled to grow, the AddPtOp_M functor adds one internal
// node (using the cell center and addNodeDistance) and updates the cell's
// lastCheckPoint and active internal node count.
void SceCells::addPointIfScheduledToGrow_M() {
	// NOTE(review): time(NULL) seed repeats within the same second.
	uint seed = time(NULL);
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint> iEnd(activeCellCount);
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(), iBegin,
							cellInfoVecs.lastCheckPoint.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellInfoVecs.isScheduledToGrow.begin()
									+ activeCellCount,
							cellInfoVecs.activeIntnlNodeCounts.begin()
									+ activeCellCount,
							cellInfoVecs.centerCoordX.begin()
									+ activeCellCount,
							cellInfoVecs.centerCoordY.begin()
									+ activeCellCount, iEnd,
							cellInfoVecs.lastCheckPoint.begin()
									+ activeCellCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.lastCheckPoint.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin())),
			AddPtOp_M(seed, miscPara.addNodeDistance, miscPara.growThreshold,
					growthAuxData.nodeXPosAddress,
					growthAuxData.nodeYPosAddress,
					growthAuxData.nodeIsActiveAddress));
}

//AAMIRI
// Deletes points from cells scheduled to shrink (DelPtOp_M); only active in
// a hard-coded time window starting at curTime = 70000 (see next chunk line).
void SceCells::delPointIfScheduledToGrow_M() {
	uint seed = time(NULL);
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	thrust::counting_iterator<uint> iBegin(0);
	thrust::counting_iterator<uint>
iEnd(activeCellCount);

	int timeStep = curTime/dt;
	// Hard-coded ablation window: pick shrink targets once just after
	// t = 70000, then keep applying DelPtOp_M for all t > 70000.
	if (curTime>70000.0 && curTime<70000.1){
		decideIsScheduleToShrink_M();// AAMIRI
	}

	if (curTime > 70000.0)
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.isScheduledToShrink.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin(),
							cellInfoVecs.centerCoordX.begin(),
							cellInfoVecs.centerCoordY.begin(), iBegin,
							cellInfoVecs.activeMembrNodeCounts.begin(),
							cellInfoVecs.isCellActive.begin(),
							cellInfoVecs.growthSpeed.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(
							cellInfoVecs.isScheduledToShrink.begin()
									+ activeCellCount,
							cellInfoVecs.activeIntnlNodeCounts.begin()
									+ activeCellCount,
							cellInfoVecs.centerCoordX.begin()
									+ activeCellCount,
							cellInfoVecs.centerCoordY.begin()
									+ activeCellCount, iEnd,
							cellInfoVecs.activeMembrNodeCounts.begin()
									+ activeCellCount,
							cellInfoVecs.isCellActive.begin()
									+ activeCellCount,
							cellInfoVecs.growthSpeed.begin()
									+ activeCellCount)),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin(),
							cellInfoVecs.isCellActive.begin(),
							cellInfoVecs.growthSpeed.begin())),
			DelPtOp_M(seed, timeStep, growthAuxData.adhIndxAddr,
					growthAuxData.nodeXPosAddress,
					growthAuxData.nodeYPosAddress,
					growthAuxData.nodeIsActiveAddress));
}

// Flags cells whose growth progress and internal node count satisfy the
// division condition (CompuIsDivide_M); returns true iff at least one cell
// is flagged.  Count reduction happens on the next chunk line.
bool SceCells::decideIfGoingToDivide_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.isDividing.begin(),
			CompuIsDivide_M(allocPara_m.maxIntnlNodePerCell));
	// sum all bool values which indicate whether the cell is going to divide.
	// toBeDivideCount is the total number of cells going to divide.
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(),
			cellInfoVecs.isDividing.begin()
					+ allocPara_m.currentActiveCellCount, (uint) (0));
	if (divAuxData.toBeDivideCount > 0) {
		return true;
	} else {
		return false;
	}
}

//A&A
// Compares each cell's current growthProgress against growthProgressOld via
// CompuIsEnteringMitotic_M (threshold grthPrgrCriVal_M, currently a
// constant) and returns true iff at least one cell is flagged as entering
// mitosis this step.
bool SceCells::decideIfAnyCellEnteringMitotic() {
	double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant
//	growthAuxData.grthProgrEndCPU
//			- growthAuxData.prolifDecay
//					* (growthAuxData.grthProgrEndCPU
//							- growthAuxData.grthPrgrCriVal_M_Ori);

	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.growthProgressOld.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.growthProgressOld.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.isEnteringMitotic.begin(),
			CompuIsEnteringMitotic_M(grthPrgrCriVal_M));
	// sum all bool values which indicate whether the cell is going to divide.
	// toBeDivideCount is the total number of cells going to divide.
	divAuxData.toEnterMitoticCount = thrust::reduce(
			cellInfoVecs.isEnteringMitotic.begin(),
			cellInfoVecs.isEnteringMitotic.begin()
					+ allocPara_m.currentActiveCellCount, (uint) (0));
	if (divAuxData.toEnterMitoticCount > 0) {
		return true;
	} else {
		return false;
	}
}

//AAMIRI
/*
bool SceCells::decideIfGoingToRemove_M() {
	thrust::transform(
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(cellInfoVecs.growthProgress.begin(),
							cellInfoVecs.activeIntnlNodeCounts.begin()))
					+ allocPara_m.currentActiveCellCount,
			cellInfoVecs.isRemoving.begin(),
			CompuIsRemoving_M(allocPara_m.maxIntnlNodePerCell));
	// sum all bool values which indicate whether the cell is going to divide.
	// toBeDivideCount is the total number of cells going to divide.
divAuxData.toBeRemovingCount = thrust::reduce(cellInfoVecs.isRemoving.begin(),
			cellInfoVecs.isRemoving.begin()
					+ allocPara_m.currentActiveCellCount, (uint) (0));
	if (divAuxData.toBeRemovingCount > 0) {
		return true;
	} else {
		return false;
	}
}
*/

// Gathers animation output on the host: node positions, membrane tension
// values, adhesion bonds, membrane polyline links, and qualifying
// internal-node links.  Definition continues on the following chunk lines.
AniRawData SceCells::obtainAniRawData(AnimationCriteria& aniCri) {
	uint activeCellCount = allocPara_m.currentActiveCellCount;
	uint maxNodePerCell = allocPara_m.maxAllNodePerCell;
	uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell;
	uint beginIndx = allocPara_m.bdryNodeCount;

	AniRawData rawAniData;
	//cout << "size of potential pairs = " << pairs.size() << endl;

	// unordered_map is more efficient than map, but it is a c++ 11 feature
	// and c++ 11 seems to be incompatible with Thrust.
	IndexMap locIndexToAniIndexMap;

	uint maxActiveNode = activeCellCount * maxNodePerCell;

	// Stage all needed device data on the host with one bulk zip copy.
	thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode);
	thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode);
	thrust::host_vector<bool> hostIsActiveVec(maxActiveNode);
	thrust::host_vector<int> hostBondVec(maxActiveNode);
	thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode);
	thrust::copy(
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeAdhereIndex.begin(),
							nodes->getInfoVecs().membrTensionMag.begin())),
			thrust::make_zip_iterator(
					thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(),
							nodes->getInfoVecs().nodeLocY.begin(),
							nodes->getInfoVecs().nodeIsActive.begin(),
							nodes->getInfoVecs().nodeAdhereIndex.begin(),
							nodes->getInfoVecs().membrTensionMag.begin()))
					+ maxActiveNode,
			thrust::make_zip_iterator(
					thrust::make_tuple(hostTmpVectorLocX.begin(),
							hostTmpVectorLocY.begin(), hostIsActiveVec.begin(),
							hostBondVec.begin(),
							hostTmpVectorTenMag.begin())));
	thrust::host_vector<uint> curActiveMemNodeCounts =
			cellInfoVecs.activeMembrNodeCounts;

	CVector tmpPos;
	uint index1;
	int index2;
std::vector<BondInfo> bondInfoVec;
	double node1X, node1Y;
	double node2X, node2Y;
	double aniVal;

	// Pass 1: collect adhesion bonds (from nodeAdhereIndex) between active
	// membrane nodes; the index2 > index1 check records each bond only once,
	// and -1 marks "no bond".
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < maxMemNodePerCell; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if (hostIsActiveVec[index1] == true) {
				index2 = hostBondVec[index1];
				if (index2 > index1 && index2 != -1) {
					BondInfo bond;
					bond.cellRank1 = i;
					bond.pos1 = CVector(hostTmpVectorLocX[index1],
							hostTmpVectorLocY[index1], 0);
					bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
					bond.pos2 = CVector(hostTmpVectorLocX[index2],
							hostTmpVectorLocY[index2], 0);
					bondInfoVec.push_back(bond);
				}
			}
		}
	}
	rawAniData.bondsArr = bondInfoVec;

	uint curIndex = 0;
	// Pass 2: emit membrane links — each membrane node connects to its
	// successor, wrapping around at the last active membrane node to close
	// the loop.  Each endpoint is registered in the index map exactly once;
	// aniVal carries the membrane tension magnitude for coloring.
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) {
			index1 = beginIndx + i * maxNodePerCell + j;
			if (j == curActiveMemNodeCounts[i] - 1) {
				index2 = beginIndx + i * maxNodePerCell;
			} else {
				index2 = beginIndx + i * maxNodePerCell + j + 1;
			}
			if (hostIsActiveVec[index1] == true
					&& hostIsActiveVec[index2] == true) {
				node1X = hostTmpVectorLocX[index1];
				node1Y = hostTmpVectorLocY[index1];
				node2X = hostTmpVectorLocX[index2];
				node2Y = hostTmpVectorLocY[index2];
				IndexMap::iterator it = locIndexToAniIndexMap.find(index1);
				if (it == locIndexToAniIndexMap.end()) {
					locIndexToAniIndexMap.insert(
							std::pair<uint, uint>(index1, curIndex));
					curIndex++;
					tmpPos = CVector(node1X, node1Y, 0);
					//aniVal = hostTmpVectorNodeType[index1];
					aniVal = hostTmpVectorTenMag[index1];
					rawAniData.aniNodePosArr.push_back(tmpPos);
					rawAniData.aniNodeVal.push_back(aniVal);
				}
				it = locIndexToAniIndexMap.find(index2);
				if (it == locIndexToAniIndexMap.end()) {
					locIndexToAniIndexMap.insert(
							std::pair<uint, uint>(index2, curIndex));
					curIndex++;
					tmpPos = CVector(node2X, node2Y, 0);
					//aniVal = hostTmpVectorNodeType[index2];
					aniVal = hostTmpVectorTenMag[index2];
					rawAniData.aniNodePosArr.push_back(tmpPos);
					rawAniData.aniNodeVal.push_back(aniVal);
				}
				it = locIndexToAniIndexMap.find(index1);
				uint aniIndex1
= it->second;
				it = locIndexToAniIndexMap.find(index2);
				uint aniIndex2 = it->second;
				LinkAniData linkData;
				linkData.node1Index = aniIndex1;
				linkData.node2Index = aniIndex2;
				rawAniData.memLinks.push_back(linkData);
			}
		}
	}

	// Pass 3: links between internal-node pairs of the same cell that pass
	// the animation pair criterion (aniCri.isPairQualify_M); such nodes get
	// the sentinel value -1 instead of a tension magnitude.
	for (uint i = 0; i < activeCellCount; i++) {
		for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) {
			for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) {
				index1 = i * maxNodePerCell + maxMemNodePerCell + j;
				index2 = i * maxNodePerCell + maxMemNodePerCell + k;
				if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) {
					node1X = hostTmpVectorLocX[index1];
					node1Y = hostTmpVectorLocY[index1];
					node2X = hostTmpVectorLocX[index2];
					node2Y = hostTmpVectorLocY[index2];
					if (aniCri.isPairQualify_M(node1X, node1Y, node2X,
							node2Y)) {
						IndexMap::iterator it = locIndexToAniIndexMap.find(
								index1);
						if (it == locIndexToAniIndexMap.end()) {
							locIndexToAniIndexMap.insert(
									std::pair<uint, uint>(index1, curIndex));
							curIndex++;
							tmpPos = CVector(node1X, node1Y, 0);
							//aniVal = hostTmpVectorNodeType[index1];
							aniVal = -1;
							rawAniData.aniNodePosArr.push_back(tmpPos);
							rawAniData.aniNodeVal.push_back(aniVal);
						}
						it = locIndexToAniIndexMap.find(index2);
						if (it == locIndexToAniIndexMap.end()) {
							locIndexToAniIndexMap.insert(
									std::pair<uint, uint>(index2, curIndex));
							curIndex++;
							tmpPos = CVector(node2X, node2Y, 0);
							//aniVal = hostTmpVectorNodeType[index1];
							aniVal = -1;
							rawAniData.aniNodePosArr.push_back(tmpPos);
							rawAniData.aniNodeVal.push_back(aniVal);
						}
						it = locIndexToAniIndexMap.find(index1);
						uint aniIndex1 = it->second;
						it = locIndexToAniIndexMap.find(index2);
						uint aniIndex2 = it->second;
						LinkAniData linkData;
						linkData.node1Index = aniIndex1;
						linkData.node2Index = aniIndex2;
						rawAniData.internalLinks.push_back(linkData);
					}
				}
			}
		}
	}
	return rawAniData;
}

// Variant of obtainAniRawData that colors nodes with caller-supplied
// per-cell values and additionally records forces, curvature, and Dpp
// levels.  Definition continues past the end of this chunk.
AniRawData SceCells::obtainAniRawDataGivenCellColor(vector<double>& cellColors,
		AnimationCriteria& aniCri, vector<double>& cellsPerimeter,
		vector <double> & cellsDppLevel) { //AliE

	cout << "I am in
obtainAniRawDataGivenCellColor start"<<endl; uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint beginIndx = allocPara_m.bdryNodeCount; assert(cellColors.size() >= activeCellCount); assert(cellsPerimeter.size() == activeCellCount); //AliE AniRawData rawAniData; //cout << "size of potential pairs = " << pairs.size() << endl; // unordered_map is more efficient than map, but it is a c++ 11 feature // and c++ 11 seems to be incompatible with Thrust. IndexMap locIndexToAniIndexMap; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode); thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode); thrust::host_vector<bool> hostIsActiveVec(maxActiveNode); thrust::host_vector<int> hostBondVec(maxActiveNode); thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode); thrust::host_vector<double> hostTmpVectorF_MI_M_x(maxActiveNode);//AAMIRI //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_y(maxActiveNode);//AAMIRI //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_T(maxActiveNode); //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_N(maxActiveNode);//AliE thrust::host_vector<double> hostTmpVectorNodeCurvature(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorExtForceTangent(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorExtForceNormal(maxActiveNode);//AAMIRI thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), 
nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeExtForceNormal.begin())),//AAMIRI thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeExtForceNormal.begin()))//AAMIRI + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpVectorLocX.begin(), hostTmpVectorLocY.begin(), hostTmpVectorF_MI_M_x.begin(), hostTmpVectorF_MI_M_y.begin(),//AAMIRI hostTmpVectorNodeCurvature.begin(), //AAMIRI hostIsActiveVec.begin(), hostBondVec.begin(), hostTmpVectorTenMag.begin(), hostTmpVectorExtForceTangent.begin(), hostTmpVectorExtForceNormal.begin())));//AAMIRI //Copy more than 10 elements is not allowed so, I separate it thrust::copy( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(), //Ali nodes->getInfoVecs().nodeF_MI_M_N.begin() //Ali )), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(),//AliE nodes->getInfoVecs().nodeF_MI_M_N.begin() //AliE )) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple( hostTmpVectorF_MI_M_T.begin(), hostTmpVectorF_MI_M_N.begin() ))); thrust::host_vector<uint> curActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts; thrust::host_vector<uint> curActiveIntnlNodeCounts = cellInfoVecs.activeIntnlNodeCounts; CVector tmpPos; CVector tmpF_MI_M ;//AAmiri CVector tmpExtForce;//AAMIRI double tmpCurv; uint index1; int index2; std::vector<BondInfo> bondInfoVec; double node1X, node1Y; double node2X, node2Y; double node1F_MI_M_x, node1F_MI_M_y;//AAMIRI //AliE double nodeExtForceT, 
/* Membrane/internal passes of obtainAniRawDataGivenCellColor: accumulate per-cell |F_MI_M_N| and record curvature, myosin force, external force and cell rank for every active node. */ nodeExtForceN;//AAMIRI double aniVal; double aniVal2; double tmpF_MI_M_MagN_Int[activeCellCount] /* FIX(review): was [activeCellCount-1]; the loops below write indices 0..activeCellCount-1, i.e. activeCellCount elements, so the old size overflowed by one (and, activeCellCount being unsigned, degenerated to a huge VLA when it was 0) */ ; //AliE //This is how the VTK file is intended to be written. First the memmbraen nodes are going to be written and then internal nodes. //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { tmpF_MI_M_MagN_Int[i]=0.0 ; for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true) { tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); //AliE // tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+sqrt(pow(hostTmpVectorF_MI_M_x[index1],2)+pow(hostTmpVectorF_MI_M_y[index1],2)) ; //AliE tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+fabs(hostTmpVectorF_MI_M_N[index1]) /* FIX(review): was abs(), which can bind to the int overload and truncate the double magnitude */ ; //AliE nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI rawAniData.aniNodeExtForceArr.push_back(tmpExtForce); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } //loop on internal nodes for (uint i=0; i<activeCellCount; i++){ for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true ) { tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI tmpExtForce =
CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI rawAniData.aniNodeExtForceArr.push_back(tmpExtForce); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < maxMemNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (hostIsActiveVec[index1] == true) { index2 = hostBondVec[index1]; if (index2 > index1 && index2 != -1) { BondInfo bond; bond.cellRank1 = i; bond.pos1 = CVector(hostTmpVectorLocX[index1], hostTmpVectorLocY[index1], 0); bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell; bond.pos2 = CVector(hostTmpVectorLocX[index2], hostTmpVectorLocY[index2], 0); bondInfoVec.push_back(bond); } } } } rawAniData.bondsArr = bondInfoVec; uint curIndex = 0; //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (j == curActiveMemNodeCounts[i] - 1) { index2 = beginIndx + i * maxNodePerCell; } else { index2 = beginIndx + i * maxNodePerCell + j + 1; } if (hostIsActiveVec[index1] == true && hostIsActiveVec[index2] == true) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; IndexMap::iterator it = locIndexToAniIndexMap.find(index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali added //aniVal2=dppLevels_Cell[i] ; aniVal2=cellsDppLevel[i] ; rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, 
curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added //aniVal2=dppLevels_Cell[i]; aniVal2=cellsDppLevel[i]; rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.memLinks.push_back(linkData); } } } //loop on internal nodes for (uint i = 0; i < activeCellCount; i++) { // for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) { for (uint k = 0; k < allocPara_m.maxAllNodePerCell; k++) { //Ali //for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { //Ali comment index1 = i * maxNodePerCell + maxMemNodePerCell + j; index2 = i * maxNodePerCell + k; //Ali // index2 = i * maxNodePerCell + maxMemNodePerCell + k; //Ali comment // if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) { if (hostIsActiveVec[index1] && hostIsActiveVec[index2]&& index1 !=index2 ) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) { IndexMap::iterator it = locIndexToAniIndexMap.find( index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added //aniVal2=dppLevels_Cell[i]; aniVal2=cellsDppLevel[i]; 
rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added //aniVal2=dppLevels_Cell[i]; aniVal2=cellsDppLevel[i]; rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.internalLinks.push_back(linkData); } } } } } cout << "I am in obtainAniRawDataGivenCellColor end"<<endl; return rawAniData; } /* Seeds per-cell active membrane/internal node counts and growth progress from host vectors and recomputes totalNodeCountForActiveCells. NOTE(review): initGrowProgVec's size is never asserted against the other two vectors — confirm callers guarantee it matches. */ void SceCells::copyInitActiveNodeCount_M( std::vector<uint>& initMembrActiveNodeCounts, std::vector<uint>& initIntnlActiveNodeCounts, std::vector<double> &initGrowProgVec) { assert( initMembrActiveNodeCounts.size() == initIntnlActiveNodeCounts.size()); totalNodeCountForActiveCells = initMembrActiveNodeCounts.size() * allocPara_m.maxAllNodePerCell; thrust::copy(initMembrActiveNodeCounts.begin(), initMembrActiveNodeCounts.end(), cellInfoVecs.activeMembrNodeCounts.begin()); thrust::copy(initIntnlActiveNodeCounts.begin(), initIntnlActiveNodeCounts.end(), cellInfoVecs.activeIntnlNodeCounts.begin()); thrust::copy(initGrowProgVec.begin(), initGrowProgVec.end(), cellInfoVecs.growthProgress.begin()); } /* Debug dump of per-cell state vectors; blocks waiting on std::cin at the end. */ void SceCells::myDebugFunction() { uint maxActiveNodeCount = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxActiveCellCount = allocPara_m.currentActiveCellCount; std::cout <<
"totalNodeCountforActiveCells: " << totalNodeCountForActiveCells << std::endl; std::cout << "maxAllNodePerCell: " << allocPara_m.maxAllNodePerCell << std::endl; std::cout << "maxActiveCellCount: " << maxActiveCellCount << std::endl; std::cout << "bdryNodeCount: " << allocPara_m.bdryNodeCount << std::endl; std::cout << "grow threshold: " << miscPara.growThreshold << std::endl; std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthProgress[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.isScheduledToGrow[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lastCheckPoint[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeCount; i++) { if (nodes->getInfoVecs().nodeIsActive[i] && nodes->getInfoVecs().nodeCellType[i] == CellIntnl) { std::cout << nodes->getInfoVecs().nodeVelX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.activeIntnlNodeCounts[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.expectedLength[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.smallestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.biggestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lengthDifference[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthXDir[i] << " "; }
std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthYDir[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } /* Debug dump of the divAuxData division buffers; flags jumps > 0.1 between consecutive active membrane x-positions and blocks on std::cin. */ void SceCells::divDebug() { std::cout << "tmpIsActive_M: "; for (uint i = 0; i < divAuxData.tmpIsActive_M.size(); i++) { std::cout << divAuxData.tmpIsActive_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosX_M: "; for (uint i = 0; i < divAuxData.tmpNodePosX_M.size(); i++) { std::cout << divAuxData.tmpNodePosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosY_M : "; for (uint i = 0; i < divAuxData.tmpNodePosY_M.size(); i++) { std::cout << divAuxData.tmpNodePosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCellRank_M : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { std::cout << divAuxData.tmpCellRank_M[i] << " "; } std::cout << std::endl; std::cout << "tmpDivDirX_M : "; for (uint i = 0; i < divAuxData.tmpDivDirX_M.size(); i++) { std::cout << divAuxData.tmpDivDirX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpDivDirY_M : "; for (uint i = 0; i < divAuxData.tmpDivDirY_M.size(); i++) { std::cout << divAuxData.tmpDivDirY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosX_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosX_M.size(); i++) { std::cout << divAuxData.tmpCenterPosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosY_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosY_M.size(); i++) { std::cout << divAuxData.tmpCenterPosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive1_M : "; for (uint i = 0; i < divAuxData.tmpIsActive1_M.size(); i++) { std::cout << divAuxData.tmpIsActive1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos1_M : "; for (uint i = 0; i < divAuxData.tmpXPos1_M.size(); i++) { std::cout << divAuxData.tmpXPos1_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive1_M[i] && divAuxData.tmpIsActive1_M[i - 1] &&
fabs(divAuxData.tmpXPos1_M[i] - divAuxData.tmpXPos1_M[i - 1]) > 0.1) { std::cout << "11111111111111111111111, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "XPos1_onDevice : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { uint index = divAuxData.tmpCellRank_M[i] * allocPara_m.maxAllNodePerCell + j; std::cout << nodes->getInfoVecs().nodeLocX[index] << " "; } } std::cout << std::endl; std::cout << "tmpYPos1_M : "; for (uint i = 0; i < divAuxData.tmpYPos1_M.size(); i++) { std::cout << divAuxData.tmpYPos1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive2_M: "; for (uint i = 0; i < divAuxData.tmpIsActive2_M.size(); i++) { std::cout << divAuxData.tmpIsActive2_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos2_M : "; for (uint i = 0; i < divAuxData.tmpXPos2_M.size(); i++) { std::cout << divAuxData.tmpXPos2_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive2_M[i] && divAuxData.tmpIsActive2_M[i - 1] && fabs(divAuxData.tmpXPos2_M[i] - divAuxData.tmpXPos2_M[i - 1]) > 0.1) { std::cout << "2222222222222222222, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "tmpYPos2_M : "; for (uint i = 0; i < divAuxData.tmpYPos2_M.size(); i++) { std::cout << divAuxData.tmpYPos2_M[i] << " "; } std::cout << std::endl; std::cout << "tmp1InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp1InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp2InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp2InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp1MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp1MemActiveCounts[i] << " "; } std::cout <<
std::endl; std::cout << "tmp2MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp2MemActiveCounts[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } /* For cells whose isScheduledToGrow flag is set (transform_if stencil), AdjustGrowth(halfMax) rewrites the flag, growth progress and last checkpoint; halfMax = maxIntnlNodePerCell / 2. */ void SceCells::adjustGrowthInfo_M() { uint halfMax = allocPara_m.maxIntnlNodePerCell / 2; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), AdjustGrowth(halfMax), thrust::identity<bool>()); } /* Packs rawAniData point and link arrays into a VtkAnimationData for writing. */ VtkAnimationData SceCells::outputVtkData(AniRawData& rawAniData, AnimationCriteria& aniCri) { VtkAnimationData vtkData; for (uint i = 0; i < rawAniData.aniNodePosArr.size(); i++) { PointAniData ptAniData; ptAniData.pos = rawAniData.aniNodePosArr[i]; ptAniData.F_MI_M_MagN_Int= rawAniData.aniNodeF_MI_M_MagN_Int[i]; //AliE ptAniData.dppLevel1= rawAniData.dppLevel[i]; //AliE ptAniData.F_MI_M = rawAniData.aniNodeF_MI_M[i];//AAMIRI ptAniData.colorScale = rawAniData.aniNodeVal[i]; ptAniData.colorScale2 = rawAniData.aniNodeCurvature[i];//AAMIRI ptAniData.rankScale = rawAniData.aniNodeRank[i];//AAMIRI ptAniData.extForce = rawAniData.aniNodeExtForceArr[i];//AAMIRI vtkData.pointsAniData.push_back(ptAniData); } for (uint i = 0; i < rawAniData.internalLinks.size(); i++) { LinkAniData linkData = rawAniData.internalLinks[i]; vtkData.linksAniData.push_back(linkData); } for (uint i = 0; i < rawAniData.memLinks.size(); i++) { LinkAniData linkData = rawAniData.memLinks[i]; vtkData.linksAniData.push_back(linkData); } vtkData.isArrowIncluded =
false; return vtkData; } /* Loads membrane/SCE force parameters from globalConfigVars into device constant-memory symbols. NOTE(review): the cudaMemcpyToSymbol return codes are unchecked, and the calls are still cuda* although the file header is hipified — confirm a compatibility layer maps them. */ void SceCells::copyToGPUConstMem() { double pI_CPU = acos(-1.0); double minLengthCPU = globalConfigVars.getConfigValue("MinLength").toDouble(); cudaMemcpyToSymbol(minLength, &minLengthCPU, sizeof(double)); double minDivisorCPU = globalConfigVars.getConfigValue("MinDivisor").toDouble(); cudaMemcpyToSymbol(minDivisor, &minDivisorCPU, sizeof(double)); cudaMemcpyToSymbol(membrEquLen, &membrPara.membrEquLenCPU, sizeof(double)); cudaMemcpyToSymbol(membrStiff, &membrPara.membrStiffCPU, sizeof(double)); cudaMemcpyToSymbol(membrStiff_Mitotic, &membrPara.membrStiff_Mitotic, sizeof(double)); // Ali June 30 cudaMemcpyToSymbol(pI, &pI_CPU, sizeof(double)); cudaMemcpyToSymbol(bendCoeff, &membrPara.membrBendCoeff, sizeof(double)); cudaMemcpyToSymbol(bendCoeff_Mitotic, &membrPara.membrBendCoeff_Mitotic, sizeof(double));//AAMIRI cudaMemcpyToSymbol(F_Ext_Incline_M2, &membrPara.F_Ext_Incline, sizeof(double)); //Ali uint maxAllNodePerCellCPU = globalConfigVars.getConfigValue( "MaxAllNodeCountPerCell").toInt(); uint maxMembrNodePerCellCPU = globalConfigVars.getConfigValue( "MaxMembrNodeCountPerCell").toInt(); uint maxIntnlNodePerCellCPU = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); cudaMemcpyToSymbol(maxAllNodePerCell, &maxAllNodePerCellCPU, sizeof(uint)); cudaMemcpyToSymbol(maxMembrPerCell, &maxMembrNodePerCellCPU, sizeof(uint)); cudaMemcpyToSymbol(maxIntnlPerCell, &maxIntnlNodePerCellCPU, sizeof(uint)); double sceIntnlBParaCPU_M[5]; double sceIntraParaCPU_M[5]; double sceIntraParaDivCPU_M[5]; double U0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble(); double V0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble(); double k1_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble(); double k2_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble(); double intnlBEffectiveRange = globalConfigVars.getConfigValue( "IntnlBEffectRange").toDouble(); sceIntnlBParaCPU_M[0] = U0_IntnlB;
sceIntnlBParaCPU_M[1] = V0_IntnlB; sceIntnlBParaCPU_M[2] = k1_IntnlB; sceIntnlBParaCPU_M[3] = k2_IntnlB; sceIntnlBParaCPU_M[4] = intnlBEffectiveRange; ////////////////////// //// Block 3 ///////// ////////////////////// double U0_Intra = globalConfigVars.getConfigValue("IntraCell_U0").toDouble(); double V0_Intra = globalConfigVars.getConfigValue("IntraCell_V0").toDouble(); double k1_Intra = globalConfigVars.getConfigValue("IntraCell_k1").toDouble(); double k2_Intra = globalConfigVars.getConfigValue("IntraCell_k2").toDouble(); double intraLinkEffectiveRange = globalConfigVars.getConfigValue( "IntraEffectRange").toDouble(); sceIntraParaCPU_M[0] = U0_Intra; sceIntraParaCPU_M[1] = V0_Intra; sceIntraParaCPU_M[2] = k1_Intra; sceIntraParaCPU_M[3] = k2_Intra; sceIntraParaCPU_M[4] = intraLinkEffectiveRange; ////////////////////// //// Block 4 ///////// ////////////////////// double U0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble(); double V0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble(); double k1_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble(); double k2_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble(); double intraDivEffectiveRange = globalConfigVars.getConfigValue( "IntraDivEffectRange").toDouble(); sceIntraParaDivCPU_M[0] = U0_Intra_Div; sceIntraParaDivCPU_M[1] = V0_Intra_Div; sceIntraParaDivCPU_M[2] = k1_Intra_Div; sceIntraParaDivCPU_M[3] = k2_Intra_Div; sceIntraParaDivCPU_M[4] = intraDivEffectiveRange; /* upload the assembled 5-element parameter blocks (U0, V0, k1, k2, effective range) to device constant memory */ cudaMemcpyToSymbol(grthPrgrCriEnd_M, &growthAuxData.grthProgrEndCPU, sizeof(double)); //cudaMemcpyToSymbol(grthPrgrCriVal_M, &growthPrgrCriVal, sizeof(double)); cudaMemcpyToSymbol(sceIB_M, sceIntnlBParaCPU_M, 5 * sizeof(double)); cudaMemcpyToSymbol(sceII_M, sceIntraParaCPU_M, 5 * sizeof(double)); cudaMemcpyToSymbol(sceIIDiv_M, sceIntraParaDivCPU_M, 5 * sizeof(double)); double IBDivHost[5]; IBDivHost[0] =
globalConfigVars.getConfigValue("SceIntnlB_U0_Div").toDouble(); IBDivHost[1] = globalConfigVars.getConfigValue("SceIntnlB_V0_Div").toDouble(); IBDivHost[2] = globalConfigVars.getConfigValue("SceIntnlB_k1_Div").toDouble(); IBDivHost[3] = globalConfigVars.getConfigValue("SceIntnlB_k2_Div").toDouble(); IBDivHost[4] = globalConfigVars.getConfigValue("IntnlBDivEffectRange").toDouble(); cudaMemcpyToSymbol(sceIBDiv_M, IBDivHost, 5 * sizeof(double)); } /* Membrane growth driver: compute growth speed, adjust it, decide which cells add a node, then insert the nodes. */ void SceCells::handleMembrGrowth_M() { // figure out membr growth speed calMembrGrowSpeed_M(); // figure out which cells will add new point adjustMembrGrowSpeed_M(); decideIfAddMembrNode_M(); // add membr nodes addMembrNodes_M(); //membrDebug(); } /* Per-cell reductions: MaxWInfo picks the max-tension link (plus its index, midpoint and distance); a second reduce_by_key sums membrTensionMag, which is divided by the active membrane node count to get the average; MultiWithLimit maps that average to membrGrowSpeed, capped by membrGrowLimit. */ void SceCells::calMembrGrowSpeed_M() { membrPara.membrGrowCoeff = growthAuxData.prolifDecay * membrPara.membrGrowCoeff_Ori; membrPara.membrGrowLimit = growthAuxData.prolifDecay * membrPara.membrGrowLimit_Ori; // reduce_by_key, find value of max tension and their index thrust::counting_iterator<uint> iBegin(0); uint maxNPerCell = allocPara_m.maxAllNodePerCell; thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().membrTenMagRi.begin(), make_transform_iterator(iBegin, ModuloFunctor(maxNPerCell)), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrDistToRi.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(), cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin(), cellInfoVecs.maxDistToRiVec.begin())), thrust::equal_to<uint>(), MaxWInfo()); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) +
totalNodeCountForActiveCells, nodes->getInfoVecs().membrTensionMag.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.aveTension.begin(), thrust::divides<double>()); // linear relationship with highest tension; capped by a given value thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), MultiWithLimit(membrPara.membrGrowCoeff, membrPara.membrGrowLimit)); } /* Rescales membrGrowSpeed from each cell's membrane/internal node counts via the AdjustMembrGrow functor. */ void SceCells::adjustMembrGrowSpeed_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), AdjustMembrGrow(membrPara.growthConst_N, membrPara.initMembrCt_N, membrPara.initIntnlCt_N)); } /* Integrates membrGrowProgress by dt (SaxpyFunctor), then lets MemGrowFunc set isMembrAddingNode from active count, progress and max link distance. */ void SceCells::decideIfAddMembrNode_M() { // decide if add membrane node given current active node count and // membr growth progress uint curActCellCt = allocPara_m.currentActiveCellCount; thrust::transform(cellInfoVecs.membrGrowSpeed.begin(), cellInfoVecs.membrGrowSpeed.begin() + curActCellCt, cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt)); uint maxMembrNode = allocPara_m.maxMembrNodePerCell; /**Ali thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())) + curActCellCt,
thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemGrowFunc(maxMembrNode)); */ thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin())) + curActCellCt, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemGrowFunc(maxMembrNode)); } /** * Add new membrane elements to cells. * This operation is relatively expensive because of memory rearrangement. */ void SceCells::addMembrNodes_M() { thrust::counting_iterator<uint> iBegin(0); uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin())) + curAcCCount, cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), AddMemNode(maxNodePerCell, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.adhIndxAddr), thrust::identity<bool>()); } /* Debug dump of membrane tension/bend arrays for selected node slots; blocks on std::cin. */ void SceCells::membrDebug() { uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxActiveNodeC = curAcCCount * allocPara_m.maxAllNodePerCell; uint maxNodePC = allocPara_m.maxAllNodePerCell; //uint tmp = 0; //for (uint i = 0; i < curAcCCount; i++) { // tmp +=
cellInfoVecs.isMembrAddingNode[i]; //} //if (tmp != 0) { // tmpDebug = true; //} //if (!tmpDebug) { // return; //} for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTensionMag[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTenMagRi[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidY[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendLeftX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendLeftY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } /* NOTE(review): membrBendRightX is printed twice — the second loop probably meant membrBendRightY */ std::cout << std::endl; for (uint i = 0; i < curAcCCount; i++) { std::cout << "(" << cellInfoVecs.maxTenIndxVec[i] << "," << cellInfoVecs.activeMembrNodeCounts[i] << "," << cellInfoVecs.maxTenRiMidXVec[i] << "," << cellInfoVecs.maxTenRiMidYVec[i] << ")" << std::endl; } int jj; std::cin >> jj; } /* Writes the two daughters' membrane (tmp1/2VecMem) and internal (tmp1/2IntnlVec) nodes into the flat per-cell buffers for cell slot i, and records the active counts. */ void SceCells::assembleVecForTwoCells(uint i) { uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j <
divAuxData.tmp1VecMem.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1VecMem[j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1VecMem[j].y; divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } } for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j < divAuxData.tmp2VecMem.size()) { divAuxData.tmpXPos2_M[index] = divAuxData.tmp2VecMem[j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2VecMem[j].y; divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1MemActiveCounts.push_back(divAuxData.tmp1VecMem.size()); divAuxData.tmp2MemActiveCounts.push_back(divAuxData.tmp2VecMem.size()); for (uint j = membThreshold; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; uint shift_j = j - membThreshold; if (shift_j < divAuxData.tmp1IntnlVec.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].y; divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } if (shift_j < divAuxData.tmp2IntnlVec.size()) { divAuxData.tmpXPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].y; divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1InternalActiveCounts.push_back( divAuxData.tmp1IntnlVec.size()); divAuxData.tmp2InternalActiveCounts.push_back( divAuxData.tmp2IntnlVec.size()); } /* Translates each daughter's internal nodes so their centroid lands on the given new cell center. NOTE(review): divides by tmp*IntnlVec.size() — an empty vector would divide by zero; confirm callers guarantee non-empty. */ void SceCells::shiftIntnlNodesByCellCenter(CVector cell1Center, CVector cell2Center) { CVector tmpCell1Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) { tmpCell1Center = tmpCell1Center + divAuxData.tmp1IntnlVec[j]; } tmpCell1Center = tmpCell1Center / divAuxData.tmp1IntnlVec.size(); CVector shiftVec1 = cell1Center - tmpCell1Center; for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) {
divAuxData.tmp1IntnlVec[j] = divAuxData.tmp1IntnlVec[j] + shiftVec1; } CVector tmpCell2Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { tmpCell2Center = tmpCell2Center + divAuxData.tmp2IntnlVec[j]; } tmpCell2Center = tmpCell2Center / divAuxData.tmp2IntnlVec.size(); CVector shiftVec2 = cell2Center - tmpCell2Center; for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { divAuxData.tmp2IntnlVec[j] = divAuxData.tmp2IntnlVec[j] + shiftVec2; } } /* Sorts each daughter's membrane nodes and closes the contour with interpolated points (obtainPtsBetween, spacing memNewSpacing), asserting the membrane-node capacity at the end. */ void SceCells::processMemVec(std::vector<VecVal>& tmp1, std::vector<VecVal>& tmp2) { divAuxData.tmp1VecMem.clear(); divAuxData.tmp2VecMem.clear(); uint membThreshold = allocPara_m.maxMembrNodePerCell; std::sort(tmp1.begin(), tmp1.end()); std::sort(tmp2.begin(), tmp2.end()); //assert(tmp1.size() < allocPara_m.maxMembrNodePerCell); //assert(tmp2.size() < allocPara_m.maxMembrNodePerCell); uint maxDivMembrNodeCount1 = allocPara_m.maxMembrNodePerCell - tmp1.size(); uint maxDivMembrNodeCount2 = allocPara_m.maxMembrNodePerCell - tmp2.size(); std::vector<CVector> ptsBetween1, ptsBetween2; // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. if (tmp1.size() >= 1) { ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, memNewSpacing, maxDivMembrNodeCount1); } // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen.
if (tmp2.size() >= 1) { ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, memNewSpacing, maxDivMembrNodeCount2); } for (uint j = 0; j < tmp1.size(); j++) { divAuxData.tmp1VecMem.push_back(tmp1[j].vec); } for (uint j = 0; j < tmp2.size(); j++) { divAuxData.tmp2VecMem.push_back(tmp2[j].vec); } for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); } for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); } assert(divAuxData.tmp1VecMem.size() <= membThreshold); assert(divAuxData.tmp2VecMem.size() <= membThreshold); } /* Splits cell i's active nodes into membrane (j < membThreshold) and internal lists. */ void SceCells::obtainMembrAndIntnlNodes(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes) { membrNodes.clear(); intnlNodes.clear(); uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (divAuxData.tmpIsActive_M[index] != true) { continue; } double posX = divAuxData.tmpNodePosX_M[index]; double posY = divAuxData.tmpNodePosY_M[index]; if (j < membThreshold) { // means node type is membrane CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } } } /* Returns cell i's stored center position (tmpCenterPosX/Y). */ CVector SceCells::obtainCenter(uint i) { double oldCenterX = divAuxData.tmpCenterPosX_M[i]; double oldCenterY = divAuxData.tmpCenterPosY_M[i]; CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } /* Brute-force major-axis search over membrane-node directions (O(n^2) in membrane nodes); also returns the extent along that axis via lenAlongMajorAxis. */ CVector SceCells::calDivDir_MajorAxis(CVector center, vector<CVector>& membrNodes, double& lenAlongMajorAxis) { // not the optimal algorithm but easy to code double maxDiff = 0; CVector majorAxisDir; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double min = 0, max = 0; for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; double
tmpVecProduct = tmpDir2 * tmpUnitDir; if (tmpVecProduct < min) { min = tmpVecProduct; } if (tmpVecProduct > max) { max = tmpVecProduct; } } double diff = max - min; if (diff > maxDiff) { maxDiff = diff; majorAxisDir = tmpUnitDir; } } lenAlongMajorAxis = maxDiff; return majorAxisDir; } //A&A double SceCells::calLengthAlongHertwigAxis(CVector divDir, CVector center, vector<CVector>& membrNodes) { CVector divDirUnit = divDir.getUnitVector(); double minUnit = 0, maxUnit = 0; double minOveral = 0, maxOveral = 0; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double tmpVecProductUnit = divDirUnit * tmpUnitDir; double tmpVecProductOveral = divDirUnit * tmpDir; if (tmpVecProductUnit < minUnit) { minUnit = tmpVecProductUnit; minOveral = tmpVecProductOveral; } if (tmpVecProductUnit > maxUnit) { maxUnit = tmpVecProductUnit; maxOveral = tmpVecProductOveral; } } double lenAlongHertwigAxis = maxOveral - minOveral; return lenAlongHertwigAxis; } void SceCells::obtainTwoNewCenters(CVector& oldCenter, CVector& divDir, double len_MajorAxis, CVector& centerNew1, CVector& centerNew2) { CVector divDirUnit = divDir.getUnitVector(); double lenChange = len_MajorAxis / 2.0 * centerShiftRatio; centerNew1 = oldCenter + lenChange * divDirUnit; centerNew2 = oldCenter - lenChange * divDirUnit; } void SceCells::prepareTmpVec(uint i, CVector divDir, CVector oldCenter, std::vector<VecVal>& tmp1, std::vector<VecVal>& tmp2) { tmp1.clear(); tmp2.clear(); uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; VecVal tmpData; CVector splitDir = divDir.rotateNintyDeg_XY_CC(); for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (j < membThreshold) { // means node type is membrane if (divAuxData.tmpIsActive_M[index] == true) { CVector memPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector 
centerToPosDir = memPos - oldCenter; CVector centerToPosUnit = centerToPosDir.getUnitVector(); CVector crossProduct = Cross(centerToPosDir, splitDir); double dotProduct = centerToPosUnit * splitDir; tmpData.val = dotProduct; tmpData.vec = memPos; if (crossProduct.z >= 0) { // counter-cloce wise tmp1.push_back(tmpData); } else { // cloce wise tmp2.push_back(tmpData); } } } else { if (divAuxData.tmpIsActive_M[index] == true) { CVector internalPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = internalPos - oldCenter; CVector shrinkedPos = centerToPosDir * shrinkRatio + oldCenter; double dotProduct = centerToPosDir * divDir; if (dotProduct > 0) { divAuxData.tmp1IntnlVec.push_back(shrinkedPos); } else { divAuxData.tmp2IntnlVec.push_back(shrinkedPos); } } } } } void SceCells::calCellArea() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( 
maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalTriArea(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellAreaVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } //AAMIRI added to calculate Perimeter of each cell void SceCells::calCellPerim() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalPerim(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellPerimVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } CellsStatsData SceCells::outputPolyCountData() { cout << 
" I am at begining of outpolycount"<< std::flush ; std::cout.flush(); double sumX,sumY,cntr_X_Domain,cntr_Y_Domain ; int BdryApproach ; BdryApproach=1 ; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; cout << " I am before cells area"<< endl ; calCellArea(); cout << " I am after cells area" << endl ; calCellPerim();//AAMIRI CellsStatsData result; cout << " I am after result" << endl ; uint bdryCriteria = globalConfigVars.getConfigValue("BdryCellCriteria").toInt(); // already on host; no need to call thrust::copy thrust::host_vector<int> adhIndxHost = nodes->getInfoVecs().nodeAdhIndxHostCopy; thrust::host_vector<double> growthProVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, growthProVecHost.begin()); thrust::host_vector<double> growthProMembrVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin() + allocPara_m.currentActiveCellCount, growthProMembrVecHost.begin()); thrust::host_vector<uint> activeMembrNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeMembrNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeMembrNodeCountHost.begin()); thrust::host_vector<uint> activeIntnlNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeIntnlNodeCountHost.begin()); thrust::host_vector<double> centerCoordXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> centerCoordYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount, centerCoordXHost.begin()); 
thrust::copy(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount, centerCoordYHost.begin()); thrust::host_vector<double> cellAreaHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> cellPerimHost( allocPara_m.currentActiveCellCount);//AAMIRI thrust::host_vector<double> cellDppHost( allocPara_m.currentActiveCellCount);//Ali thrust::copy(cellInfoVecs.cellAreaVec.begin(), cellInfoVecs.cellAreaVec.begin() + allocPara_m.currentActiveCellCount, cellAreaHost.begin()); thrust::copy(cellInfoVecs.cellPerimVec.begin(), cellInfoVecs.cellPerimVec.begin() + allocPara_m.currentActiveCellCount, cellPerimHost.begin());//AAMIRI thrust::copy(cellInfoVecs.cell_Dpp.begin(), cellInfoVecs.cell_Dpp.begin() + allocPara_m.currentActiveCellCount, cellDppHost.begin());//Ali sumX=0 ; sumY=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { CellStats cellStatsData; cellStatsData.cellGrowthProgress = growthProVecHost[i]; cellStatsData.cellRank = i; bool isBdry = false; std::set<int> neighbors; std::vector<int> neighborsV; //Ali int neighborStrength[10]; //Ali int continousNoAdh = 0; map <int, int> cellAndNeighborRank ; //Ali //std::cout << "printing adhesion indicies "; //for (int ii=0 ; ii<neighborStrength.size() ; ii++){ for (int ii=0 ; ii< 10; ii++){ //Ali neighborStrength[ii]=0 ; } cellAndNeighborRank.clear(); //Ali for (uint j = 0; j < activeMembrNodeCountHost[i]; j++) { uint index = i * allocPara_m.maxAllNodePerCell + j; //std::cout << adhIndxHost[index] << ","; if (adhIndxHost[index] != -1) { uint adhCellRank = adhIndxHost[index] / allocPara_m.maxAllNodePerCell; //std::cout << adhCellRank << " "; neighbors.insert(adhCellRank); map <int, int>:: iterator iteratorMap=cellAndNeighborRank.find(adhCellRank); //Ali if (iteratorMap==cellAndNeighborRank.end()) { //Ali int NewneighborRank= neighbors.size()-1; //Ali cellAndNeighborRank[adhCellRank]=NewneighborRank; //Ali 
neighborStrength[NewneighborRank]=neighborStrength[NewneighborRank]+1 ; //Ali neighborsV.push_back(adhCellRank); //Ali } else { //Ali int oldNeighborRank=(*iteratorMap).second ; neighborStrength[oldNeighborRank]=neighborStrength[oldNeighborRank]+1 ; //Ali } continousNoAdh = 0; } else { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } } if (j == activeMembrNodeCountHost[i] - 1 && adhIndxHost[index] == -1) { int k = 0; uint indexNew; while (k < activeMembrNodeCountHost[i] - 1) { indexNew = i * allocPara_m.maxAllNodePerCell + k; if (adhIndxHost[indexNew] == -1) { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } k++; } else { break; } } } } cellStatsData.isBdryCell = isBdry; cellStatsData.numNeighbors = neighbors.size(); cellStatsData.currentActiveMembrNodes = activeMembrNodeCountHost[i]; cellStatsData.currentActiveIntnlNodes = activeIntnlNodeCountHost[i]; cellStatsData.neighborVec = neighbors; cellStatsData.neighborVecV = neighborsV; //Ali for (int iiii=0; iiii<10 ; iiii++){ cellStatsData.cellNeighborStrength[iiii] = neighborStrength[iiii]; } //Ali cellStatsData.membrGrowthProgress = growthProMembrVecHost[i]; cellStatsData.cellCenter = CVector(centerCoordXHost[i], centerCoordYHost[i], 0); cellStatsData.cellArea = cellAreaHost[i]; cellStatsData.cellPerim = cellPerimHost[i];//AAMIRI cellStatsData.cellDpp = cellDppHost[i];//Ali result.cellsStats.push_back(cellStatsData); sumX=sumX+cellStatsData.cellCenter.x ; sumY=sumY+cellStatsData.cellCenter.y ; } //Ali if (BdryApproach==2) { cout << "sumX=" << sumX << endl ; cout << "sumY=" << sumY << endl ; cntr_X_Domain=sumX/result.cellsStats.size() ; cntr_Y_Domain=sumY/result.cellsStats.size() ; cout << "cntr_X=" << cntr_X_Domain << endl ; cout << "cntr_Y=" << cntr_Y_Domain << endl ; double R_Max ; double Distance ; R_Max=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) 
+pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > R_Max) { R_Max=Distance ; } } cout << "R_Max=" << R_Max << endl ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > 0.9* R_Max) { result.cellsStats[i].isBdryCell = true; cout << "isBdryCell"<< i<< endl ; } else { result.cellsStats[i].isBdryCell = false; cout << "isNormalCell"<< i << endl ; } } } //Ali cout << "I want to write data" << endl ; // ofstream Stress_Strain_Single ; //Stress_Strain_Single.open("Stress_Strain_Single.txt"); //Stress_Strain_Single.close() ; //Ali result.MaxDistanceX=abs(centerCoordXHost[1]-centerCoordXHost[0]); //Ali result.Cells_Extrem_Loc[0]=Tisu_MinX; result.Cells_Extrem_Loc[1]=Tisu_MaxX; result.Cells_Extrem_Loc[2]=Tisu_MinY; result.Cells_Extrem_Loc[3]=Tisu_MaxY ; result.F_Ext_Out=membrPara.F_Ext_Incline*curTime ; //if (dt==curTime) { //result.Init_Displace=MaxX-MinX ; // } //Ali return result; } __device__ bool bigEnough(double& num) { if (num > minDivisor) { return true; } else { return false; } } __device__ double cross_Z(double vecA_X, double vecA_Y, double vecB_X, double vecB_Y) { return vecA_X * vecB_Y - vecA_Y * vecB_X; } /* __device__ double calBendMulti(double& angle, uint activeMembrCt) { double equAngle = PI - PI / activeMembrCt; return bendCoeff * (angle - equAngle); } */ //AAMIRI __device__ double calBendMulti_Mitotic(double& angle, uint activeMembrCt, double& progress, double mitoticCri) { double equAngle = PI - PI / activeMembrCt; if (progress <= mitoticCri){ return bendCoeff * (angle - equAngle);} else{ return (angle - equAngle)*(bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri)/(1.0 - mitoticCri)); } } void SceCells::applySceCellDisc_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell 
= allocPara_m.maxMembrNodePerCell; thrust::counting_iterator<uint> iBegin(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + 
totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //Ali added for cell pressure calculation nodes->getInfoVecs().nodeF_MI_M_y.begin())),// ALi added for cell pressure calculation AddSceCellForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M)); } __device__ void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } //Ali function added for eventually computing pressure for each cells __device__ void calAndAddIB_M2(double& xPos, 
double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength; F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void calAndAddII_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIIDiv_M[4]) { forceValue = -sceIIDiv_M[0] / sceIIDiv_M[2] * exp(-linkLength / sceIIDiv_M[2]) + sceIIDiv_M[1] / sceIIDiv_M[3] * exp(-linkLength / 
sceIIDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIIDiv_M[4]) + (1.0 - percent) * sceII_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceIIDiv_M[0]) + (1.0 - percent) * sceII_M[0]; double intraPara1 = percent * (sceIIDiv_M[1]) + (1.0 - percent) * sceII_M[1]; double intraPara2 = percent * (sceIIDiv_M[2]) + (1.0 - percent) * sceII_M[2]; double intraPara3 = percent * (sceIIDiv_M[3]) + (1.0 - percent) * sceII_M[3]; forceValue = -intraPara0 / intraPara2 * exp(-linkLength / intraPara2) + intraPara1 / intraPara3 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceII_M[4]) { forceValue = -sceII_M[0] / sceII_M[2] * exp(-linkLength / sceII_M[2]) + sceII_M[1] / sceII_M[3] * exp(-linkLength / sceII_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; }
5ce39274d5642a23971afbe8cc44a6d5ac1a8153.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <kernel.h> __global__ void saxpy(int n, float a, float* x, float* y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) y[i] = a * x[i] + y[i]; }
5ce39274d5642a23971afbe8cc44a6d5ac1a8153.cu
#include <kernel.h> __global__ void saxpy(int n, float a, float* x, float* y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) y[i] = a * x[i] + y[i]; }
1532615083f55a144e4b5571e264ba5ad72541f7.hip
// !!! This is a file automatically generated by hipify!!! // This Program is Written by Abubakr Shafique (abubakr.shafique@gmail.com) #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "Inversion_CUDA.h" __global__ void Inversion_CUDA(unsigned char* Image, int Channels); void Image_Inversion_CUDA(unsigned char* Input_Image, int Height, int Width, int Channels){ unsigned char* Dev_Input_Image = NULL; //allocate the memory in gpu hipMalloc((void**)&Dev_Input_Image, Height * Width * Channels); //copy data from CPU to GPU hipMemcpy(Dev_Input_Image, Input_Image, Height * Width * Channels, hipMemcpyHostToDevice); dim3 Grid_Image(Width, Height); Inversion_CUDA << <Grid_Image, 1 >> >(Dev_Input_Image, Channels); //copy processed data back to cpu from gpu hipMemcpy(Input_Image, Dev_Input_Image, Height * Width * Channels, hipMemcpyDeviceToHost); //free gpu mempry hipFree(Dev_Input_Image); } __global__ void Inversion_CUDA(unsigned char* Image, int Channels){ int x = blockIdx.x; int y = blockIdx.y; int idx = (x + y * gridDim.x) * Channels; for (int i = 0; i < Channels; i++){ Image[idx + i] = 255 - Image[idx + i]; } }
1532615083f55a144e4b5571e264ba5ad72541f7.cu
// This Program is Written by Abubakr Shafique (abubakr.shafique@gmail.com) #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "Inversion_CUDA.h" __global__ void Inversion_CUDA(unsigned char* Image, int Channels); void Image_Inversion_CUDA(unsigned char* Input_Image, int Height, int Width, int Channels){ unsigned char* Dev_Input_Image = NULL; //allocate the memory in gpu cudaMalloc((void**)&Dev_Input_Image, Height * Width * Channels); //copy data from CPU to GPU cudaMemcpy(Dev_Input_Image, Input_Image, Height * Width * Channels, cudaMemcpyHostToDevice); dim3 Grid_Image(Width, Height); Inversion_CUDA << <Grid_Image, 1 >> >(Dev_Input_Image, Channels); //copy processed data back to cpu from gpu cudaMemcpy(Input_Image, Dev_Input_Image, Height * Width * Channels, cudaMemcpyDeviceToHost); //free gpu mempry cudaFree(Dev_Input_Image); } __global__ void Inversion_CUDA(unsigned char* Image, int Channels){ int x = blockIdx.x; int y = blockIdx.y; int idx = (x + y * gridDim.x) * Channels; for (int i = 0; i < Channels; i++){ Image[idx + i] = 255 - Image[idx + i]; } }
8c73f86a7654d4a433d6202019f15fb47c2a205d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #define CHECK(call) { \ const hipError_t error = call; \ if (error != hipSuccess) { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \ exit(1); \ } \ } \ void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i=0; i<N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("Array do not match\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); return; } void initialData(float *ip, int size) { time_t t; srand((unsigned) time(&t)); for (int i=0; i<size; i++) { ip[i] = (float) (rand() & 0xFF) / 10.0f; } return; } void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int idx=0; idx<N; idx++) { C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnGPU(float *A, float *B, float *C) { // int i = threadIdx.x; // int i = blockIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; C[i] = A[i] + B[i]; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int dev = 0; hipSetDevice(dev); int nElem = 32; printf("Vector size %d\n", nElem); size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *) malloc(nBytes); h_B = (float *) malloc(nBytes); hostRef = (float *) malloc(nBytes); gpuRef = (float *) malloc(nBytes); initialData(h_A, nElem); initialData(h_B, nElem); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); float *d_A, *d_B, *d_C; hipMalloc((float **)&d_A, nBytes); hipMalloc((float **)&d_B, nBytes); hipMalloc((float **)&d_C, nBytes); hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_C, gpuRef, nBytes, hipMemcpyHostToDevice); // dim3 block(nElem); // dim3 grid(1); dim3 block(1); dim3 grid(nElem); hipLaunchKernelGGL(( 
sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C); printf("Execution configure <<<%d, %d>>>\n", grid.x, block.x); hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost); sumArraysOnHost(h_A, h_B, hostRef, nElem); checkResult(hostRef, gpuRef, nElem); hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(hostRef); free(gpuRef); hipDeviceReset(); return(0); }
8c73f86a7654d4a433d6202019f15fb47c2a205d.cu
#include <cuda_runtime.h> #include <stdio.h> #define CHECK(call) { \ const cudaError_t error = call; \ if (error != cudaSuccess) { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \ exit(1); \ } \ } \ void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i=0; i<N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("Array do not match\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); return; } void initialData(float *ip, int size) { time_t t; srand((unsigned) time(&t)); for (int i=0; i<size; i++) { ip[i] = (float) (rand() & 0xFF) / 10.0f; } return; } void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int idx=0; idx<N; idx++) { C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnGPU(float *A, float *B, float *C) { // int i = threadIdx.x; // int i = blockIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; C[i] = A[i] + B[i]; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int dev = 0; cudaSetDevice(dev); int nElem = 32; printf("Vector size %d\n", nElem); size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *) malloc(nBytes); h_B = (float *) malloc(nBytes); hostRef = (float *) malloc(nBytes); gpuRef = (float *) malloc(nBytes); initialData(h_A, nElem); initialData(h_B, nElem); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); float *d_A, *d_B, *d_C; cudaMalloc((float **)&d_A, nBytes); cudaMalloc((float **)&d_B, nBytes); cudaMalloc((float **)&d_C, nBytes); cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice); // dim3 block(nElem); // dim3 grid(1); dim3 block(1); dim3 grid(nElem); sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C); printf("Execution 
configure <<<%d, %d>>>\n", grid.x, block.x); cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); sumArraysOnHost(h_A, h_B, hostRef, nElem); checkResult(hostRef, gpuRef, nElem); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(hostRef); free(gpuRef); cudaDeviceReset(); return(0); }
b1de11ca759162a28ed759808598e4f249360e82.hip
// !!! This is a file automatically generated by hipify!!! // Sphere geometry container. // ----------------------------------------------------------------------------- // Copyright (C) 2012, See authors // // This program is open source and distributed under the New BSD License. See // license for more detail. // ----------------------------------------------------------------------------- #include <SphereContainer.h> #include <HyperCubes.h> #include <Primitives/SphereCone.h> #include <Primitives/HyperCube.h> #include <SphereGeometry.h> #include <algorithm> #include <ostream> #include <thrust/for_each.h> #include <thrust/copy.h> SphereContainer::SphereContainer(SpheresGeometry& spheres, thrust::device_vector<unsigned int>& indices) : spheres(spheres), indices1(indices), indices2(indices.capacity()), currentIndices(indices1), nextIndices(indices2), doneIndices(indices.capacity()) { nextIndices.resize(0); doneIndices.resize(0); } struct CreateCones { __host__ __device__ SphereCone operator()(const thrust::tuple<SignedAxis, float2, float2, float2, float2, float2> c) const { const HyperCube cube(thrust::get<0>(c), thrust::get<1>(c), thrust::get<2>(c), thrust::get<3>(c), thrust::get<4>(c), thrust::get<5>(c)); return SphereCone::FromCube(cube); } }; static CreateCones createCones; __constant__ SphereCone d_cone; // __constant__ float d_invSinToAngle; // __constant__ float d_cosToAngleSqr; struct CompareConeSphere { CompareConeSphere(thrust::device_vector<SphereCone>& cones, unsigned int index) { SphereCone* cone = thrust::raw_pointer_cast(cones.data()) + index; hipMemcpyToSymbol(d_cone, (void*)cone, sizeof(SphereCone), 0, hipMemcpyDeviceToDevice); // const float invSinToAngle = 1.0f / std::sin(spreadAngle); // const float cosToAngleSqr = std::cos(spreadAngle) * cos(spreadAngle); // hipMemcpyToSymbol(d_invSinToAngle, (void*)&invSinToAngle, sizeof(float), 0, hipMemcpyHostToDevice); // hipMemcpyToSymbol(d_cosToAngleSqr, (void*)&cosToAngleSqr, sizeof(float), 0, 
hipMemcpyHostToDevice); } __device__ bool operator()(const Sphere s) { return d_cone.DoesIntersect(s);//, d_invSinToAngle, d_cosToAngleSqr); } }; SphereContainer::SphereContainer(HyperCubes& cubes, SpheresGeometry& spheres, uint* spherePartitionStart) : spheres(spheres), indices1(spheres.Size()*cubes.Size()), indices2(spheres.Size()*cubes.Size()), currentIndices(indices1), nextIndices(indices2), doneIndices(spheres.Size()*cubes.Size()) { nextIndices.resize(0); doneIndices.resize(0); thrust::device_vector<SphereCone> cones(cubes.Size()); thrust::transform(cubes.Begin(), cubes.End(), cones.begin(), createCones); //std::cout << cones << std::endl; spherePartitionStart[0] = 0; for (int c = 0; c < cubes.Size(); ++c) { CompareConeSphere compareConeSphere(cones, c); UintIterator beginIndices = BeginCurrentIndices() + spherePartitionStart[c]; UintIterator itr = thrust::copy_if(thrust::counting_iterator<unsigned int>(0), thrust::counting_iterator<unsigned int>(spheres.Size()), spheres.BeginSpheres(), beginIndices, compareConeSphere); spherePartitionStart[c+1] = spherePartitionStart[c] + (itr - beginIndices); //std::cout << spherePartitionStart[c+1] << " = " << spherePartitionStart[c] << " + " << (itr - beginIndices) << std::endl; } const size_t currentSize = spherePartitionStart[cubes.Size()]; currentIndices.resize(currentSize); } struct PartitionLeftRight { unsigned int* nextIndices; PartitionLeftRight(thrust::device_vector<unsigned int>& nextIs) : nextIndices(thrust::raw_pointer_cast(nextIs.data())) {} __device__ void operator()(thrust::tuple<PartitionSide, unsigned int, unsigned int, // partitionSide, leftIndex, rightIndex, unsigned int> input) { // index const PartitionSide side = thrust::get<0>(input); const unsigned int sphereIndex = thrust::get<3>(input); if (side & LEFT) { const unsigned int leftIndex = thrust::get<1>(input); nextIndices[leftIndex] = sphereIndex; } if (side & RIGHT) { const unsigned int rightIndex = thrust::get<2>(input); nextIndices[rightIndex] = 
sphereIndex; } } }; void SphereContainer::Partition(thrust::device_vector<PartitionSide>& partitionSides, thrust::device_vector<unsigned int>& leftIndices, thrust::device_vector<unsigned int>& rightIndices) { // std::cout << "--SphereContainer::Partition--:" << std::endl; const unsigned int nextSize = rightIndices[rightIndices.size()-1]; nextIndices.resize(nextSize); thrust::zip_iterator<thrust::tuple<PartitionSideIterator, UintIterator, UintIterator, UintIterator> > input = thrust::make_zip_iterator(thrust::make_tuple(partitionSides.begin(), leftIndices.begin(), rightIndices.begin(), currentIndices.begin())); PartitionLeftRight partitionLeftRight(nextIndices); thrust::for_each(input, input + CurrentSize(), partitionLeftRight); // TODO change to device_vector.swap, but that crashes right now std::swap(currentIndices, nextIndices); // currentIndices.swap(nextIndices); } __constant__ unsigned int d_leafIndexOffset; struct PartitionLeafsKernel { // Rays unsigned int *nextIndices; unsigned int *leafIndices; // Node values bool* leafMarkers; uint2* nodePartitions; unsigned int* nodeLeafIndices; PartitionLeafsKernel(thrust::device_vector<unsigned int>& nIndices, thrust::device_vector<unsigned int>& lIndices, thrust::device_vector<bool>& lMarkers, thrust::device_vector<uint2>& nPartitions, thrust::device_vector<unsigned int>& nlIndices, const unsigned int leafIndexOffset) : nextIndices(thrust::raw_pointer_cast(nIndices.data())), leafIndices(thrust::raw_pointer_cast(lIndices.data()) + leafIndexOffset), leafMarkers(thrust::raw_pointer_cast(lMarkers.data())), nodePartitions(thrust::raw_pointer_cast(nPartitions.data())), nodeLeafIndices(thrust::raw_pointer_cast(nlIndices.data())) {} __host__ __device__ unsigned int operator()(const thrust::tuple<unsigned int, unsigned int > input, // owner, sphereIndex const unsigned int threadId) const { const unsigned int owner = thrust::get<0>(input); const unsigned int sphereIndex = thrust::get<1>(input); const unsigned int nodeLeafIndex 
= nodeLeafIndices[owner]; const bool isLeaf = leafMarkers[owner]; if (isLeaf) { uint2 partitioning = nodePartitions[owner]; unsigned int partitionIndex = threadId - partitioning.x; unsigned int leafIndex = nodeLeafIndex + partitionIndex; leafIndices[leafIndex] = sphereIndex; return leafIndex; } else { const unsigned int index = threadId - nodeLeafIndex; nextIndices[index] = sphereIndex; return index; } } }; void SphereContainer::PartitionLeafs(thrust::device_vector<bool>& isLeaf, thrust::device_vector<unsigned int>& leafNodeIndices, thrust::device_vector<uint2>& spherePartitions, thrust::device_vector<unsigned int>& owners) { /* std::cout << "--SphereContainer::PartitionLeafs--:" << std::endl; std::cout << "isLeaf:\n" << isLeaf << std::endl; std::cout << "leafNodeIndices:\n" << leafNodeIndices << std::endl; std::cout << "spherePartitions:\n" << spherePartitions << std::endl; std::cout << "owners:\n" << owners << std::endl; std::cout << ToString() << std::endl; */ const unsigned int newLeafs = leafNodeIndices[leafNodeIndices.size()-1]; const unsigned int prevLeafIndiceAmount = doneIndices.size(); nextIndices.resize(currentIndices.size() - newLeafs); // shrink next ray buffer doneIndices.resize(doneIndices.size() + newLeafs); // expand leaf // TODO replace owners with work queue thrust::zip_iterator<thrust::tuple<UintIterator, UintIterator> > input = thrust::make_zip_iterator(thrust::make_tuple(owners.begin(), BeginCurrentIndices())); PartitionLeafsKernel partitionLeafs(nextIndices, doneIndices, isLeaf, spherePartitions, leafNodeIndices, prevLeafIndiceAmount); thrust::transform(input, input + CurrentSize(), thrust::counting_iterator<unsigned int>(0), owners.begin(), partitionLeafs); // std::cout << "index moved to:\n" << owners << std::endl; // TODO change to device_vector.swap, but that crashes right now std::swap(currentIndices, nextIndices); //currentIndices.swap(nextIndices); // std::cout << ToString() << std::endl; } std::string SphereContainer::ToString() const 
{ std::ostringstream out; if (CurrentSize() > 0) { out << "Current spheres:"; for (size_t i = 0; i < CurrentSize(); ++i) { const unsigned int id = currentIndices[i]; out << "\n" << i << ": [id: " << id << ", " << spheres.Get(id) << "]"; } if (DoneSize() > 0) out << "\n"; } if (DoneSize() > 0) { out << "Leaf spheres:"; for (size_t i = 0; i < DoneSize(); ++i) { const unsigned int id = doneIndices[i]; out << "\n" << i << ": [id: " << id << ", " << spheres.Get(id) << "]"; } } return out.str(); }
b1de11ca759162a28ed759808598e4f249360e82.cu
// Sphere geometry container. // ----------------------------------------------------------------------------- // Copyright (C) 2012, See authors // // This program is open source and distributed under the New BSD License. See // license for more detail. // ----------------------------------------------------------------------------- #include <SphereContainer.h> #include <HyperCubes.h> #include <Primitives/SphereCone.h> #include <Primitives/HyperCube.h> #include <SphereGeometry.h> #include <algorithm> #include <ostream> #include <thrust/for_each.h> #include <thrust/copy.h> SphereContainer::SphereContainer(SpheresGeometry& spheres, thrust::device_vector<unsigned int>& indices) : spheres(spheres), indices1(indices), indices2(indices.capacity()), currentIndices(indices1), nextIndices(indices2), doneIndices(indices.capacity()) { nextIndices.resize(0); doneIndices.resize(0); } struct CreateCones { __host__ __device__ SphereCone operator()(const thrust::tuple<SignedAxis, float2, float2, float2, float2, float2> c) const { const HyperCube cube(thrust::get<0>(c), thrust::get<1>(c), thrust::get<2>(c), thrust::get<3>(c), thrust::get<4>(c), thrust::get<5>(c)); return SphereCone::FromCube(cube); } }; static CreateCones createCones; __constant__ SphereCone d_cone; // __constant__ float d_invSinToAngle; // __constant__ float d_cosToAngleSqr; struct CompareConeSphere { CompareConeSphere(thrust::device_vector<SphereCone>& cones, unsigned int index) { SphereCone* cone = thrust::raw_pointer_cast(cones.data()) + index; cudaMemcpyToSymbol(d_cone, (void*)cone, sizeof(SphereCone), 0, cudaMemcpyDeviceToDevice); // const float invSinToAngle = 1.0f / std::sin(spreadAngle); // const float cosToAngleSqr = std::cos(spreadAngle) * cos(spreadAngle); // cudaMemcpyToSymbol(d_invSinToAngle, (void*)&invSinToAngle, sizeof(float), 0, cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(d_cosToAngleSqr, (void*)&cosToAngleSqr, sizeof(float), 0, cudaMemcpyHostToDevice); } __device__ bool operator()(const 
Sphere s) { return d_cone.DoesIntersect(s);//, d_invSinToAngle, d_cosToAngleSqr); } }; SphereContainer::SphereContainer(HyperCubes& cubes, SpheresGeometry& spheres, uint* spherePartitionStart) : spheres(spheres), indices1(spheres.Size()*cubes.Size()), indices2(spheres.Size()*cubes.Size()), currentIndices(indices1), nextIndices(indices2), doneIndices(spheres.Size()*cubes.Size()) { nextIndices.resize(0); doneIndices.resize(0); thrust::device_vector<SphereCone> cones(cubes.Size()); thrust::transform(cubes.Begin(), cubes.End(), cones.begin(), createCones); //std::cout << cones << std::endl; spherePartitionStart[0] = 0; for (int c = 0; c < cubes.Size(); ++c) { CompareConeSphere compareConeSphere(cones, c); UintIterator beginIndices = BeginCurrentIndices() + spherePartitionStart[c]; UintIterator itr = thrust::copy_if(thrust::counting_iterator<unsigned int>(0), thrust::counting_iterator<unsigned int>(spheres.Size()), spheres.BeginSpheres(), beginIndices, compareConeSphere); spherePartitionStart[c+1] = spherePartitionStart[c] + (itr - beginIndices); //std::cout << spherePartitionStart[c+1] << " = " << spherePartitionStart[c] << " + " << (itr - beginIndices) << std::endl; } const size_t currentSize = spherePartitionStart[cubes.Size()]; currentIndices.resize(currentSize); } struct PartitionLeftRight { unsigned int* nextIndices; PartitionLeftRight(thrust::device_vector<unsigned int>& nextIs) : nextIndices(thrust::raw_pointer_cast(nextIs.data())) {} __device__ void operator()(thrust::tuple<PartitionSide, unsigned int, unsigned int, // partitionSide, leftIndex, rightIndex, unsigned int> input) { // index const PartitionSide side = thrust::get<0>(input); const unsigned int sphereIndex = thrust::get<3>(input); if (side & LEFT) { const unsigned int leftIndex = thrust::get<1>(input); nextIndices[leftIndex] = sphereIndex; } if (side & RIGHT) { const unsigned int rightIndex = thrust::get<2>(input); nextIndices[rightIndex] = sphereIndex; } } }; void 
SphereContainer::Partition(thrust::device_vector<PartitionSide>& partitionSides, thrust::device_vector<unsigned int>& leftIndices, thrust::device_vector<unsigned int>& rightIndices) { // std::cout << "--SphereContainer::Partition--:" << std::endl; const unsigned int nextSize = rightIndices[rightIndices.size()-1]; nextIndices.resize(nextSize); thrust::zip_iterator<thrust::tuple<PartitionSideIterator, UintIterator, UintIterator, UintIterator> > input = thrust::make_zip_iterator(thrust::make_tuple(partitionSides.begin(), leftIndices.begin(), rightIndices.begin(), currentIndices.begin())); PartitionLeftRight partitionLeftRight(nextIndices); thrust::for_each(input, input + CurrentSize(), partitionLeftRight); // TODO change to device_vector.swap, but that crashes right now std::swap(currentIndices, nextIndices); // currentIndices.swap(nextIndices); } __constant__ unsigned int d_leafIndexOffset; struct PartitionLeafsKernel { // Rays unsigned int *nextIndices; unsigned int *leafIndices; // Node values bool* leafMarkers; uint2* nodePartitions; unsigned int* nodeLeafIndices; PartitionLeafsKernel(thrust::device_vector<unsigned int>& nIndices, thrust::device_vector<unsigned int>& lIndices, thrust::device_vector<bool>& lMarkers, thrust::device_vector<uint2>& nPartitions, thrust::device_vector<unsigned int>& nlIndices, const unsigned int leafIndexOffset) : nextIndices(thrust::raw_pointer_cast(nIndices.data())), leafIndices(thrust::raw_pointer_cast(lIndices.data()) + leafIndexOffset), leafMarkers(thrust::raw_pointer_cast(lMarkers.data())), nodePartitions(thrust::raw_pointer_cast(nPartitions.data())), nodeLeafIndices(thrust::raw_pointer_cast(nlIndices.data())) {} __host__ __device__ unsigned int operator()(const thrust::tuple<unsigned int, unsigned int > input, // owner, sphereIndex const unsigned int threadId) const { const unsigned int owner = thrust::get<0>(input); const unsigned int sphereIndex = thrust::get<1>(input); const unsigned int nodeLeafIndex = nodeLeafIndices[owner]; 
const bool isLeaf = leafMarkers[owner]; if (isLeaf) { uint2 partitioning = nodePartitions[owner]; unsigned int partitionIndex = threadId - partitioning.x; unsigned int leafIndex = nodeLeafIndex + partitionIndex; leafIndices[leafIndex] = sphereIndex; return leafIndex; } else { const unsigned int index = threadId - nodeLeafIndex; nextIndices[index] = sphereIndex; return index; } } }; void SphereContainer::PartitionLeafs(thrust::device_vector<bool>& isLeaf, thrust::device_vector<unsigned int>& leafNodeIndices, thrust::device_vector<uint2>& spherePartitions, thrust::device_vector<unsigned int>& owners) { /* std::cout << "--SphereContainer::PartitionLeafs--:" << std::endl; std::cout << "isLeaf:\n" << isLeaf << std::endl; std::cout << "leafNodeIndices:\n" << leafNodeIndices << std::endl; std::cout << "spherePartitions:\n" << spherePartitions << std::endl; std::cout << "owners:\n" << owners << std::endl; std::cout << ToString() << std::endl; */ const unsigned int newLeafs = leafNodeIndices[leafNodeIndices.size()-1]; const unsigned int prevLeafIndiceAmount = doneIndices.size(); nextIndices.resize(currentIndices.size() - newLeafs); // shrink next ray buffer doneIndices.resize(doneIndices.size() + newLeafs); // expand leaf // TODO replace owners with work queue thrust::zip_iterator<thrust::tuple<UintIterator, UintIterator> > input = thrust::make_zip_iterator(thrust::make_tuple(owners.begin(), BeginCurrentIndices())); PartitionLeafsKernel partitionLeafs(nextIndices, doneIndices, isLeaf, spherePartitions, leafNodeIndices, prevLeafIndiceAmount); thrust::transform(input, input + CurrentSize(), thrust::counting_iterator<unsigned int>(0), owners.begin(), partitionLeafs); // std::cout << "index moved to:\n" << owners << std::endl; // TODO change to device_vector.swap, but that crashes right now std::swap(currentIndices, nextIndices); //currentIndices.swap(nextIndices); // std::cout << ToString() << std::endl; } std::string SphereContainer::ToString() const { std::ostringstream out; 
if (CurrentSize() > 0) { out << "Current spheres:"; for (size_t i = 0; i < CurrentSize(); ++i) { const unsigned int id = currentIndices[i]; out << "\n" << i << ": [id: " << id << ", " << spheres.Get(id) << "]"; } if (DoneSize() > 0) out << "\n"; } if (DoneSize() > 0) { out << "Leaf spheres:"; for (size_t i = 0; i < DoneSize(); ++i) { const unsigned int id = doneIndices[i]; out << "\n" << i << ": [id: " << id << ", " << spheres.Get(id) << "]"; } } return out.str(); }
fb946cf99b80cdd9f3171ff128fc2b29d70c3b21.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "subtract_test.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double2 *a = NULL; hipMalloc(&a, XSIZE*YSIZE); double2 *b = NULL; hipMalloc(&b, XSIZE*YSIZE); double2 *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( subtract_test), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( subtract_test), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( subtract_test), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
fb946cf99b80cdd9f3171ff128fc2b29d70c3b21.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "subtract_test.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double2 *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); double2 *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); double2 *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); subtract_test<<<gridBlock,threadBlock>>>(a,b,c); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { subtract_test<<<gridBlock,threadBlock>>>(a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { subtract_test<<<gridBlock,threadBlock>>>(a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1dee2eca97f61d2495d8d937f7e021ea36de7f4a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "add.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float *y = NULL; hipMalloc(&y, XSIZE*YSIZE); float *z = NULL; hipMalloc(&z, XSIZE*YSIZE); float *deltaX = NULL; hipMalloc(&deltaX, XSIZE*YSIZE); float *deltaY = NULL; hipMalloc(&deltaY, XSIZE*YSIZE); float *deltaZ = NULL; hipMalloc(&deltaZ, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( add), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,deltaX,deltaY,deltaZ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( add), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,deltaX,deltaY,deltaZ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( add), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,deltaX,deltaY,deltaZ); } auto end = 
steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1dee2eca97f61d2495d8d937f7e021ea36de7f4a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "add.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); float *z = NULL; cudaMalloc(&z, XSIZE*YSIZE); float *deltaX = NULL; cudaMalloc(&deltaX, XSIZE*YSIZE); float *deltaY = NULL; cudaMalloc(&deltaY, XSIZE*YSIZE); float *deltaZ = NULL; cudaMalloc(&deltaZ, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); add<<<gridBlock,threadBlock>>>(x,y,z,deltaX,deltaY,deltaZ); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { add<<<gridBlock,threadBlock>>>(x,y,z,deltaX,deltaY,deltaZ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { add<<<gridBlock,threadBlock>>>(x,y,z,deltaX,deltaY,deltaZ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4368daf56ff6b7d95a1584f51428e3b8c9b83d95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/polar_impl.cuh" #include <math.h> #include <stdint.h> #include <complex.h> #include "plugin/device/cpu/kernel/nnacl/op_base.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/elementwise/elementswise_pub_impl.cuh" constexpr uint kThreadsPerBlock = cuda::elementwise::kThreadsPerBlock; template <typename R> using Complex = mindspore::utils::Complex<R>; template <typename T, typename S> struct PolarFunctor { __device__ __forceinline__ S operator()(const T abs, const T angle) const { S output = 0; output.real(abs * std::cos(angle)); output.imag(abs * std::sin(angle)); return output; } }; template <typename Func, uint vec_size, typename T, typename S> __device__ __forceinline__ void NormalCall(Func func, const T *abs_addr, const T *angle_addr, S *output, uint offset, uint remaining) { uint loop = UP_DIV(remaining, vec_size); for (uint i = threadIdx.x; i < loop; i += blockDim.x) { #pragma unroll for (uint j = 0; j < vec_size; j++) { uint index = i * vec_size + j; if (index >= remaining) { return; } index += offset; output[index] = func(abs_addr[index], angle_addr[index]); } } } template <typename Func, uint vec_size, typename T, typename S> __device__ __forceinline__ void 
VectorizedCall(Func func, const T *abs_addr, const T *angle_addr, S *output, uint offset) { uint tid = threadIdx.x; using VecT = cuda::elementwise::AlignVec<T, vec_size>; using VecS = cuda::elementwise::AlignVec<S, vec_size>; auto vec_abs = reinterpret_cast<const VecT *>(abs_addr + offset); auto vec_angle = reinterpret_cast<const VecT *>(angle_addr + offset); auto vec_output = reinterpret_cast<VecS *>(output + offset); VecT abs = vec_abs[tid]; VecT angle = vec_angle[tid]; VecS out{0}; #pragma unroll for (uint j = 0; j < vec_size; j++) { out.elements_[j] = func(abs.elements_[j], angle.elements_[j]); } vec_output[tid] = out; } template <typename Func, uint vec_size, typename T, typename S> __global__ void PolarVectorized(Func func, const T *abs_addr, const T *angle_addr, S *output, uint num_of_elements) { uint elements_per_block = kThreadsPerBlock * vec_size; for (uint offset = elements_per_block * blockIdx.x; offset < num_of_elements; offset += elements_per_block * gridDim.x) { uint remaining = num_of_elements - offset; if (remaining < elements_per_block) { NormalCall<Func, vec_size, T>(func, abs_addr, angle_addr, output, offset, remaining); } else { VectorizedCall<Func, vec_size, T>(func, abs_addr, angle_addr, output, offset); } } } template <typename T, typename S> void CalPolar(const size_t size, const T *abs, const T *angle, S *output, const uint32_t &device_id, hipStream_t cuda_stream) { constexpr uint vec_size = cuda::elementwise::VecSize<T>(); const auto block_x = uint(kThreadsPerBlock); const uint elements_per_block = kThreadsPerBlock * vec_size; const auto grid_x = uint(UP_DIV(size, elements_per_block)); dim3 block{block_x}; dim3 grid{grid_x}; PolarFunctor<T, S> functor{}; hipLaunchKernelGGL(( PolarVectorized<PolarFunctor<T, S>, vec_size, T, S>) , dim3(grid), dim3(block), 0, cuda_stream, functor, abs, angle, output, size); return; } template CUDA_LIB_EXPORT void CalPolar<float, Complex<float>>(const size_t size, const float *abs, const float *angle, 
Complex<float> *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolar<double, Complex<double>>(const size_t size, const double *abs, const double *angle, Complex<double> *output, const uint32_t &device_id, hipStream_t cuda_stream);
4368daf56ff6b7d95a1584f51428e3b8c9b83d95.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/polar_impl.cuh" #include <math.h> #include <stdint.h> #include <complex.h> #include "plugin/device/cpu/kernel/nnacl/op_base.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/elementwise/elementswise_pub_impl.cuh" constexpr uint kThreadsPerBlock = cuda::elementwise::kThreadsPerBlock; template <typename R> using Complex = mindspore::utils::Complex<R>; template <typename T, typename S> struct PolarFunctor { __device__ __forceinline__ S operator()(const T abs, const T angle) const { S output = 0; output.real(abs * std::cos(angle)); output.imag(abs * std::sin(angle)); return output; } }; template <typename Func, uint vec_size, typename T, typename S> __device__ __forceinline__ void NormalCall(Func func, const T *abs_addr, const T *angle_addr, S *output, uint offset, uint remaining) { uint loop = UP_DIV(remaining, vec_size); for (uint i = threadIdx.x; i < loop; i += blockDim.x) { #pragma unroll for (uint j = 0; j < vec_size; j++) { uint index = i * vec_size + j; if (index >= remaining) { return; } index += offset; output[index] = func(abs_addr[index], angle_addr[index]); } } } template <typename Func, uint vec_size, typename T, typename S> __device__ __forceinline__ void VectorizedCall(Func func, const T *abs_addr, const T *angle_addr, S *output, uint offset) { 
uint tid = threadIdx.x; using VecT = cuda::elementwise::AlignVec<T, vec_size>; using VecS = cuda::elementwise::AlignVec<S, vec_size>; auto vec_abs = reinterpret_cast<const VecT *>(abs_addr + offset); auto vec_angle = reinterpret_cast<const VecT *>(angle_addr + offset); auto vec_output = reinterpret_cast<VecS *>(output + offset); VecT abs = vec_abs[tid]; VecT angle = vec_angle[tid]; VecS out{0}; #pragma unroll for (uint j = 0; j < vec_size; j++) { out.elements_[j] = func(abs.elements_[j], angle.elements_[j]); } vec_output[tid] = out; } template <typename Func, uint vec_size, typename T, typename S> __global__ void PolarVectorized(Func func, const T *abs_addr, const T *angle_addr, S *output, uint num_of_elements) { uint elements_per_block = kThreadsPerBlock * vec_size; for (uint offset = elements_per_block * blockIdx.x; offset < num_of_elements; offset += elements_per_block * gridDim.x) { uint remaining = num_of_elements - offset; if (remaining < elements_per_block) { NormalCall<Func, vec_size, T>(func, abs_addr, angle_addr, output, offset, remaining); } else { VectorizedCall<Func, vec_size, T>(func, abs_addr, angle_addr, output, offset); } } } template <typename T, typename S> void CalPolar(const size_t size, const T *abs, const T *angle, S *output, const uint32_t &device_id, cudaStream_t cuda_stream) { constexpr uint vec_size = cuda::elementwise::VecSize<T>(); const auto block_x = uint(kThreadsPerBlock); const uint elements_per_block = kThreadsPerBlock * vec_size; const auto grid_x = uint(UP_DIV(size, elements_per_block)); dim3 block{block_x}; dim3 grid{grid_x}; PolarFunctor<T, S> functor{}; PolarVectorized<PolarFunctor<T, S>, vec_size, T, S> <<<grid, block, 0, cuda_stream>>>(functor, abs, angle, output, size); return; } template CUDA_LIB_EXPORT void CalPolar<float, Complex<float>>(const size_t size, const float *abs, const float *angle, Complex<float> *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolar<double, 
Complex<double>>(const size_t size, const double *abs, const double *angle, Complex<double> *output, const uint32_t &device_id, cudaStream_t cuda_stream);
655f0893e527347af4b749f848be691edbe29412.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> const int MAX_THREAD_NUMBER = 1000000; __device__ long long counterArray[MAX_THREAD_NUMBER] = {0}; extern "C" __device__ void bambooProfile(long bambooIndex) { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; long long index = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; counterArray[index]++; }
655f0893e527347af4b749f848be691edbe29412.cu
#include <stdio.h> #include <cuda.h> const int MAX_THREAD_NUMBER = 1000000; __device__ long long counterArray[MAX_THREAD_NUMBER] = {0}; extern "C" __device__ void bambooProfile(long bambooIndex) { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; long long index = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; counterArray[index]++; }
d07845b1cf486e1424df6f83604df869aeac2d03.hip
// !!! This is a file automatically generated by hipify!!! /* Author: Luis Carlos Arias Camacho Student ID: A01364808 ASSIGNMENT 4 */ #include <iostream> #include <cstdio> #include <cmath> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "common.h" #include <hip/hip_runtime.h> #include <chrono> #include <math.h> #define img_dest "Images/" #define default_image "dog1.jpeg" #define C_SIZE 256 using namespace std; //Create input histogram // input - input images // histo - histogram // size - width * height of the image void create_input_histogram(const cv::Mat &input, int * histo, int size){ for (int i = 0; i < size; i++) histo[input.ptr()[i]]++; } //This function is used to normalize an histogram // histo - imput histogram as a one dimentional array of ints // n_histo - output normalized histogram as a one dimentional array of ints // size - size of the histograms void normalize_histogram(int * histo, int * n_histo, int size){ float step = size / (C_SIZE - 1); float sum = 0; for(int i=0; i < C_SIZE; i++){ sum += (float)histo[i]; n_histo[i] = (int)floor(sum / step); } } //Print the histogram void print_histogram(int * histo){ for(int i = 0; i < C_SIZE; i++) printf("%d : %d\n", i, histo[i]); } // Write image in cpu void write_image(const cv::Mat &input, cv::Mat &output, int * n_histo, int size){ for (int i = 0; i < size; i++) output.ptr()[i] = n_histo[input.ptr()[i]]; } // Histogram equalization on cpu // imput - input image //output - output image //imageName - path to achieve the image void equalizer_cpu(const cv::Mat &input, cv::Mat &output, int * histo, int * n_histo, string imageName){ int width = input.cols; int height = input.rows; int size_ = width * height; //Create the histogram for the input image create_input_histogram(input, histo, size_); //Create normalized histogram normalize_histogram(histo, n_histo, size_); //Write image with normalized histogram on output write_image(input, output, n_histo, size_); //Save the image 
cv::imwrite("Images/eq_cpu_" + imageName , output); } //This function converts a colored imege to a grayscale image // input - input image one dimensional array // ouput - output image one dimensional array // width, height - width and height of the images // colorWidthStep - number of color bytes (cols * colors) // grayWidthStep - number of gray bytes __global__ void bgr_to_gray_kernel(unsigned char* input, unsigned char* output, int width, int height, int colorWidthStep, int grayWidthStep){ // 2D Index of current thread const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex < width) && (yIndex < height)){ const int color_tid = yIndex * colorWidthStep + (3 * xIndex); const int gray_tid = yIndex * grayWidthStep + xIndex; const unsigned char blue = input[color_tid]; const unsigned char green = input[color_tid + 1]; const unsigned char red = input[color_tid + 2]; const float gray = red * 0.3f + green * 0.59f + blue * 0.11f; output[gray_tid] = static_cast<unsigned char>(gray); } } // Get histogram with gpu and atomic operations //output - output image int array //histo - histogram of the images as an array // width, height - width and height of the images // grayWidthStep - number of gray bytes __global__ void get_histogram_kernel(unsigned char* output, int* histo,int width, int height, int grayWidthStep){ // 2D Index of current thread const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; // 2D Index of current pixel in the image const int tid = yIndex * grayWidthStep + xIndex; // Shared histogram __shared__ int s_histo[C_SIZE]; // 2D index of shared memory int s_x = threadIdx.x+threadIdx.y*blockDim.x; //Initialize shared histogram to 0 if (s_x < C_SIZE) s_histo[s_x] = 0; __syncthreads(); //Fill shared histogram with the image info if ((xIndex < width) && (yIndex < height)) atomicAdd(&s_histo[(int)output[tid]], 1); __syncthreads(); // 
Copy infro from shared memory to global memory if (s_x < C_SIZE) atomicAdd(&histo[s_x], s_histo[s_x]); } //Calculate normalized histogram in kernel // histogram - original histogram of the image // n_histogram - normalized histogram // size - size of the image in pixels __global__ void get_normalizedHistogram_kernel(int * histogram, int * n_histogram, int size){ //Get histogram index from kernel int hIndex = threadIdx.y * blockDim.x + threadIdx.x; //Share the histogram with shared memory __shared__ int aux[C_SIZE]; // Normalize in GPU if (hIndex < 256 && blockIdx.x == 0 && blockIdx.y == 0){ aux[hIndex] = 0; aux[hIndex] = histogram[hIndex]; __syncthreads(); float step = size / (C_SIZE -1); float sum = 0; for(int i = 0; i <= hIndex; i++) sum += aux[i]; n_histogram[hIndex] = (int)floor(sum / step); } } // Histogram equalization on gpu // imput - input image //output - output image //hist - input image histogram // width, height - width and height of the images // grayWidthStep - number of gray bytes __global__ void equalizer_kernel(unsigned char* input, unsigned char* output, int * hist, int width, int height, int grayWidthStep){ //2D Index of current thread unsigned int xIndex = threadIdx.x + blockIdx.x * blockDim.x; unsigned int yIndex = threadIdx.y + blockIdx.y * blockDim.y; //Thread ID const int tid = yIndex * grayWidthStep + xIndex; //Generate output image if((xIndex < width) && (yIndex < height)){ int index = input[tid]; output[tid] = hist[index]; } } //Call this function to run the image equalization // input - input image // output - black & white output image // eq_output - equalized output image // imageName - path to reach the input image void histogram_equalization(const cv::Mat& input, cv::Mat& output, cv::Mat& eq_output, string imageName){ //Get size of the image size_t colorBytes = input.step * input.rows; size_t grayBytes = output.step * output.rows; int imSize = input.cols * input.rows; //Set device and cpu image arrays and histograms unsigned char 
*d_input, *d_output, *de_output; int * d_histogram, * df_histogram; //Initialize histograms int hisBytes = C_SIZE * sizeof(int); int * histogram = (int *)malloc(hisBytes); int * f_histogram = (int *)malloc(hisBytes); memset(histogram, 0, hisBytes); memset(f_histogram, 0, hisBytes); // Allocate device memory SAFE_CALL(hipMalloc<unsigned char>(&d_input, colorBytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc<unsigned char>(&de_output, grayBytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc<int>(&d_histogram, hisBytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc<int>(&df_histogram, hisBytes), "CUDA Malloc Failed"); // Copy data from OpenCV input image to device memory SAFE_CALL(hipMemcpy(d_input, input.ptr(), colorBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(hipMemset(d_output, 0, grayBytes), "Error setting d_output to 0"); SAFE_CALL(hipMemset(de_output, 0, grayBytes), "Error setting de_output to 0"); SAFE_CALL(hipMemset(d_histogram, 0, hisBytes), "Error setting d_histogram to 0"); SAFE_CALL(hipMemset(df_histogram, 0, hisBytes), "Error setting df_histogram to 0"); const dim3 block(16, 16); const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows/ block.y)); // Launch the color conversion kernel printf("Converting image to black & white\n"); hipLaunchKernelGGL(( bgr_to_gray_kernel) , dim3(grid), dim3(block) , 0, 0, d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), static_cast<int>(output.step)); // Synchronize to check for any kernel launch errors SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); SAFE_CALL(hipMemcpy(output.ptr(), d_output, grayBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed"); //Write the black & white image cv::imwrite("Images/bw_" + imageName , output); // Launch equalization on CPU printf("Equalization on cpu.\n"); float cpuTime = 0.0; auto start_cpu = 
chrono::high_resolution_clock::now(); equalizer_cpu(output, eq_output, histogram, f_histogram, imageName); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; cpuTime = duration_ms.count(); // Set the eq_output image to 0 in order to reuse it in gpu memset(eq_output.ptr(), 0, grayBytes); memset(histogram, 0, hisBytes); memset(f_histogram, 0, hisBytes); printf("\n\n"); //Launch equalization on GPU printf("Equalization on gpu.\n"); float gpuTime = 0.0; auto start_gpu = chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( get_histogram_kernel), dim3(grid), dim3(block) , 0, 0, d_output, d_histogram, input.cols, input.rows, static_cast<int>(output.step)); hipLaunchKernelGGL(( get_normalizedHistogram_kernel), dim3(grid), dim3(block), 0, 0, d_histogram, df_histogram, imSize); hipLaunchKernelGGL(( equalizer_kernel), dim3(grid), dim3(block), 0, 0, d_output, de_output, df_histogram, output.cols, output.rows, static_cast<int>(output.step)); auto end_gpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> gpu_duration_ms = end_gpu - start_gpu; gpuTime += gpu_duration_ms.count(); SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); // SAFE_CALL kernel error SAFE_CALL(hipGetLastError(), "Error with last error"); // Copy device histograms to host histograms SAFE_CALL(hipMemcpy(histogram, d_histogram, hisBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Device Failed"); SAFE_CALL(hipMemcpy(f_histogram, df_histogram, hisBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Device Failed"); SAFE_CALL(hipMemcpy(eq_output.ptr(), de_output, grayBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Device Failed"); //Save the image cv::imwrite("Images/eq_gpu_" + imageName , eq_output); printf("Time in CPU: %f\n", cpuTime); printf("Time in GPU: %f\n", gpuTime); printf("Speedup: %f\n", cpuTime / gpuTime ); // Free the device memory SAFE_CALL(hipFree(d_input), "CUDA Free 
Failed"); SAFE_CALL(hipFree(d_output), "CUDA Free Failed"); SAFE_CALL(hipFree(de_output), "CUDA Free Failed"); SAFE_CALL(hipFree(d_histogram), "CUDA Free Failed"); SAFE_CALL(hipFree(df_histogram), "CUDA Free Failed"); //Free the host memory free(histogram); free(f_histogram); // Reset device SAFE_CALL(hipDeviceReset(), "Error reseting"); } int main(int argc, char *argv[]){ string inputImage; if(argc < 2) inputImage = default_image; else inputImage = argv[1]; // Read input image from the disk cv::Mat input = cv::imread(img_dest + inputImage, CV_LOAD_IMAGE_COLOR); if (input.empty()){ cout << "Image Not Found!" << std::endl; cin.get(); return -1; } //Create output image cv::Mat output(input.rows, input.cols, CV_8UC1); //Create equalized output image cv::Mat eq_output(input.rows, input.cols, CV_8UC1); //Convert image to gray and equalize histogram_equalization(input, output, eq_output, inputImage); //Allow the windows to resize namedWindow("Input", cv::WINDOW_NORMAL); namedWindow("Blac&WhiteInput", cv::WINDOW_NORMAL); namedWindow("Output", cv::WINDOW_NORMAL); cv::resizeWindow("Input", 800, 600); cv::resizeWindow("Blac&WhiteInput", 800, 600); cv::resizeWindow("Output", 800, 600); //Show the input and output imshow("Input", input); imshow("Blac&WhiteInput", output); imshow("Output", eq_output); //Wait for key press cv::waitKey(); return 0; }
d07845b1cf486e1424df6f83604df869aeac2d03.cu
/* Author: Luis Carlos Arias Camacho Student ID: A01364808 ASSIGNMENT 4 */ #include <iostream> #include <cstdio> #include <cmath> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "common.h" #include <cuda_runtime.h> #include <chrono> #include <math.h> #define img_dest "Images/" #define default_image "dog1.jpeg" #define C_SIZE 256 using namespace std; //Create input histogram // input - input images // histo - histogram // size - width * height of the image void create_input_histogram(const cv::Mat &input, int * histo, int size){ for (int i = 0; i < size; i++) histo[input.ptr()[i]]++; } //This function is used to normalize an histogram // histo - imput histogram as a one dimentional array of ints // n_histo - output normalized histogram as a one dimentional array of ints // size - size of the histograms void normalize_histogram(int * histo, int * n_histo, int size){ float step = size / (C_SIZE - 1); float sum = 0; for(int i=0; i < C_SIZE; i++){ sum += (float)histo[i]; n_histo[i] = (int)floor(sum / step); } } //Print the histogram void print_histogram(int * histo){ for(int i = 0; i < C_SIZE; i++) printf("%d : %d\n", i, histo[i]); } // Write image in cpu void write_image(const cv::Mat &input, cv::Mat &output, int * n_histo, int size){ for (int i = 0; i < size; i++) output.ptr()[i] = n_histo[input.ptr()[i]]; } // Histogram equalization on cpu // imput - input image //output - output image //imageName - path to achieve the image void equalizer_cpu(const cv::Mat &input, cv::Mat &output, int * histo, int * n_histo, string imageName){ int width = input.cols; int height = input.rows; int size_ = width * height; //Create the histogram for the input image create_input_histogram(input, histo, size_); //Create normalized histogram normalize_histogram(histo, n_histo, size_); //Write image with normalized histogram on output write_image(input, output, n_histo, size_); //Save the image cv::imwrite("Images/eq_cpu_" + imageName , output); } //This 
function converts a colored imege to a grayscale image // input - input image one dimensional array // ouput - output image one dimensional array // width, height - width and height of the images // colorWidthStep - number of color bytes (cols * colors) // grayWidthStep - number of gray bytes __global__ void bgr_to_gray_kernel(unsigned char* input, unsigned char* output, int width, int height, int colorWidthStep, int grayWidthStep){ // 2D Index of current thread const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex < width) && (yIndex < height)){ const int color_tid = yIndex * colorWidthStep + (3 * xIndex); const int gray_tid = yIndex * grayWidthStep + xIndex; const unsigned char blue = input[color_tid]; const unsigned char green = input[color_tid + 1]; const unsigned char red = input[color_tid + 2]; const float gray = red * 0.3f + green * 0.59f + blue * 0.11f; output[gray_tid] = static_cast<unsigned char>(gray); } } // Get histogram with gpu and atomic operations //output - output image int array //histo - histogram of the images as an array // width, height - width and height of the images // grayWidthStep - number of gray bytes __global__ void get_histogram_kernel(unsigned char* output, int* histo,int width, int height, int grayWidthStep){ // 2D Index of current thread const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; // 2D Index of current pixel in the image const int tid = yIndex * grayWidthStep + xIndex; // Shared histogram __shared__ int s_histo[C_SIZE]; // 2D index of shared memory int s_x = threadIdx.x+threadIdx.y*blockDim.x; //Initialize shared histogram to 0 if (s_x < C_SIZE) s_histo[s_x] = 0; __syncthreads(); //Fill shared histogram with the image info if ((xIndex < width) && (yIndex < height)) atomicAdd(&s_histo[(int)output[tid]], 1); __syncthreads(); // Copy infro from shared memory to global memory if (s_x < 
C_SIZE) atomicAdd(&histo[s_x], s_histo[s_x]); } //Calculate normalized histogram in kernel // histogram - original histogram of the image // n_histogram - normalized histogram // size - size of the image in pixels __global__ void get_normalizedHistogram_kernel(int * histogram, int * n_histogram, int size){ //Get histogram index from kernel int hIndex = threadIdx.y * blockDim.x + threadIdx.x; //Share the histogram with shared memory __shared__ int aux[C_SIZE]; // Normalize in GPU if (hIndex < 256 && blockIdx.x == 0 && blockIdx.y == 0){ aux[hIndex] = 0; aux[hIndex] = histogram[hIndex]; __syncthreads(); float step = size / (C_SIZE -1); float sum = 0; for(int i = 0; i <= hIndex; i++) sum += aux[i]; n_histogram[hIndex] = (int)floor(sum / step); } } // Histogram equalization on gpu // imput - input image //output - output image //hist - input image histogram // width, height - width and height of the images // grayWidthStep - number of gray bytes __global__ void equalizer_kernel(unsigned char* input, unsigned char* output, int * hist, int width, int height, int grayWidthStep){ //2D Index of current thread unsigned int xIndex = threadIdx.x + blockIdx.x * blockDim.x; unsigned int yIndex = threadIdx.y + blockIdx.y * blockDim.y; //Thread ID const int tid = yIndex * grayWidthStep + xIndex; //Generate output image if((xIndex < width) && (yIndex < height)){ int index = input[tid]; output[tid] = hist[index]; } } //Call this function to run the image equalization // input - input image // output - black & white output image // eq_output - equalized output image // imageName - path to reach the input image void histogram_equalization(const cv::Mat& input, cv::Mat& output, cv::Mat& eq_output, string imageName){ //Get size of the image size_t colorBytes = input.step * input.rows; size_t grayBytes = output.step * output.rows; int imSize = input.cols * input.rows; //Set device and cpu image arrays and histograms unsigned char *d_input, *d_output, *de_output; int * d_histogram, * 
df_histogram; //Initialize histograms int hisBytes = C_SIZE * sizeof(int); int * histogram = (int *)malloc(hisBytes); int * f_histogram = (int *)malloc(hisBytes); memset(histogram, 0, hisBytes); memset(f_histogram, 0, hisBytes); // Allocate device memory SAFE_CALL(cudaMalloc<unsigned char>(&d_input, colorBytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc<unsigned char>(&de_output, grayBytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc<int>(&d_histogram, hisBytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc<int>(&df_histogram, hisBytes), "CUDA Malloc Failed"); // Copy data from OpenCV input image to device memory SAFE_CALL(cudaMemcpy(d_input, input.ptr(), colorBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(cudaMemset(d_output, 0, grayBytes), "Error setting d_output to 0"); SAFE_CALL(cudaMemset(de_output, 0, grayBytes), "Error setting de_output to 0"); SAFE_CALL(cudaMemset(d_histogram, 0, hisBytes), "Error setting d_histogram to 0"); SAFE_CALL(cudaMemset(df_histogram, 0, hisBytes), "Error setting df_histogram to 0"); const dim3 block(16, 16); const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows/ block.y)); // Launch the color conversion kernel printf("Converting image to black & white\n"); bgr_to_gray_kernel <<<grid, block >>>(d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), static_cast<int>(output.step)); // Synchronize to check for any kernel launch errors SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); SAFE_CALL(cudaMemcpy(output.ptr(), d_output, grayBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed"); //Write the black & white image cv::imwrite("Images/bw_" + imageName , output); // Launch equalization on CPU printf("Equalization on cpu.\n"); float cpuTime = 0.0; auto start_cpu = chrono::high_resolution_clock::now(); equalizer_cpu(output, eq_output, histogram, 
f_histogram, imageName); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; cpuTime = duration_ms.count(); // Set the eq_output image to 0 in order to reuse it in gpu memset(eq_output.ptr(), 0, grayBytes); memset(histogram, 0, hisBytes); memset(f_histogram, 0, hisBytes); printf("\n\n"); //Launch equalization on GPU printf("Equalization on gpu.\n"); float gpuTime = 0.0; auto start_gpu = chrono::high_resolution_clock::now(); get_histogram_kernel<<<grid, block >>>(d_output, d_histogram, input.cols, input.rows, static_cast<int>(output.step)); get_normalizedHistogram_kernel<<<grid, block>>>(d_histogram, df_histogram, imSize); equalizer_kernel<<<grid, block>>>(d_output, de_output, df_histogram, output.cols, output.rows, static_cast<int>(output.step)); auto end_gpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> gpu_duration_ms = end_gpu - start_gpu; gpuTime += gpu_duration_ms.count(); SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); // SAFE_CALL kernel error SAFE_CALL(cudaGetLastError(), "Error with last error"); // Copy device histograms to host histograms SAFE_CALL(cudaMemcpy(histogram, d_histogram, hisBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Device Failed"); SAFE_CALL(cudaMemcpy(f_histogram, df_histogram, hisBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Device Failed"); SAFE_CALL(cudaMemcpy(eq_output.ptr(), de_output, grayBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Device Failed"); //Save the image cv::imwrite("Images/eq_gpu_" + imageName , eq_output); printf("Time in CPU: %f\n", cpuTime); printf("Time in GPU: %f\n", gpuTime); printf("Speedup: %f\n", cpuTime / gpuTime ); // Free the device memory SAFE_CALL(cudaFree(d_input), "CUDA Free Failed"); SAFE_CALL(cudaFree(d_output), "CUDA Free Failed"); SAFE_CALL(cudaFree(de_output), "CUDA Free Failed"); SAFE_CALL(cudaFree(d_histogram), "CUDA Free Failed"); 
SAFE_CALL(cudaFree(df_histogram), "CUDA Free Failed"); //Free the host memory free(histogram); free(f_histogram); // Reset device SAFE_CALL(cudaDeviceReset(), "Error reseting"); } int main(int argc, char *argv[]){ string inputImage; if(argc < 2) inputImage = default_image; else inputImage = argv[1]; // Read input image from the disk cv::Mat input = cv::imread(img_dest + inputImage, CV_LOAD_IMAGE_COLOR); if (input.empty()){ cout << "Image Not Found!" << std::endl; cin.get(); return -1; } //Create output image cv::Mat output(input.rows, input.cols, CV_8UC1); //Create equalized output image cv::Mat eq_output(input.rows, input.cols, CV_8UC1); //Convert image to gray and equalize histogram_equalization(input, output, eq_output, inputImage); //Allow the windows to resize namedWindow("Input", cv::WINDOW_NORMAL); namedWindow("Blac&WhiteInput", cv::WINDOW_NORMAL); namedWindow("Output", cv::WINDOW_NORMAL); cv::resizeWindow("Input", 800, 600); cv::resizeWindow("Blac&WhiteInput", 800, 600); cv::resizeWindow("Output", 800, 600); //Show the input and output imshow("Input", input); imshow("Blac&WhiteInput", output); imshow("Output", eq_output); //Wait for key press cv::waitKey(); return 0; }
493423b51dde2f9ff52fa5c7f97d5953e3bc60e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_map_ops.h" #include "vector_math.h" #include "cuda_utils.h" #include <thrust/device_vector.h> #include <opencv2/cudaarithm.hpp> #include <opencv2/cudev/common.hpp> namespace slam { namespace map { __global__ void export_kernel(int3 *visible_block_pos, float3 *vertex, float3 *normal, uchar3 *colour) { } void export_(MapStruct map_struct, cv::cuda::GpuMat vertex, cv::cuda::GpuMat normal, cv::cuda::GpuMat colour) { } } // namespace map } // namespace slam
493423b51dde2f9ff52fa5c7f97d5953e3bc60e9.cu
#include "device_map_ops.h" #include "vector_math.h" #include "cuda_utils.h" #include <thrust/device_vector.h> #include <opencv2/cudaarithm.hpp> #include <opencv2/cudev/common.hpp> namespace slam { namespace map { __global__ void export_kernel(int3 *visible_block_pos, float3 *vertex, float3 *normal, uchar3 *colour) { } void export_(MapStruct map_struct, cv::cuda::GpuMat vertex, cv::cuda::GpuMat normal, cv::cuda::GpuMat colour) { } } // namespace map } // namespace slam
59c6cb086337a9efb76a0083aaf50bfe8b0aab8c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include <stdlib.h> #define FILAS 16 #define COLUMNAS 16 #define BYTES_MATRIZ (FILAS * COLUMNAS * sizeof(int)) __global__ void kernel_multiplicar(int *d_m1, int *d_m2,int *d_mr) { // Encuentro posicin: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Resultado de la multiplicacin: int valor_acumulado = 0; // Realizo la multiplicacin: for (int i = 0; i < COLUMNAS; i++) { int v1 = d_m1[fila * COLUMNAS + i]; int v2 = d_m2[i * COLUMNAS + columna]; valor_acumulado += v1 * v2; } // Lo guardo en la posicin: d_mr[fila * COLUMNAS + columna] = valor_acumulado; } void multiplicarMatrices(int *h_m1, int *h_m2, int *h_mr) { // Punteros a matrices en DEVICE: int *d_m1; int *d_m2; int *d_mr; // Reservo memoria en DEVICE: hipMalloc((void **)&d_m1, BYTES_MATRIZ); hipMalloc((void **)&d_m2, BYTES_MATRIZ); hipMalloc((void **)&d_mr, BYTES_MATRIZ); // Muevo de HOST a DEVICE: hipMemcpy(d_m1, h_m1, BYTES_MATRIZ, hipMemcpyHostToDevice); hipMemcpy(d_m2, h_m2, BYTES_MATRIZ, hipMemcpyHostToDevice); hipMemcpy(d_mr, h_mr, BYTES_MATRIZ, hipMemcpyHostToDevice); // Defino tamao de bloques: dim3 matriz_bloques(4, 4); dim3 matriz_hilos(4, 4); hipLaunchKernelGGL(( kernel_multiplicar) , dim3(matriz_bloques), dim3(matriz_hilos) , 0, 0, d_m1, d_m2, d_mr); // Espero a que termine de operar: hipDeviceSynchronize(); // Devolvemos resultado de DEVICE a HOST: hipMemcpy(h_mr, d_mr, BYTES_MATRIZ, hipMemcpyDeviceToHost); // Libero memoria de DEVICE: hipFree(d_m1); hipFree(d_m2); hipFree(d_mr); } void rellenarMatriz(int *h_m, int filas, int columnas) { /* Rellena una matriz de filasxcolumnas con nmeros aleatorios. 
*/ srand(time(NULL)); for (int i = 0; i < filas; ++i) { for (int j = 0; j < columnas; ++j) { *(h_m + i * columnas + j) = rand() % 101; } } } void pintarMatriz(int *h_m, int filas, int columnas) { /* * Imprime matriz por pantalla. */ for (int i = 0; i < columnas; i++) { printf("["); for (int j = 0; j < filas; j++) { if (j != filas && j != 0) { printf("\t"); } printf("%d", *(h_m + i * columnas + j)); } printf("]\n"); } } int main() { // Declaracin de matrices en host: int* h_m1 = (int *)malloc(BYTES_MATRIZ); int* h_m2 = (int *)malloc(BYTES_MATRIZ); int* h_mr = (int *)malloc(BYTES_MATRIZ); // Matriz resultado. // Relleno con datos aleatorios las matrices: rellenarMatriz(h_m1, FILAS, COLUMNAS); rellenarMatriz(h_m2, FILAS, COLUMNAS); // Imprimo: printf("Matriz 1: \n"); pintarMatriz(h_m1, FILAS, COLUMNAS); printf("Matriz 2: \n"); pintarMatriz(h_m2, FILAS, COLUMNAS); // Multiplico: multiplicarMatrices(h_m1, h_m2, h_mr); // Imprimo resultado: printf("Matriz resultado: "); pintarMatriz(h_mr, FILAS, COLUMNAS); // Libero espacio en memoria: free(h_m1); free(h_m2); free(h_mr); return 0; }
59c6cb086337a9efb76a0083aaf50bfe8b0aab8c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include <stdlib.h> #define FILAS 16 #define COLUMNAS 16 #define BYTES_MATRIZ (FILAS * COLUMNAS * sizeof(int)) __global__ void kernel_multiplicar(int *d_m1, int *d_m2,int *d_mr) { // Encuentro posición: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Resultado de la multiplicación: int valor_acumulado = 0; // Realizo la multiplicación: for (int i = 0; i < COLUMNAS; i++) { int v1 = d_m1[fila * COLUMNAS + i]; int v2 = d_m2[i * COLUMNAS + columna]; valor_acumulado += v1 * v2; } // Lo guardo en la posición: d_mr[fila * COLUMNAS + columna] = valor_acumulado; } void multiplicarMatrices(int *h_m1, int *h_m2, int *h_mr) { // Punteros a matrices en DEVICE: int *d_m1; int *d_m2; int *d_mr; // Reservo memoria en DEVICE: cudaMalloc((void **)&d_m1, BYTES_MATRIZ); cudaMalloc((void **)&d_m2, BYTES_MATRIZ); cudaMalloc((void **)&d_mr, BYTES_MATRIZ); // Muevo de HOST a DEVICE: cudaMemcpy(d_m1, h_m1, BYTES_MATRIZ, cudaMemcpyHostToDevice); cudaMemcpy(d_m2, h_m2, BYTES_MATRIZ, cudaMemcpyHostToDevice); cudaMemcpy(d_mr, h_mr, BYTES_MATRIZ, cudaMemcpyHostToDevice); // Defino tamaño de bloques: dim3 matriz_bloques(4, 4); dim3 matriz_hilos(4, 4); kernel_multiplicar <<< matriz_bloques, matriz_hilos >>> (d_m1, d_m2, d_mr); // Espero a que termine de operar: cudaDeviceSynchronize(); // Devolvemos resultado de DEVICE a HOST: cudaMemcpy(h_mr, d_mr, BYTES_MATRIZ, cudaMemcpyDeviceToHost); // Libero memoria de DEVICE: cudaFree(d_m1); cudaFree(d_m2); cudaFree(d_mr); } void rellenarMatriz(int *h_m, int filas, int columnas) { /* Rellena una matriz de filasxcolumnas con números aleatorios. */ srand(time(NULL)); for (int i = 0; i < filas; ++i) { for (int j = 0; j < columnas; ++j) { *(h_m + i * columnas + j) = rand() % 101; } } } void pintarMatriz(int *h_m, int filas, int columnas) { /* * Imprime matriz por pantalla. 
*/ for (int i = 0; i < columnas; i++) { printf("["); for (int j = 0; j < filas; j++) { if (j != filas && j != 0) { printf("\t"); } printf("%d", *(h_m + i * columnas + j)); } printf("]\n"); } } int main() { // Declaración de matrices en host: int* h_m1 = (int *)malloc(BYTES_MATRIZ); int* h_m2 = (int *)malloc(BYTES_MATRIZ); int* h_mr = (int *)malloc(BYTES_MATRIZ); // Matriz resultado. // Relleno con datos aleatorios las matrices: rellenarMatriz(h_m1, FILAS, COLUMNAS); rellenarMatriz(h_m2, FILAS, COLUMNAS); // Imprimo: printf("Matriz 1: \n"); pintarMatriz(h_m1, FILAS, COLUMNAS); printf("Matriz 2: \n"); pintarMatriz(h_m2, FILAS, COLUMNAS); // Multiplico: multiplicarMatrices(h_m1, h_m2, h_mr); // Imprimo resultado: printf("Matriz resultado: "); pintarMatriz(h_mr, FILAS, COLUMNAS); // Libero espacio en memoria: free(h_m1); free(h_m2); free(h_mr); return 0; }
f0607de8bb4a4f55337b60ff9c1e5b151fee1c9d.hip
// !!! This is a file automatically generated by hipify!!! /* * CLF.cu * * Created on: Nov 8, 2017 * Author: zy */ #include "CLF.cuh" #include "./../ConstraintParser/ConstraintParameter.cuh" #include "./../model/Coodinate.cuh" #include "./../model/Interval.cuh" #include "./../model/Priority.cuh" #include "./../model/FullCoveredInfo.cuh" #include "./../model/Classification.cuh" #include "./../model/Limit.h" #include "./../solver/ATG.h" #include "./../solver/PCATG.h" #include "./../solver/ConstantValue.h" #include "./../ErrorHandle/ErrorHandle.cuh" #include "ExcuteConstraint.cuh" #include "HardwareStrategy.cuh" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "device_functions.hpp" #include "math_functions.h" #include <stdio.h> #include <iostream> #include <climits> using namespace std; /* * case0 * * > >= * 0 abs(y1-y2)<=Min && y1 >=0 && y2>=0 * abs(y1-y2)>Min && y1>=0 && y2>=0 * case1: abs(y1-y2)<=Min && y1 <0 && y2<0 * case2: abs(y1-y2)>Min && z>x1 && z<x2 && y1>0 (x1,z) * case3: abs(y1-y2)>Min && z>x1 && z<x2 && y2>0 (z,x2) * case4: abs(y1-y2)>Min && z<x1 && y1 <0 && y2<0 (xbefore,d) * case5: abs(y1-y2)>Min && z>x2 && y1 <0 && y2<0 (d,xafter) * * */ //case: __device__ bool case0(Coodinate* a,Coodinate* b,const int cmpType) { bool a1 = (cmpType==ConstantValue::Equal); bool a2 = (cmpType==ConstantValue::NotEqual); bool a3 = (cmpType==ConstantValue::GreatOrGreatEqual) && (a->y >=0) && (b->y >=0); return a1 || a2 || a3; } // __device__ bool case1(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)<=Limit::FloatMin) && ((a->y <=0) || (a->y <=-0.0)) && ((b->y <=0) || (b->y <=-0.0)); } // __device__ bool case2(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y > 0) && (b->y < 0); } // __device__ bool case3(Coodinate* 
a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y < 0) && (b->y > 0); } // __device__ bool case4(Coodinate* a,Coodinate* b,const int cmpType) { return ( cmpType==ConstantValue::GreatOrGreatEqual || cmpType==ConstantValue::Equal ) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y <=0) && (b->y <=0) && (a->y > b->y);; } // __device__ bool case5(Coodinate* a,Coodinate* b,const int cmpType) { return ( cmpType==ConstantValue::GreatOrGreatEqual || cmpType==ConstantValue::Equal ) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y <=0) && (b->y <=0) && (a->y < b->y); }
f0607de8bb4a4f55337b60ff9c1e5b151fee1c9d.cu
/* * CLF.cu * * Created on: Nov 8, 2017 * Author: zy */ #include "CLF.cuh" #include "./../ConstraintParser/ConstraintParameter.cuh" #include "./../model/Coodinate.cuh" #include "./../model/Interval.cuh" #include "./../model/Priority.cuh" #include "./../model/FullCoveredInfo.cuh" #include "./../model/Classification.cuh" #include "./../model/Limit.h" #include "./../solver/ATG.h" #include "./../solver/PCATG.h" #include "./../solver/ConstantValue.h" #include "./../ErrorHandle/ErrorHandle.cuh" #include "ExcuteConstraint.cuh" #include "HardwareStrategy.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.hpp" #include "math_functions.h" #include <stdio.h> #include <iostream> #include <climits> using namespace std; /* * case:0 * 等号和不等号 全区间 * 判断是否是> 和 >=比较运算符号的处理 * 斜率为0: abs(y1-y2)<=Min && y1 >=0 && y2>=0 全区间 * 斜率存在: abs(y1-y2)>Min && y1>=0 && y2>=0 全区间 * case1: abs(y1-y2)<=Min && y1 <0 && y2<0 无解 * case2: abs(y1-y2)>Min && z>x1 && z<x2 && y1>0 (x1,z) * case3: abs(y1-y2)>Min && z>x1 && z<x2 && y2>0 (z,x2) * case4: abs(y1-y2)>Min && z<x1 && y1 <0 && y2<0 无解(xbefore,d) * case5: abs(y1-y2)>Min && z>x2 && y1 <0 && y2<0 无解(d,xafter) * 下面是所有的分类的变量的设置 * */ //全区间case:等式、不等式、全在上方 __device__ bool case0(Coodinate* a,Coodinate* b,const int cmpType) { bool a1 = (cmpType==ConstantValue::Equal); bool a2 = (cmpType==ConstantValue::NotEqual); bool a3 = (cmpType==ConstantValue::GreatOrGreatEqual) && (a->y >=0) && (b->y >=0); return a1 || a2 || a3; } //斜率不存在,导致无解 __device__ bool case1(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)<=Limit::FloatMin) && ((a->y <=0) || (a->y <=-0.0)) && ((b->y <=0) || (b->y <=-0.0)); } //零点落在中间,左边部分解 __device__ bool case2(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y > 0) && (b->y < 0); } 
//零点落在中间,右边部分解 __device__ bool case3(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y < 0) && (b->y > 0); } //无解,零点落在左边 __device__ bool case4(Coodinate* a,Coodinate* b,const int cmpType) { return ( cmpType==ConstantValue::GreatOrGreatEqual || cmpType==ConstantValue::Equal ) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y <=0) && (b->y <=0) && (a->y > b->y);; } //无解,零点落在右边 __device__ bool case5(Coodinate* a,Coodinate* b,const int cmpType) { return ( cmpType==ConstantValue::GreatOrGreatEqual || cmpType==ConstantValue::Equal ) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y <=0) && (b->y <=0) && (a->y < b->y); }
82055f38de141d8768b03f47373b5e4e52d6aa8c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/dznrm2.cu, normal z -> c, Tue Aug 30 09:38:33 2016 */ #include "magma_internal.h" #include "commonblas_c.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 #define COMPLEX /******************************************************************************/ __global__ void magmablas_scnrm2_kernel( int m, magmaFloatComplex *dA, int ldda, float *dxnorm ) { const int tx = threadIdx.x; magmaFloatComplex *dx = dA + blockIdx.x * ldda; __shared__ float sum[ BLOCK_SIZE ]; // get norm of dx float lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZE ) { #ifdef REAL float re = dx[j]; lsum += re*re; #else float re = MAGMA_C_REAL( dx[j] ); float im = MAGMA_C_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx] = lsum; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); if (tx == 0) dxnorm[blockIdx.x] = sqrt(sum[0]); } /******************************************************************************/ __global__ void magmablas_scnrm2_check_kernel( int m, magmaFloatComplex *dA, int ldda, float *dxnorm, float *lsticc ) { const int tx = threadIdx.x; magmaFloatComplex *dx = dA + blockIdx.x * ldda; __shared__ float sum[ BLOCK_SIZE ]; // get norm of dx only if lsticc[blockIdx+1] != 0 if ( lsticc[blockIdx.x + 1] == 0 ) return; float lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZE ) { #ifdef REAL float re = dx[j]; lsum += re*re; #else float re = MAGMA_C_REAL( dx[j] ); float im = MAGMA_C_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx] = lsum; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); if (tx == 0) dxnorm[blockIdx.x] = sqrt(sum[0]); } /******************************************************************************/ extern "C" void 
magmablas_scnrm2_check_q( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magmaFloat_ptr dxnorm, magmaFloat_ptr dlsticc, magma_queue_t queue ) { dim3 threads( BLOCK_SIZE ); dim3 blocks( n ); hipLaunchKernelGGL(( magmablas_scnrm2_check_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, dxnorm, dlsticc ); } /******************************************************************************/ __global__ void magmablas_scnrm2_smkernel( int m, int n, magmaFloatComplex *dA, int ldda, float *dxnorm ) { const int tx = threadIdx.x; const int ty = threadIdx.y; __shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1]; for( int k = ty; k < n; k += BLOCK_SIZEy ) { magmaFloatComplex *dx = dA + k * ldda; // get norm of dx float lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZEx ) { #ifdef REAL float re = dx[j]; lsum += re*re; #else float re = MAGMA_C_REAL( dx[j] ); float im = MAGMA_C_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx][ty] = lsum; magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( tx, ty, sum ); if (tx == 0) dxnorm[k] = sqrt(sum[0][ty]); __syncthreads(); } } /******************************************************************************/ /* Compute the scnrm2 of each column of m-by-n matrix dA. The resulting norms are written in the dxnorm array. This routine uses only one SM (block). 
*/ extern "C" void magmablas_scnrm2_sm_q( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magmaFloat_ptr dxnorm, magma_queue_t queue ) { dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy ); dim3 blocks( 1, 1 ); hipLaunchKernelGGL(( magmablas_scnrm2_smkernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dxnorm ); } /******************************************************************************/ __global__ void magma_scnrm2_adjust_kernel(float *xnorm, magmaFloatComplex *c) { const int tx = threadIdx.x; __shared__ float sum[ BLOCK_SIZE ]; float temp; temp = MAGMA_C_ABS( c[tx] ) / xnorm[0]; sum[tx] = -temp * temp; magma_sum_reduce_n( blockDim.x, tx, sum ); __syncthreads(); if (tx == 0) xnorm[0] = xnorm[0] * sqrt(1+sum[0]); } /******************************************************************************/ /* Adjust the norm of c to give the norm of c[k+1:], assuming that c was changed with orthogonal transformations. */ extern "C" void magmablas_scnrm2_adjust_q( magma_int_t k, magmaFloat_ptr dxnorm, magmaFloatComplex_ptr dc, magma_queue_t queue ) { dim3 threads( k ); dim3 blocks( 1 ); hipLaunchKernelGGL(( magma_scnrm2_adjust_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , dxnorm, dc); } /******************************************************************************/ #define BS 256 __global__ void magma_scnrm2_row_check_adjust_kernel( int n, float tol, float *xnorm, float *xnorm2, magmaFloatComplex *C, int ldc, float *lsticc) { const int tx = threadIdx.x + blockIdx.x*BS; lsticc[tx+1] = 0; if (tx < n) { float temp = MAGMA_C_ABS( C[tx*ldc] ) / xnorm[tx]; temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) ); float temp2 = xnorm[tx] / xnorm2[tx]; temp2 = temp * (temp2 * temp2); if (temp2 <= tol) { lsticc[tx+1] = 1; } else { xnorm[tx] *= sqrt(temp); } } if (tx == 0) lsticc[0] = 0; magma_sum_reduce_n( blockDim.x, tx, lsticc ); } /******************************************************************************/ /* Adjust the 
norm of C[,1:k] to give the norm of C[k+1:,1:k], assuming that C was changed with orthogonal transformations. It also do checks for QP3 */ extern "C" void magmablas_scnrm2_row_check_adjust_q( magma_int_t k, float tol, magmaFloat_ptr dxnorm, magmaFloat_ptr dxnorm2, magmaFloatComplex_ptr dC, magma_int_t lddc, magmaFloat_ptr dlsticc, magma_queue_t queue ) { dim3 threads( BS ); dim3 blocks( magma_ceildiv( k, BS ) ); hipLaunchKernelGGL(( magma_scnrm2_row_check_adjust_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , k, tol, dxnorm, dxnorm2, dC, lddc, dlsticc); } /******************************************************************************/ /* Compute the scnrm2 of each column of m-by-n matrix dA. The resulting norms are written in the dxnorm array. The computation can be done using n blocks (default) or on one SM (commented). */ extern "C" void magmablas_scnrm2_cols_q( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magmaFloat_ptr dxnorm, magma_queue_t queue ) { dim3 threads( BLOCK_SIZE ); dim3 blocks( n ); hipLaunchKernelGGL(( magmablas_scnrm2_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, dxnorm ); // The following would do the computation on one SM // magmablas_scnrm2_sm_q( m, n, dA, ldda, dxnorm, queue ); }
82055f38de141d8768b03f47373b5e4e52d6aa8c.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/dznrm2.cu, normal z -> c, Tue Aug 30 09:38:33 2016 */ #include "magma_internal.h" #include "commonblas_c.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 #define COMPLEX /******************************************************************************/ __global__ void magmablas_scnrm2_kernel( int m, magmaFloatComplex *dA, int ldda, float *dxnorm ) { const int tx = threadIdx.x; magmaFloatComplex *dx = dA + blockIdx.x * ldda; __shared__ float sum[ BLOCK_SIZE ]; // get norm of dx float lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZE ) { #ifdef REAL float re = dx[j]; lsum += re*re; #else float re = MAGMA_C_REAL( dx[j] ); float im = MAGMA_C_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx] = lsum; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); if (tx == 0) dxnorm[blockIdx.x] = sqrt(sum[0]); } /******************************************************************************/ __global__ void magmablas_scnrm2_check_kernel( int m, magmaFloatComplex *dA, int ldda, float *dxnorm, float *lsticc ) { const int tx = threadIdx.x; magmaFloatComplex *dx = dA + blockIdx.x * ldda; __shared__ float sum[ BLOCK_SIZE ]; // get norm of dx only if lsticc[blockIdx+1] != 0 if ( lsticc[blockIdx.x + 1] == 0 ) return; float lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZE ) { #ifdef REAL float re = dx[j]; lsum += re*re; #else float re = MAGMA_C_REAL( dx[j] ); float im = MAGMA_C_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx] = lsum; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); if (tx == 0) dxnorm[blockIdx.x] = sqrt(sum[0]); } /******************************************************************************/ extern "C" void magmablas_scnrm2_check_q( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, 
magmaFloat_ptr dxnorm, magmaFloat_ptr dlsticc, magma_queue_t queue ) { dim3 threads( BLOCK_SIZE ); dim3 blocks( n ); magmablas_scnrm2_check_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, dxnorm, dlsticc ); } /******************************************************************************/ __global__ void magmablas_scnrm2_smkernel( int m, int n, magmaFloatComplex *dA, int ldda, float *dxnorm ) { const int tx = threadIdx.x; const int ty = threadIdx.y; __shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1]; for( int k = ty; k < n; k += BLOCK_SIZEy ) { magmaFloatComplex *dx = dA + k * ldda; // get norm of dx float lsum = 0; for( int j = tx; j < m; j += BLOCK_SIZEx ) { #ifdef REAL float re = dx[j]; lsum += re*re; #else float re = MAGMA_C_REAL( dx[j] ); float im = MAGMA_C_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[tx][ty] = lsum; magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( tx, ty, sum ); if (tx == 0) dxnorm[k] = sqrt(sum[0][ty]); __syncthreads(); } } /******************************************************************************/ /* Compute the scnrm2 of each column of m-by-n matrix dA. The resulting norms are written in the dxnorm array. This routine uses only one SM (block). 
*/ extern "C" void magmablas_scnrm2_sm_q( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magmaFloat_ptr dxnorm, magma_queue_t queue ) { dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy ); dim3 blocks( 1, 1 ); magmablas_scnrm2_smkernel <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dxnorm ); } /******************************************************************************/ __global__ void magma_scnrm2_adjust_kernel(float *xnorm, magmaFloatComplex *c) { const int tx = threadIdx.x; __shared__ float sum[ BLOCK_SIZE ]; float temp; temp = MAGMA_C_ABS( c[tx] ) / xnorm[0]; sum[tx] = -temp * temp; magma_sum_reduce_n( blockDim.x, tx, sum ); __syncthreads(); if (tx == 0) xnorm[0] = xnorm[0] * sqrt(1+sum[0]); } /******************************************************************************/ /* Adjust the norm of c to give the norm of c[k+1:], assuming that c was changed with orthogonal transformations. */ extern "C" void magmablas_scnrm2_adjust_q( magma_int_t k, magmaFloat_ptr dxnorm, magmaFloatComplex_ptr dc, magma_queue_t queue ) { dim3 threads( k ); dim3 blocks( 1 ); magma_scnrm2_adjust_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> (dxnorm, dc); } /******************************************************************************/ #define BS 256 __global__ void magma_scnrm2_row_check_adjust_kernel( int n, float tol, float *xnorm, float *xnorm2, magmaFloatComplex *C, int ldc, float *lsticc) { const int tx = threadIdx.x + blockIdx.x*BS; lsticc[tx+1] = 0; if (tx < n) { float temp = MAGMA_C_ABS( C[tx*ldc] ) / xnorm[tx]; temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) ); float temp2 = xnorm[tx] / xnorm2[tx]; temp2 = temp * (temp2 * temp2); if (temp2 <= tol) { lsticc[tx+1] = 1; } else { xnorm[tx] *= sqrt(temp); } } if (tx == 0) lsticc[0] = 0; magma_sum_reduce_n( blockDim.x, tx, lsticc ); } /******************************************************************************/ /* Adjust the norm of C[,1:k] to give the norm of C[k+1:,1:k], 
assuming that C was changed with orthogonal transformations. It also do checks for QP3 */ extern "C" void magmablas_scnrm2_row_check_adjust_q( magma_int_t k, float tol, magmaFloat_ptr dxnorm, magmaFloat_ptr dxnorm2, magmaFloatComplex_ptr dC, magma_int_t lddc, magmaFloat_ptr dlsticc, magma_queue_t queue ) { dim3 threads( BS ); dim3 blocks( magma_ceildiv( k, BS ) ); magma_scnrm2_row_check_adjust_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> (k, tol, dxnorm, dxnorm2, dC, lddc, dlsticc); } /******************************************************************************/ /* Compute the scnrm2 of each column of m-by-n matrix dA. The resulting norms are written in the dxnorm array. The computation can be done using n blocks (default) or on one SM (commented). */ extern "C" void magmablas_scnrm2_cols_q( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magmaFloat_ptr dxnorm, magma_queue_t queue ) { dim3 threads( BLOCK_SIZE ); dim3 blocks( n ); magmablas_scnrm2_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, dxnorm ); // The following would do the computation on one SM // magmablas_scnrm2_sm_q( m, n, dA, ldda, dxnorm, queue ); }
45ebb712fdeed2f0d3e89bbe5d234d024f709e42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdbool.h> #include <stdio.h> #include <vector> #include <iostream> #include "nms_cuda_kernel.h" #define CUDA_WARN(XXX) \ do { if (XXX != hipSuccess) std::cout << "CUDA Error: " << \ hipGetErrorString(XXX) << ", at line " << __LINE__ \ << std::endl; hipDeviceSynchronize(); } while (0) #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(int n_boxes, float nms_overlap_thresh, float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + 
threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void nms_cuda_compute(int* keep_out, int *num_out, float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh) { float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); // printf("i am at line %d\n", boxes_num); // printf("i am at line %d\n", boxes_dim); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); // we need to create a memory for keep_out on cpu // otherwise, the following code cannot run int* keep_out_cpu = new int[boxes_num]; int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / 
threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { // orignal: keep_out[num_to_keep++] = i; keep_out_cpu[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } // copy keep_out_cpu to keep_out on gpu CUDA_WARN(hipMemcpy(keep_out, keep_out_cpu, boxes_num * sizeof(int),hipMemcpyHostToDevice)); // *num_out = num_to_keep; // original: *num_out = num_to_keep; // copy num_to_keep to num_out on gpu CUDA_WARN(hipMemcpy(num_out, &num_to_keep, 1 * sizeof(int),hipMemcpyHostToDevice)); // release cuda memory CUDA_CHECK(hipFree(boxes_dev)); CUDA_CHECK(hipFree(mask_dev)); // release cpu memory delete []keep_out_cpu; }
45ebb712fdeed2f0d3e89bbe5d234d024f709e42.cu
#include <stdbool.h> #include <stdio.h> #include <vector> #include <iostream> #include "nms_cuda_kernel.h" #define CUDA_WARN(XXX) \ do { if (XXX != cudaSuccess) std::cout << "CUDA Error: " << \ cudaGetErrorString(XXX) << ", at line " << __LINE__ \ << std::endl; cudaDeviceSynchronize(); } while (0) #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(int n_boxes, float nms_overlap_thresh, float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = 
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void nms_cuda_compute(int* keep_out, int *num_out, float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh) { float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); // printf("i am at line %d\n", boxes_num); // printf("i am at line %d\n", boxes_dim); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); // we need to create a memory for keep_out on cpu // otherwise, the following code cannot run int* keep_out_cpu = new int[boxes_num]; int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { // 
orignal: keep_out[num_to_keep++] = i; keep_out_cpu[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } // copy keep_out_cpu to keep_out on gpu CUDA_WARN(cudaMemcpy(keep_out, keep_out_cpu, boxes_num * sizeof(int),cudaMemcpyHostToDevice)); // *num_out = num_to_keep; // original: *num_out = num_to_keep; // copy num_to_keep to num_out on gpu CUDA_WARN(cudaMemcpy(num_out, &num_to_keep, 1 * sizeof(int),cudaMemcpyHostToDevice)); // release cuda memory CUDA_CHECK(cudaFree(boxes_dev)); CUDA_CHECK(cudaFree(mask_dev)); // release cpu memory delete []keep_out_cpu; }
f33d5742be55c7cf76b5b268c8aac520ae17c135.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //device functions __device__ int getGlobalIdx_1D_1D() { return blockIdx.x *blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_1D_2D() { return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_1D_3D() { return blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_2D_1D() { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __device__ int getGlobalIdx_2D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_2D_3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_1D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } //kernels __global__ void kernel_1D_1D() { printf("Local thread ID: %i Global thread ID: 
%i\n", threadIdx.x, getGlobalIdx_1D_1D()); } __global__ void kernel_1D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_1D_2D()); } __global__ void kernel_1D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_1D_3D()); } __global__ void kernel_2D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_2D_1D()); } __global__ void kernel_2D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_2D_2D()); } __global__ void kernel_2D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_2D_3D()); } __global__ void kernel_3D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_3D_1D()); } __global__ void kernel_3D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_3D_2D()); } __global__ void kernel_3D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_3D_3D()); } int main() { printf("\nLaunching kernel as 1D grid of 1D blocks...\n"); kernel_1D_1D << <dim3(2, 1, 1), dim3(2, 1, 1) >> >(); hipDeviceReset(); printf("\nLaunching kernel as 1D grid of 2D blocks...\n"); kernel_1D_2D << <dim3(2, 1, 1), dim3(2, 2, 1) >> >(); hipDeviceReset(); printf("\nLaunching kernel as 1D grid of 3D blocks...\n"); kernel_1D_3D << <dim3(2, 1, 1), dim3(2, 2, 2) >> >(); hipDeviceReset(); printf("\nLaunching kernel as 2D grid of 1D blocks...\n"); kernel_2D_1D << <dim3(2, 2, 1), dim3(2, 1, 1) >> >(); hipDeviceReset(); printf("\nLaunching kernel as 2D grid of 2D blocks...\n"); kernel_2D_2D << <dim3(2, 2, 1), dim3(2, 2, 1) >> >(); hipDeviceReset(); printf("\nLaunching kernel as 2D grid of 3D blocks...\n"); kernel_2D_3D << <dim3(2, 2, 1), dim3(2, 2, 2) >> >(); hipDeviceReset(); 
printf("\nLaunching kernel as 3D grid of 1D blocks...\n"); kernel_3D_1D << <dim3(2, 2, 2), dim3(2, 1, 1) >> >(); hipDeviceReset(); printf("\nLaunching kernel as 3D grid of 2D blocks...\n"); kernel_3D_2D << <dim3(2, 2, 2), dim3(2, 2, 1) >> >(); hipDeviceReset(); printf("\nLaunching kernel as 3D grid of 3D blocks...\n"); kernel_3D_3D << <dim3(32, 1, 1), dim3(16, 64, 1) >> >(); hipDeviceReset(); return 0; }
f33d5742be55c7cf76b5b268c8aac520ae17c135.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //device functions __device__ int getGlobalIdx_1D_1D() { return blockIdx.x *blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_1D_2D() { return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_1D_3D() { return blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_2D_1D() { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __device__ int getGlobalIdx_2D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_2D_3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_1D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_3D_3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } //kernels __global__ void kernel_1D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_1D_1D()); } __global__ void 
kernel_1D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_1D_2D()); } __global__ void kernel_1D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_1D_3D()); } __global__ void kernel_2D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_2D_1D()); } __global__ void kernel_2D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_2D_2D()); } __global__ void kernel_2D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_2D_3D()); } __global__ void kernel_3D_1D() { printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_3D_1D()); } __global__ void kernel_3D_2D() { printf("Local thread IDs: (%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, getGlobalIdx_3D_2D()); } __global__ void kernel_3D_3D() { printf("Local thread IDs: (%i,%i,%i) Global thread ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, getGlobalIdx_3D_3D()); } int main() { printf("\nLaunching kernel as 1D grid of 1D blocks...\n"); kernel_1D_1D << <dim3(2, 1, 1), dim3(2, 1, 1) >> >(); cudaDeviceReset(); printf("\nLaunching kernel as 1D grid of 2D blocks...\n"); kernel_1D_2D << <dim3(2, 1, 1), dim3(2, 2, 1) >> >(); cudaDeviceReset(); printf("\nLaunching kernel as 1D grid of 3D blocks...\n"); kernel_1D_3D << <dim3(2, 1, 1), dim3(2, 2, 2) >> >(); cudaDeviceReset(); printf("\nLaunching kernel as 2D grid of 1D blocks...\n"); kernel_2D_1D << <dim3(2, 2, 1), dim3(2, 1, 1) >> >(); cudaDeviceReset(); printf("\nLaunching kernel as 2D grid of 2D blocks...\n"); kernel_2D_2D << <dim3(2, 2, 1), dim3(2, 2, 1) >> >(); cudaDeviceReset(); printf("\nLaunching kernel as 2D grid of 3D blocks...\n"); kernel_2D_3D << <dim3(2, 2, 1), dim3(2, 2, 2) >> >(); cudaDeviceReset(); printf("\nLaunching kernel as 3D grid of 1D 
blocks...\n"); kernel_3D_1D << <dim3(2, 2, 2), dim3(2, 1, 1) >> >(); cudaDeviceReset(); printf("\nLaunching kernel as 3D grid of 2D blocks...\n"); kernel_3D_2D << <dim3(2, 2, 2), dim3(2, 2, 1) >> >(); cudaDeviceReset(); printf("\nLaunching kernel as 3D grid of 3D blocks...\n"); kernel_3D_3D << <dim3(32, 1, 1), dim3(16, 64, 1) >> >(); cudaDeviceReset(); return 0; }
c4b9fb2d182ccfc725cd4a143fb5b31cf68ee6d6.hip
// !!! This is a file automatically generated by hipify!!! /***************************************** Emitting C Generated Code *******************************************/ #include "nccl_header.h" #include <string.h> #include <stdlib.h> #include "cuda_header.h" #include <stdio.h> #include <stdint.h> #include <stdbool.h> #include "mpi_header.h" /**************** Snippet ****************/ void Snippet(int x0) { int x1 = 0; int x2 = 0; MPICHECK(MPI_Init(NULL, NULL)); int x3 = MPI_Comm_rank(MPI_COMM_WORLD, &x1); MPICHECK(x3); MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &x2)); printf("myRank: %d, nRanks: %d\n", x1, x2); ncclUniqueId x4; ncclComm_t x5; hipStream_t x6; if (x1 == 0) NCCLCHECK(ncclGetUniqueId(&x4)); MPICHECK(MPI_Bcast(&x4, NCCL_UNIQUE_ID_BYTES, MPI_BYTE, 0, MPI_COMM_WORLD)); float* x7 = (float*)malloc(1024 * sizeof(float)); int x8 = 0; while (x8 != 1024) { x7[x8] = 2.0; x8 = x8 + 1; } CUDA_CALL(hipSetDevice(x1)); float** x9 = (float**)malloc(x2 * sizeof(float*)); float** x10 = (float**)malloc(x2 * sizeof(float*)); int x11 = x2; int x12 = 0; while (x12 != x11) { int x13 = x12; CUDA_CALL(hipMalloc(&x9[x13], (size_t)(1024 * sizeof(float)))); CUDA_CALL(hipMalloc(&x10[x13], (size_t)(1024 * sizeof(float)))); CUDA_CALL(hipMemcpy(x9[x13], x7, (size_t)(1024 * sizeof(float)), hipMemcpyHostToDevice)); x12 = x12 + 1; } CUDA_CALL(hipStreamCreateWithFlags(&x6, hipStreamDefault)); NCCLCHECK(ncclCommInitRank(&x5, x2, x4, x1)); int x14 = x2; ncclDataType_t x15 = ncclFloat; NCCLCHECK(ncclGroupStart()); int x16 = 0; while (x16 != x14) { int x17 = x16; NCCLCHECK(ncclSend(x9[x17], 1024, x15, x17, x5, x6)); NCCLCHECK(ncclRecv(x10[x17], 1024, x15, x17, x5, x6)); x16 = x16 + 1; } NCCLCHECK(ncclGroupEnd()); CUDA_CALL(hipStreamSynchronize(x6)); int x18 = 0; int x19 = x2; int x20 = 0; while (x20 != x19) { float* x21 = (float*)malloc(1024 * sizeof(float)); CUDA_CALL(hipMemcpy(x21, x10[x20], (size_t)(1024 * sizeof(float)), hipMemcpyDeviceToHost)); int x22 = 0; while (x22 != 1024) { if 
(x21[x22] != 2) x18 = x18 + 1; x22 = x22 + 1; } x20 = x20 + 1; } int x23 = x2; int x24 = 0; while (x24 != x23) { int x25 = x24; CUDA_CALL(hipFree(x9[x25])); CUDA_CALL(hipFree(x10[x25])); x24 = x24 + 1; } NCCLCHECK(ncclCommDestroy(x5)); MPICHECK(MPI_Finalize()); if (x18 != 0) printf("[MPI Rank %d] Found %d errors.\n", x1, x18); else printf("[MPI Rank %d] Success \n", x1); } /***************************************** End of C Generated Code *******************************************/ int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: %s <arg>\n", argv[0]); return 0; } Snippet(atoi(argv[1])); return 0; }
c4b9fb2d182ccfc725cd4a143fb5b31cf68ee6d6.cu
/***************************************** Emitting C Generated Code *******************************************/ #include "nccl_header.h" #include <string.h> #include <stdlib.h> #include "cuda_header.h" #include <stdio.h> #include <stdint.h> #include <stdbool.h> #include "mpi_header.h" /**************** Snippet ****************/ void Snippet(int x0) { int x1 = 0; int x2 = 0; MPICHECK(MPI_Init(NULL, NULL)); int x3 = MPI_Comm_rank(MPI_COMM_WORLD, &x1); MPICHECK(x3); MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &x2)); printf("myRank: %d, nRanks: %d\n", x1, x2); ncclUniqueId x4; ncclComm_t x5; cudaStream_t x6; if (x1 == 0) NCCLCHECK(ncclGetUniqueId(&x4)); MPICHECK(MPI_Bcast(&x4, NCCL_UNIQUE_ID_BYTES, MPI_BYTE, 0, MPI_COMM_WORLD)); float* x7 = (float*)malloc(1024 * sizeof(float)); int x8 = 0; while (x8 != 1024) { x7[x8] = 2.0; x8 = x8 + 1; } CUDA_CALL(cudaSetDevice(x1)); float** x9 = (float**)malloc(x2 * sizeof(float*)); float** x10 = (float**)malloc(x2 * sizeof(float*)); int x11 = x2; int x12 = 0; while (x12 != x11) { int x13 = x12; CUDA_CALL(cudaMalloc(&x9[x13], (size_t)(1024 * sizeof(float)))); CUDA_CALL(cudaMalloc(&x10[x13], (size_t)(1024 * sizeof(float)))); CUDA_CALL(cudaMemcpy(x9[x13], x7, (size_t)(1024 * sizeof(float)), cudaMemcpyHostToDevice)); x12 = x12 + 1; } CUDA_CALL(cudaStreamCreateWithFlags(&x6, cudaStreamDefault)); NCCLCHECK(ncclCommInitRank(&x5, x2, x4, x1)); int x14 = x2; ncclDataType_t x15 = ncclFloat; NCCLCHECK(ncclGroupStart()); int x16 = 0; while (x16 != x14) { int x17 = x16; NCCLCHECK(ncclSend(x9[x17], 1024, x15, x17, x5, x6)); NCCLCHECK(ncclRecv(x10[x17], 1024, x15, x17, x5, x6)); x16 = x16 + 1; } NCCLCHECK(ncclGroupEnd()); CUDA_CALL(cudaStreamSynchronize(x6)); int x18 = 0; int x19 = x2; int x20 = 0; while (x20 != x19) { float* x21 = (float*)malloc(1024 * sizeof(float)); CUDA_CALL(cudaMemcpy(x21, x10[x20], (size_t)(1024 * sizeof(float)), cudaMemcpyDeviceToHost)); int x22 = 0; while (x22 != 1024) { if (x21[x22] != 2) x18 = x18 + 1; x22 = x22 + 1; } x20 
= x20 + 1; } int x23 = x2; int x24 = 0; while (x24 != x23) { int x25 = x24; CUDA_CALL(cudaFree(x9[x25])); CUDA_CALL(cudaFree(x10[x25])); x24 = x24 + 1; } NCCLCHECK(ncclCommDestroy(x5)); MPICHECK(MPI_Finalize()); if (x18 != 0) printf("[MPI Rank %d] Found %d errors.\n", x1, x18); else printf("[MPI Rank %d] Success \n", x1); } /***************************************** End of C Generated Code *******************************************/ int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: %s <arg>\n", argv[0]); return 0; } Snippet(atoi(argv[1])); return 0; }
728b114b8e6997e0f9122b2d08c6511a3f73ecf6.hip
// !!! This is a file automatically generated by hipify!!! // CUDA Implementation of the 2D wave equation #include <math.h> #include <hip/hip_runtime.h> #include "InputOutput.h" #include "Utilities.cuh" #include "Matlab_like.cuh" #include "TimingGPU.cuh" #define BLOCKSIZEX 16 #define BLOCKSIZEY 16 #define DEBUG /***********************************/ /* HOST-SIZE FIELD UPDATE FUNCTION */ /***********************************/ void updateHost(const double * __restrict__ h_uold, const double * __restrict__ h_u, double * __restrict__ h_unew, const double alphaSquared, const int Nx, const int Ny) { for (int j = 1; j < Ny - 1; j++) for (int i = 1; i < Nx - 1; i++) h_unew[j * Nx + i] = 2. * h_u[j * Nx + i] - h_uold[j * Nx + i] + alphaSquared * (h_u[j * Nx + i - 1] + h_u[j * Nx + i + 1] + h_u[(j + 1) * Nx + i] + h_u[(j - 1) * Nx + i] - 4. * h_u[j * Nx + i]); } /********************************************************/ /* DEVICE-SIZE FIELD UPDATE FUNCTION - NO SHARED MEMORY */ /********************************************************/ __global__ void updateDevice_v0(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew, const double alphaSquared, const int Nx, const int Ny) { const int tidx = threadIdx.x + blockIdx.x * blockDim.x; const int tidy = threadIdx.y + blockIdx.y * blockDim.y; if ((tidx >= Nx - 1) || (tidx == 0) || (tidy >= Ny - 1) || (tidy == 0)) return; d_unew[tidy * Nx + tidx] = 2. * d_u[tidy * Nx + tidx] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u[tidy * Nx + tidx - 1] + d_u[tidy * Nx + tidx + 1] + d_u[(tidy + 1) * Nx + tidx] + d_u[(tidy - 1) * Nx + tidx] - 4. 
* d_u[tidy * Nx + tidx]); } /********************************************************/ /* DEVICE-SIZE FIELD UPDATE FUNCTION - SHARED MEMORY v1 */ /********************************************************/ __global__ void updateDevice_v1(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew, const double alphaSquared, const int Nx, const int Ny) { const int tidx = threadIdx.x + blockIdx.x * blockDim.x; const int tidy = threadIdx.y + blockIdx.y * blockDim.y; if ((tidx >= Nx) || (tidy >= Ny)) return; __shared__ double d_u_sh[BLOCKSIZEX][BLOCKSIZEY]; // --- Load data to shared memory. Halo regions are NOT loaded. d_u_sh[threadIdx.x][threadIdx.y] = d_u[tidy * Nx + tidx]; __syncthreads(); if ((threadIdx.x > 0) && (threadIdx.x < (BLOCKSIZEX - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKSIZEY 1)) && (tidx < Nx - 1) && (tidy < Ny - 1)) // --- If we do not need halo region elements (we are "inside" a thread block, not on the border), then use shared memory. d_unew[tidy * Nx + tidx] = 2. * d_u_sh[threadIdx.x][threadIdx.y] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u_sh[threadIdx.x - 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y] + d_u_sh[threadIdx.x][threadIdx.y - 1] + d_u_sh[threadIdx.x][threadIdx.y + 1] - 4. * d_u_sh[threadIdx.x][threadIdx.y]); else if (tidx > 0 && tidx < Nx - 1 && tidy > 0 && tidy < Ny - 1) // --- Only update "interior" (not boundary) node points // --- If we need halo region elements, then use global memory. d_unew[tidy * Nx + tidx] = 2. * d_u[tidy * Nx + tidx] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u[tidy * Nx + tidx - 1] + d_u[tidy * Nx + tidx + 1] + d_u[(tidy + 1) * Nx + tidx] + d_u[(tidy - 1) * Nx + tidx] - 4. 
* d_u[tidy * Nx + tidx]); } /********************************************************/ /* DEVICE-SIZE FIELD UPDATE FUNCTION - SHARED MEMORY v2 */ /********************************************************/ __global__ void updateDevice_v2(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew, const double alphaSquared, const int Nx, const int Ny) { const int tidx = blockIdx.x * (BLOCKSIZEX - 2) + threadIdx.x; const int tidy = blockIdx.y * (BLOCKSIZEY - 2) + threadIdx.y; if ((tidx >= Nx) || (tidy >= Ny)) return; __shared__ double d_u_sh[BLOCKSIZEX][BLOCKSIZEY]; // --- Load data to shared memory. Halo regions ARE loaded. d_u_sh[threadIdx.x][threadIdx.y] = d_u[tidy * Nx + tidx]; __syncthreads(); if (((threadIdx.x > 0) && (threadIdx.x < (BLOCKSIZEX - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKSIZEY 1))) && (tidx < Nx - 1 && tidy < Ny - 1)) d_unew[tidy * Nx + tidx] = 2. * d_u_sh[threadIdx.x][threadIdx.y] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u_sh[threadIdx.x - 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y] + d_u_sh[threadIdx.x][threadIdx.y - 1] + d_u_sh[threadIdx.x][threadIdx.y + 1] - 4. 
* d_u_sh[threadIdx.x][threadIdx.y]); } /********************************************************/ /* DEVICE-SIZE FIELD UPDATE FUNCTION - SHARED MEMORY v3 */ /********************************************************/ __global__ void updateDevice_v3(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew, const double alphaSquared, const int Nx, const int Ny) { const int tidx = blockIdx.x * blockDim.x + threadIdx.x; const int tidy = blockIdx.y * blockDim.y + threadIdx.y; if ((tidx >= Nx) || (tidy >= Ny)) return; const int tid_block = threadIdx.y * BLOCKSIZEX + threadIdx.x; // --- Flattened thread index within a block const int tidx1 = tid_block % (BLOCKSIZEX + 2); const int tidy1 = tid_block / (BLOCKSIZEY + 2); const int tidx2 = (BLOCKSIZEX * BLOCKSIZEY + tid_block) % (BLOCKSIZEX + 2); const int tidy2 = (BLOCKSIZEX * BLOCKSIZEY + tid_block) / (BLOCKSIZEY + 2); __shared__ double d_u_sh[BLOCKSIZEX + 2][BLOCKSIZEY + 2]; if (((blockIdx.x * BLOCKSIZEX - 1 + tidx1) < Nx) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx1) >= 0) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy1) < Ny) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy1) >= 0)) d_u_sh[tidx1][tidy1] = d_u[(blockIdx.x * BLOCKSIZEX - 1 + tidx1) + (blockIdx.y * BLOCKSIZEY - 1 + tidy1) * Nx]; if (((tidx2 < (BLOCKSIZEX + 2)) && (tidy2 < (BLOCKSIZEY + 2))) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx2) < Nx) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx2) >= 0) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy2) < Ny) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy2) >= 0)) d_u_sh[tidx2][tidy2] = d_u[(blockIdx.x * BLOCKSIZEX - 1 + tidx2) + (blockIdx.y * BLOCKSIZEY - 1 + tidy2) * Nx]; __syncthreads(); if ((tidx > 0 && tidx < Nx - 1 && tidy > 0 && tidy < Ny - 1)) d_unew[tidy * Nx + tidx] = 2. 
* d_u_sh[threadIdx.x + 1][threadIdx.y + 1] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u_sh[threadIdx.x][threadIdx.y + 1] + d_u_sh[threadIdx.x + 2][threadIdx.y + 1] + d_u_sh[threadIdx.x + 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y + 2] - 4. * d_u_sh[threadIdx.x + 1][threadIdx.y + 1]); } /********/ /* MAIN */ /********/ int main() { const int Nx = 512; // --- Number of mesh points along x const int Ny = 512; // --- Number of mesh points along y const double Lx = 200.; // --- Length of the domain along x const double Ly = 200; // --- Length of the domain along y double *h_x = h_linspace(0., Lx, Nx); // --- Mesh points along x double *h_y = h_linspace(0., Ly, Ny); // --- Mesh points along y const double dx = h_x[2] - h_x[1]; // --- Mesh step along x const double dy = h_y[2] - h_y[1]; // --- Mesh step along y const double v = 5.; // --- Wave speed const double p = 0.0; // --- Wave decay factor const double dt = 0.25 / (v * sqrt((1. / dx) * (1. / dx) + (1. / dy) * (1. / dy))); // --- Time - Step matching the Courant - Friedrichs - Lewy condition const int T = floor((3. 
* sqrt(Lx * Lx + Ly * Ly) / v) / dt); // --- Total number of time steps double *h_u = (double *)calloc(Nx * Ny, sizeof(double)); // --- Current solution u(x, y, t) - host double *h_uold = (double *)calloc(Nx * Ny, sizeof(double)); // --- Solution at the previous step - host double *h_unew = (double *)calloc(Nx * Ny, sizeof(double)); // --- Solution at the next step - host double *d_u; gpuErrchk(hipMalloc((void**)&d_u, Nx * Ny * sizeof(double))); // --- Current solution u(x, y, t) - device double *d_uold; gpuErrchk(hipMalloc((void**)&d_uold, Nx * Ny * sizeof(double))); // --- Solution at the previous step - device double *d_unew; gpuErrchk(hipMalloc((void**)&d_unew, Nx * Ny * sizeof(double))); // --- Solution at the next step - device gpuErrchk(hipMemset(d_unew, 0, Nx * Ny * sizeof(double))); // --- Initial conditions const int indxc = floor(Nx / 3) - 1; // --- Index for the source location along x const int indyc = floor(Ny / 2) - 1; // --- Index for the source location along y const double xc = h_x[indxc]; // --- x - coordinate of source const double yc = h_y[indyc]; // --- y - coordinate of source const int indRc = 50; const double Rc = Lx / indRc; for (int j = 0; j < Ny; j++) for (int i = 0; i < Nx; i++) { if (sqrt((h_x[i] - xc) * (h_x[i] - xc) + (h_y[j] - yc) * (h_y[j] - yc)) <= Rc) h_u[j * Nx + i] = exp(-indRc * ((h_x[i] - xc) * (h_x[i] - xc) + (h_y[j] - yc) * (h_y[j] - yc)) / Lx); h_uold[j * Nx + i] = h_u[j * Nx + i]; } // --- Transfering the initial condition from host to device gpuErrchk(hipMemcpy(d_uold, h_uold, Nx * Ny * sizeof(double), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_u, h_u, Nx * Ny * sizeof(double), hipMemcpyHostToDevice)); /*********************/ /* ITERATIONS - HOST */ /*********************/ const double alphaSquared = dt * dt * v * v / (dx * dx); // --- CFL number for (int tt = 0; tt < T; tt++) { updateHost(h_uold, h_u, h_unew, alphaSquared, Nx, Ny); h_uold = h_u; // --- Curent solution becomes old h_u = h_unew; // --- New solution 
becomes current h_unew = h_uold; } /***********************/ /* ITERATIONS - DEVICE */ /***********************/ // --- For the cases of no shared memory and shared memory v1 and v3 dim3 Grid(iDivUp(Nx, BLOCKSIZEX), iDivUp(Ny, BLOCKSIZEY)); dim3 Block(BLOCKSIZEX, BLOCKSIZEY); // --- For the case of shared memory v2 only //dim3 Grid(iDivUp(Nx, BLOCKSIZEX - 2), iDivUp(Ny, BLOCKSIZEY - 2)); //dim3 Block(BLOCKSIZEX, BLOCKSIZEY); TimingGPU timerGPU; timerGPU.StartCounter(); for (int tt = 0; tt < T; tt++) { //updateDevice_v0<<<Grid, Block>>>(d_uold, d_u, d_unew, alphaSquared, Nx, Ny); //updateDevice_v1<<<Grid, Block>>>(d_uold, d_u, d_unew, alphaSquared, Nx, Ny); //updateDevice_v2 << <Grid, Block >> >(d_uold, d_u, d_unew, alphaSquared, Nx, Ny); updateDevice_v3 << <Grid, Block >> >(d_uold, d_u, d_unew, alphaSquared, Nx, Ny); #ifdef DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif d_uold = d_u; // --- Curent solution becomes old d_u = d_unew; // --- New solution becomes current d_unew = d_uold; } printf("GPU timing %f\n", timerGPU.GetCounter()); saveCPUrealtxt(h_u, "D:\\PDEs\\Wave-Equation\\2D\\Matlab\\FDTD2D_hostResult.txt", Nx * Ny); double *h_uDevice = (double *)malloc(Nx * Ny * sizeof(double)); gpuErrchk(hipMemcpy(h_uDevice, d_u, Nx * Ny * sizeof(double), hipMemcpyDeviceToHost)); saveCPUrealtxt(h_uDevice, "D:\\PDEs\\Wave-Equation\\2D\\Matlab\\FDTD2D_deviceResult.txt", Nx * Ny); return 0; }
728b114b8e6997e0f9122b2d08c6511a3f73ecf6.cu
// CUDA Implementation of the 2D wave equation #include <math.h> #include <cuda.h> #include "InputOutput.h" #include "Utilities.cuh" #include "Matlab_like.cuh" #include "TimingGPU.cuh" #define BLOCKSIZEX 16 #define BLOCKSIZEY 16 #define DEBUG /***********************************/ /* HOST-SIZE FIELD UPDATE FUNCTION */ /***********************************/ void updateHost(const double * __restrict__ h_uold, const double * __restrict__ h_u, double * __restrict__ h_unew, const double alphaSquared, const int Nx, const int Ny) { for (int j = 1; j < Ny - 1; j++) for (int i = 1; i < Nx - 1; i++) h_unew[j * Nx + i] = 2. * h_u[j * Nx + i] - h_uold[j * Nx + i] + alphaSquared * (h_u[j * Nx + i - 1] + h_u[j * Nx + i + 1] + h_u[(j + 1) * Nx + i] + h_u[(j - 1) * Nx + i] - 4. * h_u[j * Nx + i]); } /********************************************************/ /* DEVICE-SIZE FIELD UPDATE FUNCTION - NO SHARED MEMORY */ /********************************************************/ __global__ void updateDevice_v0(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew, const double alphaSquared, const int Nx, const int Ny) { const int tidx = threadIdx.x + blockIdx.x * blockDim.x; const int tidy = threadIdx.y + blockIdx.y * blockDim.y; if ((tidx >= Nx - 1) || (tidx == 0) || (tidy >= Ny - 1) || (tidy == 0)) return; d_unew[tidy * Nx + tidx] = 2. * d_u[tidy * Nx + tidx] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u[tidy * Nx + tidx - 1] + d_u[tidy * Nx + tidx + 1] + d_u[(tidy + 1) * Nx + tidx] + d_u[(tidy - 1) * Nx + tidx] - 4. 
* d_u[tidy * Nx + tidx]); } /********************************************************/ /* DEVICE-SIZE FIELD UPDATE FUNCTION - SHARED MEMORY v1 */ /********************************************************/ __global__ void updateDevice_v1(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew, const double alphaSquared, const int Nx, const int Ny) { const int tidx = threadIdx.x + blockIdx.x * blockDim.x; const int tidy = threadIdx.y + blockIdx.y * blockDim.y; if ((tidx >= Nx) || (tidy >= Ny)) return; __shared__ double d_u_sh[BLOCKSIZEX][BLOCKSIZEY]; // --- Load data to shared memory. Halo regions are NOT loaded. d_u_sh[threadIdx.x][threadIdx.y] = d_u[tidy * Nx + tidx]; __syncthreads(); if ((threadIdx.x > 0) && (threadIdx.x < (BLOCKSIZEX - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKSIZEY ‐ 1)) && (tidx < Nx - 1) && (tidy < Ny - 1)) // --- If we do not need halo region elements (we are "inside" a thread block, not on the border), then use shared memory. d_unew[tidy * Nx + tidx] = 2. * d_u_sh[threadIdx.x][threadIdx.y] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u_sh[threadIdx.x - 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y] + d_u_sh[threadIdx.x][threadIdx.y - 1] + d_u_sh[threadIdx.x][threadIdx.y + 1] - 4. * d_u_sh[threadIdx.x][threadIdx.y]); else if (tidx > 0 && tidx < Nx - 1 && tidy > 0 && tidy < Ny - 1) // --- Only update "interior" (not boundary) node points // --- If we need halo region elements, then use global memory. d_unew[tidy * Nx + tidx] = 2. * d_u[tidy * Nx + tidx] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u[tidy * Nx + tidx - 1] + d_u[tidy * Nx + tidx + 1] + d_u[(tidy + 1) * Nx + tidx] + d_u[(tidy - 1) * Nx + tidx] - 4. 
* d_u[tidy * Nx + tidx]); } /********************************************************/ /* DEVICE-SIZE FIELD UPDATE FUNCTION - SHARED MEMORY v2 */ /********************************************************/ __global__ void updateDevice_v2(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew, const double alphaSquared, const int Nx, const int Ny) { const int tidx = blockIdx.x * (BLOCKSIZEX - 2) + threadIdx.x; const int tidy = blockIdx.y * (BLOCKSIZEY - 2) + threadIdx.y; if ((tidx >= Nx) || (tidy >= Ny)) return; __shared__ double d_u_sh[BLOCKSIZEX][BLOCKSIZEY]; // --- Load data to shared memory. Halo regions ARE loaded. d_u_sh[threadIdx.x][threadIdx.y] = d_u[tidy * Nx + tidx]; __syncthreads(); if (((threadIdx.x > 0) && (threadIdx.x < (BLOCKSIZEX - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKSIZEY ‐ 1))) && (tidx < Nx - 1 && tidy < Ny - 1)) d_unew[tidy * Nx + tidx] = 2. * d_u_sh[threadIdx.x][threadIdx.y] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u_sh[threadIdx.x - 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y] + d_u_sh[threadIdx.x][threadIdx.y - 1] + d_u_sh[threadIdx.x][threadIdx.y + 1] - 4. 
* d_u_sh[threadIdx.x][threadIdx.y]); } /********************************************************/ /* DEVICE-SIZE FIELD UPDATE FUNCTION - SHARED MEMORY v3 */ /********************************************************/ __global__ void updateDevice_v3(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew, const double alphaSquared, const int Nx, const int Ny) { const int tidx = blockIdx.x * blockDim.x + threadIdx.x; const int tidy = blockIdx.y * blockDim.y + threadIdx.y; if ((tidx >= Nx) || (tidy >= Ny)) return; const int tid_block = threadIdx.y * BLOCKSIZEX + threadIdx.x; // --- Flattened thread index within a block const int tidx1 = tid_block % (BLOCKSIZEX + 2); const int tidy1 = tid_block / (BLOCKSIZEY + 2); const int tidx2 = (BLOCKSIZEX * BLOCKSIZEY + tid_block) % (BLOCKSIZEX + 2); const int tidy2 = (BLOCKSIZEX * BLOCKSIZEY + tid_block) / (BLOCKSIZEY + 2); __shared__ double d_u_sh[BLOCKSIZEX + 2][BLOCKSIZEY + 2]; if (((blockIdx.x * BLOCKSIZEX - 1 + tidx1) < Nx) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx1) >= 0) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy1) < Ny) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy1) >= 0)) d_u_sh[tidx1][tidy1] = d_u[(blockIdx.x * BLOCKSIZEX - 1 + tidx1) + (blockIdx.y * BLOCKSIZEY - 1 + tidy1) * Nx]; if (((tidx2 < (BLOCKSIZEX + 2)) && (tidy2 < (BLOCKSIZEY + 2))) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx2) < Nx) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx2) >= 0) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy2) < Ny) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy2) >= 0)) d_u_sh[tidx2][tidy2] = d_u[(blockIdx.x * BLOCKSIZEX - 1 + tidx2) + (blockIdx.y * BLOCKSIZEY - 1 + tidy2) * Nx]; __syncthreads(); if ((tidx > 0 && tidx < Nx - 1 && tidy > 0 && tidy < Ny - 1)) d_unew[tidy * Nx + tidx] = 2. 
* d_u_sh[threadIdx.x + 1][threadIdx.y + 1] - d_uold[tidy * Nx + tidx] + alphaSquared * (d_u_sh[threadIdx.x][threadIdx.y + 1] + d_u_sh[threadIdx.x + 2][threadIdx.y + 1] + d_u_sh[threadIdx.x + 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y + 2] - 4. * d_u_sh[threadIdx.x + 1][threadIdx.y + 1]); } /********/ /* MAIN */ /********/ int main() { const int Nx = 512; // --- Number of mesh points along x const int Ny = 512; // --- Number of mesh points along y const double Lx = 200.; // --- Length of the domain along x const double Ly = 200; // --- Length of the domain along y double *h_x = h_linspace(0., Lx, Nx); // --- Mesh points along x double *h_y = h_linspace(0., Ly, Ny); // --- Mesh points along y const double dx = h_x[2] - h_x[1]; // --- Mesh step along x const double dy = h_y[2] - h_y[1]; // --- Mesh step along y const double v = 5.; // --- Wave speed const double p = 0.0; // --- Wave decay factor const double dt = 0.25 / (v * sqrt((1. / dx) * (1. / dx) + (1. / dy) * (1. / dy))); // --- Time - Step matching the Courant - Friedrichs - Lewy condition const int T = floor((3. 
* sqrt(Lx * Lx + Ly * Ly) / v) / dt); // --- Total number of time steps double *h_u = (double *)calloc(Nx * Ny, sizeof(double)); // --- Current solution u(x, y, t) - host double *h_uold = (double *)calloc(Nx * Ny, sizeof(double)); // --- Solution at the previous step - host double *h_unew = (double *)calloc(Nx * Ny, sizeof(double)); // --- Solution at the next step - host double *d_u; gpuErrchk(cudaMalloc((void**)&d_u, Nx * Ny * sizeof(double))); // --- Current solution u(x, y, t) - device double *d_uold; gpuErrchk(cudaMalloc((void**)&d_uold, Nx * Ny * sizeof(double))); // --- Solution at the previous step - device double *d_unew; gpuErrchk(cudaMalloc((void**)&d_unew, Nx * Ny * sizeof(double))); // --- Solution at the next step - device gpuErrchk(cudaMemset(d_unew, 0, Nx * Ny * sizeof(double))); // --- Initial conditions const int indxc = floor(Nx / 3) - 1; // --- Index for the source location along x const int indyc = floor(Ny / 2) - 1; // --- Index for the source location along y const double xc = h_x[indxc]; // --- x - coordinate of source const double yc = h_y[indyc]; // --- y - coordinate of source const int indRc = 50; const double Rc = Lx / indRc; for (int j = 0; j < Ny; j++) for (int i = 0; i < Nx; i++) { if (sqrt((h_x[i] - xc) * (h_x[i] - xc) + (h_y[j] - yc) * (h_y[j] - yc)) <= Rc) h_u[j * Nx + i] = exp(-indRc * ((h_x[i] - xc) * (h_x[i] - xc) + (h_y[j] - yc) * (h_y[j] - yc)) / Lx); h_uold[j * Nx + i] = h_u[j * Nx + i]; } // --- Transfering the initial condition from host to device gpuErrchk(cudaMemcpy(d_uold, h_uold, Nx * Ny * sizeof(double), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_u, h_u, Nx * Ny * sizeof(double), cudaMemcpyHostToDevice)); /*********************/ /* ITERATIONS - HOST */ /*********************/ const double alphaSquared = dt * dt * v * v / (dx * dx); // --- CFL number for (int tt = 0; tt < T; tt++) { updateHost(h_uold, h_u, h_unew, alphaSquared, Nx, Ny); h_uold = h_u; // --- Curent solution becomes old h_u = h_unew; // --- New 
solution becomes current h_unew = h_uold; } /***********************/ /* ITERATIONS - DEVICE */ /***********************/ // --- For the cases of no shared memory and shared memory v1 and v3 dim3 Grid(iDivUp(Nx, BLOCKSIZEX), iDivUp(Ny, BLOCKSIZEY)); dim3 Block(BLOCKSIZEX, BLOCKSIZEY); // --- For the case of shared memory v2 only //dim3 Grid(iDivUp(Nx, BLOCKSIZEX - 2), iDivUp(Ny, BLOCKSIZEY - 2)); //dim3 Block(BLOCKSIZEX, BLOCKSIZEY); TimingGPU timerGPU; timerGPU.StartCounter(); for (int tt = 0; tt < T; tt++) { //updateDevice_v0<<<Grid, Block>>>(d_uold, d_u, d_unew, alphaSquared, Nx, Ny); //updateDevice_v1<<<Grid, Block>>>(d_uold, d_u, d_unew, alphaSquared, Nx, Ny); //updateDevice_v2 << <Grid, Block >> >(d_uold, d_u, d_unew, alphaSquared, Nx, Ny); updateDevice_v3 << <Grid, Block >> >(d_uold, d_u, d_unew, alphaSquared, Nx, Ny); #ifdef DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif d_uold = d_u; // --- Curent solution becomes old d_u = d_unew; // --- New solution becomes current d_unew = d_uold; } printf("GPU timing %f\n", timerGPU.GetCounter()); saveCPUrealtxt(h_u, "D:\\PDEs\\Wave-Equation\\2D\\Matlab\\FDTD2D_hostResult.txt", Nx * Ny); double *h_uDevice = (double *)malloc(Nx * Ny * sizeof(double)); gpuErrchk(cudaMemcpy(h_uDevice, d_u, Nx * Ny * sizeof(double), cudaMemcpyDeviceToHost)); saveCPUrealtxt(h_uDevice, "D:\\PDEs\\Wave-Equation\\2D\\Matlab\\FDTD2D_deviceResult.txt", Nx * Ny); return 0; }
18b5619e62131a0cdbd863a170b9ad0cfc395549.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] += bias[i % nBias]; }
18b5619e62131a0cdbd863a170b9ad0cfc395549.cu
#include "includes.h" __global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] += bias[i % nBias]; }
c26852ee943123fc5dd4213ba88f1f0f8388eb78.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Quadro and Tesla GPUs with compute capability >= 2.0 can overlap two memcopies * with kernel execution. This sample illustrates the usage of CUDA streams to * achieve overlapping of kernel execution with copying data to and from the device. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). 
* */ const char *sSDKname = "simpleMultiCopy"; // includes, system #include <stdio.h> // include CUDA #include <hip/hip_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper for shared that are common to CUDA SDK samples #define EXIT_WAIVED 2 // includes, kernels // Declare the CUDA kernels here and main() code that is needed to launch // Compute workload on the system __global__ void incKernel(int *g_out, int *g_in, int N, int inner_reps) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { for (int i=0; i<inner_reps; ++i) { g_out[idx] = g_in[idx] + 1; } } } #define STREAM_COUNT 4 // Uncomment to simulate data source/sink IO times //#define SIMULATE_IO int *h_data_source; int *h_data_sink; int *h_data_in[STREAM_COUNT]; int *d_data_in[STREAM_COUNT]; int *h_data_out[STREAM_COUNT]; int *d_data_out[STREAM_COUNT]; hipEvent_t cycleDone[STREAM_COUNT]; hipStream_t stream[STREAM_COUNT]; hipEvent_t start, stop; int N = 1 << 22; int nreps = 10; // number of times each experiment is repeated int inner_reps = 5; int memsize; dim3 block(512); dim3 grid; int thread_blocks; float processWithStreams(int streams_used); void init(); bool test(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { int cuda_device = 0; float scale_factor; hipDeviceProp_t deviceProp; printf("[%s] - Starting...\n", sSDKname); if (checkCmdLineFlag(argc, (const char **)argv, "device")) { cuda_device = getCmdLineArgumentInt(argc, (const char **)argv, "device="); if (cuda_device < 0) { printf("Invalid command line parameters\n"); exit(EXIT_FAILURE); } else { printf("cuda_device = %d\n", cuda_device); cuda_device = gpuDeviceInit(cuda_device); if (cuda_device < 0) { printf("No CUDA Capable devices found, exiting...\n"); exit(EXIT_SUCCESS); } } } else { // Otherwise pick the device with the 
highest Gflops/s cuda_device = gpuGetMaxGflopsDeviceId(); checkCudaErrors(hipSetDevice(cuda_device)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device)); printf("> Using CUDA device [%d]: %s\n", cuda_device, deviceProp.name); } checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device)); printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", deviceProp.name, deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); // Anything that is less than 32 Cores will have scaled down workload scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); N = (int)((float)N / scale_factor); printf("> Device name: %s\n", deviceProp.name); printf("> CUDA Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); printf("> scale_factor = %.2f\n", 1.0f/scale_factor); printf("> array_size = %d\n\n", N); memsize = N * sizeof(int); thread_blocks = N / block.x; grid.x = thread_blocks % 65535; grid.y = (thread_blocks / 65535 + 1); // Allocate resources h_data_source = (int *) malloc(memsize); h_data_sink = (int *) malloc(memsize); for (int i =0; i<STREAM_COUNT; ++i) { checkCudaErrors(hipHostMalloc(&h_data_in[i], memsize, hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_data_in[i], memsize)); checkCudaErrors(hipHostMalloc(&h_data_out[i], memsize, hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_data_out[i], memsize)); checkCudaErrors(hipStreamCreate(&stream[i])); checkCudaErrors(hipEventCreate(&cycleDone[i])); hipEventRecord(cycleDone[i], stream[i]); } hipEventCreate(&start); hipEventCreate(&stop); init(); // Kernel warmup hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block), 0, 0, d_data_out[0], d_data_in[0], N, inner_reps); // Time copies and kernel hipEventRecord(start,0); 
checkCudaErrors(hipMemcpyAsync(d_data_in[0], h_data_in[0], memsize, hipMemcpyHostToDevice,0)); hipEventRecord(stop,0); hipEventSynchronize(stop); float memcpy_h2d_time; hipEventElapsedTime(&memcpy_h2d_time, start, stop); hipEventRecord(start,0); checkCudaErrors(hipMemcpyAsync(h_data_out[0], d_data_out[0], memsize, hipMemcpyDeviceToHost, 0)); hipEventRecord(stop,0); hipEventSynchronize(stop); float memcpy_d2h_time; hipEventElapsedTime(&memcpy_d2h_time, start, stop); hipEventRecord(start,0); hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block),0,0, d_data_out[0], d_data_in[0], N, inner_reps); hipEventRecord(stop,0); hipEventSynchronize(stop); float kernel_time; hipEventElapsedTime(&kernel_time, start, stop); printf("\n"); printf("Relevant properties of this CUDA device\n"); printf("(%s) Can overlap one CPU<>GPU data transfer with GPU kernel execution (device property \"deviceOverlap\")\n", deviceProp.deviceOverlap ? "X" : " "); //printf("(%s) Can execute several GPU kernels simultaneously (compute capability >= 2.0)\n", deviceProp.major >= 2 ? "X": " "); printf("(%s) Can overlap two CPU<>GPU data transfers with GPU kernel execution\n" " (Compute Capability >= 2.0 AND (Tesla product OR Quadro 4000/5000/6000/K5000)\n", (deviceProp.major >= 2 && deviceProp.asyncEngineCount > 1) ? 
"X" : " "); printf("\n"); printf("Measured timings (throughput):\n"); printf(" Memcpy host to device\t: %f ms (%f GB/s)\n", memcpy_h2d_time, (memsize * 1e-6)/ memcpy_h2d_time); printf(" Memcpy device to host\t: %f ms (%f GB/s)\n", memcpy_d2h_time, (memsize * 1e-6)/ memcpy_d2h_time); printf(" Kernel\t\t\t: %f ms (%f GB/s)\n", kernel_time, (inner_reps *memsize * 2e-6)/ kernel_time); printf("\n"); printf("Theoretical limits for speedup gained from overlapped data transfers:\n"); printf("No overlap at all (transfer-kernel-transfer): %f ms \n", memcpy_h2d_time + memcpy_d2h_time + kernel_time); printf("Compute can overlap with one transfer: %f ms\n", max((memcpy_h2d_time + memcpy_d2h_time), kernel_time)); printf("Compute can overlap with both data transfers: %f ms\n", max(max(memcpy_h2d_time,memcpy_d2h_time), kernel_time)); // Process pipelined work float serial_time = processWithStreams(1); float overlap_time = processWithStreams(STREAM_COUNT); printf("\nAverage measured timings over %d repetitions:\n", nreps); printf(" Avg. time when execution fully serialized\t: %f ms\n", serial_time / nreps); printf(" Avg. time when overlapped using %d streams\t: %f ms\n", STREAM_COUNT, overlap_time / nreps); printf(" Avg. 
speedup gained (serialized - overlapped)\t: %f ms\n", (serial_time - overlap_time) / nreps); printf("\nMeasured throughput:\n"); printf(" Fully serialized execution\t\t: %f GB/s\n", (nreps * (memsize * 2e-6))/ serial_time); printf(" Overlapped using %d streams\t\t: %f GB/s\n", STREAM_COUNT, (nreps * (memsize * 2e-6))/ overlap_time); // Verify the results, we will use the results for final output bool bResults = test(); // Free resources free(h_data_source); free(h_data_sink); for (int i =0; i<STREAM_COUNT; ++i) { hipHostFree(h_data_in[i]); hipFree(d_data_in[i]); hipHostFree(h_data_out[i]); hipFree(d_data_out[i]); hipStreamDestroy(stream[i]); hipEventDestroy(cycleDone[i]); } hipEventDestroy(start); hipEventDestroy(stop); hipDeviceReset(); // Test result exit(bResults ? EXIT_SUCCESS : EXIT_FAILURE); } float processWithStreams(int streams_used) { int current_stream = 0; float time; // Do processing in a loop // // Note: All memory commands are processed in the order they are issued, // independent of the stream they are enqueued in. Hence the pattern by // which the copy and kernel commands are enqueued in the stream // has an influence on the achieved overlap. 
hipEventRecord(start, 0); for (int i=0; i<nreps; ++i) { int next_stream = (current_stream + 1) % streams_used; #ifdef SIMULATE_IO // Store the result memcpy(h_data_sink, h_data_out[current_stream],memsize); // Read new input memcpy(h_data_in[next_stream], h_data_source, memsize); #endif // Ensure that processing and copying of the last cycle has finished hipEventSynchronize(cycleDone[next_stream]); // Process current frame hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block), 0, stream[current_stream], d_data_out[current_stream], d_data_in[current_stream], N, inner_reps); // Upload next frame checkCudaErrors(hipMemcpyAsync( d_data_in[next_stream], h_data_in[next_stream], memsize, hipMemcpyHostToDevice, stream[next_stream])); // Download current frame checkCudaErrors(hipMemcpyAsync( h_data_out[current_stream], d_data_out[current_stream], memsize, hipMemcpyDeviceToHost, stream[current_stream])); checkCudaErrors(hipEventRecord( cycleDone[current_stream], stream[current_stream])); current_stream = next_stream; } hipEventRecord(stop, 0); hipDeviceSynchronize(); hipEventElapsedTime(&time, start, stop); return time; } void init() { for (int i=0; i<N; ++i) { h_data_source[i] = 0; } for (int i =0; i<STREAM_COUNT; ++i) { memcpy(h_data_in[i], h_data_source, memsize); } } bool test() { bool passed = true; for (int j =0; j<STREAM_COUNT; ++j) { for (int i =0; i<N; ++i) { passed &= (h_data_out[j][i] == 1); } } return passed; }
c26852ee943123fc5dd4213ba88f1f0f8388eb78.cu
/* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Quadro and Tesla GPUs with compute capability >= 2.0 can overlap two memcopies * with kernel execution. This sample illustrates the usage of CUDA streams to * achieve overlapping of kernel execution with copying data to and from the device. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * */ const char *sSDKname = "simpleMultiCopy"; // includes, system #include <stdio.h> // include CUDA #include <cuda_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper for shared that are common to CUDA SDK samples #define EXIT_WAIVED 2 // includes, kernels // Declare the CUDA kernels here and main() code that is needed to launch // Compute workload on the system __global__ void incKernel(int *g_out, int *g_in, int N, int inner_reps) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { for (int i=0; i<inner_reps; ++i) { g_out[idx] = g_in[idx] + 1; } } } #define STREAM_COUNT 4 // Uncomment to simulate data source/sink IO times //#define SIMULATE_IO int *h_data_source; int *h_data_sink; int *h_data_in[STREAM_COUNT]; int *d_data_in[STREAM_COUNT]; int *h_data_out[STREAM_COUNT]; int *d_data_out[STREAM_COUNT]; cudaEvent_t cycleDone[STREAM_COUNT]; cudaStream_t stream[STREAM_COUNT]; cudaEvent_t start, stop; int N = 1 << 22; int nreps = 10; // number of times each experiment is 
repeated int inner_reps = 5; int memsize; dim3 block(512); dim3 grid; int thread_blocks; float processWithStreams(int streams_used); void init(); bool test(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { int cuda_device = 0; float scale_factor; cudaDeviceProp deviceProp; printf("[%s] - Starting...\n", sSDKname); if (checkCmdLineFlag(argc, (const char **)argv, "device")) { cuda_device = getCmdLineArgumentInt(argc, (const char **)argv, "device="); if (cuda_device < 0) { printf("Invalid command line parameters\n"); exit(EXIT_FAILURE); } else { printf("cuda_device = %d\n", cuda_device); cuda_device = gpuDeviceInit(cuda_device); if (cuda_device < 0) { printf("No CUDA Capable devices found, exiting...\n"); exit(EXIT_SUCCESS); } } } else { // Otherwise pick the device with the highest Gflops/s cuda_device = gpuGetMaxGflopsDeviceId(); checkCudaErrors(cudaSetDevice(cuda_device)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device)); printf("> Using CUDA device [%d]: %s\n", cuda_device, deviceProp.name); } checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device)); printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", deviceProp.name, deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); // Anything that is less than 32 Cores will have scaled down workload scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); N = (int)((float)N / scale_factor); printf("> Device name: %s\n", deviceProp.name); printf("> CUDA Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); printf("> scale_factor = %.2f\n", 1.0f/scale_factor); 
printf("> array_size = %d\n\n", N); memsize = N * sizeof(int); thread_blocks = N / block.x; grid.x = thread_blocks % 65535; grid.y = (thread_blocks / 65535 + 1); // Allocate resources h_data_source = (int *) malloc(memsize); h_data_sink = (int *) malloc(memsize); for (int i =0; i<STREAM_COUNT; ++i) { checkCudaErrors(cudaHostAlloc(&h_data_in[i], memsize, cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_data_in[i], memsize)); checkCudaErrors(cudaHostAlloc(&h_data_out[i], memsize, cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_data_out[i], memsize)); checkCudaErrors(cudaStreamCreate(&stream[i])); checkCudaErrors(cudaEventCreate(&cycleDone[i])); cudaEventRecord(cycleDone[i], stream[i]); } cudaEventCreate(&start); cudaEventCreate(&stop); init(); // Kernel warmup incKernel<<<grid, block>>>(d_data_out[0], d_data_in[0], N, inner_reps); // Time copies and kernel cudaEventRecord(start,0); checkCudaErrors(cudaMemcpyAsync(d_data_in[0], h_data_in[0], memsize, cudaMemcpyHostToDevice,0)); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float memcpy_h2d_time; cudaEventElapsedTime(&memcpy_h2d_time, start, stop); cudaEventRecord(start,0); checkCudaErrors(cudaMemcpyAsync(h_data_out[0], d_data_out[0], memsize, cudaMemcpyDeviceToHost, 0)); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float memcpy_d2h_time; cudaEventElapsedTime(&memcpy_d2h_time, start, stop); cudaEventRecord(start,0); incKernel<<<grid, block,0,0>>>(d_data_out[0], d_data_in[0], N, inner_reps); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float kernel_time; cudaEventElapsedTime(&kernel_time, start, stop); printf("\n"); printf("Relevant properties of this CUDA device\n"); printf("(%s) Can overlap one CPU<>GPU data transfer with GPU kernel execution (device property \"deviceOverlap\")\n", deviceProp.deviceOverlap ? "X" : " "); //printf("(%s) Can execute several GPU kernels simultaneously (compute capability >= 2.0)\n", deviceProp.major >= 2 ? 
"X": " "); printf("(%s) Can overlap two CPU<>GPU data transfers with GPU kernel execution\n" " (Compute Capability >= 2.0 AND (Tesla product OR Quadro 4000/5000/6000/K5000)\n", (deviceProp.major >= 2 && deviceProp.asyncEngineCount > 1) ? "X" : " "); printf("\n"); printf("Measured timings (throughput):\n"); printf(" Memcpy host to device\t: %f ms (%f GB/s)\n", memcpy_h2d_time, (memsize * 1e-6)/ memcpy_h2d_time); printf(" Memcpy device to host\t: %f ms (%f GB/s)\n", memcpy_d2h_time, (memsize * 1e-6)/ memcpy_d2h_time); printf(" Kernel\t\t\t: %f ms (%f GB/s)\n", kernel_time, (inner_reps *memsize * 2e-6)/ kernel_time); printf("\n"); printf("Theoretical limits for speedup gained from overlapped data transfers:\n"); printf("No overlap at all (transfer-kernel-transfer): %f ms \n", memcpy_h2d_time + memcpy_d2h_time + kernel_time); printf("Compute can overlap with one transfer: %f ms\n", max((memcpy_h2d_time + memcpy_d2h_time), kernel_time)); printf("Compute can overlap with both data transfers: %f ms\n", max(max(memcpy_h2d_time,memcpy_d2h_time), kernel_time)); // Process pipelined work float serial_time = processWithStreams(1); float overlap_time = processWithStreams(STREAM_COUNT); printf("\nAverage measured timings over %d repetitions:\n", nreps); printf(" Avg. time when execution fully serialized\t: %f ms\n", serial_time / nreps); printf(" Avg. time when overlapped using %d streams\t: %f ms\n", STREAM_COUNT, overlap_time / nreps); printf(" Avg. 
speedup gained (serialized - overlapped)\t: %f ms\n", (serial_time - overlap_time) / nreps); printf("\nMeasured throughput:\n"); printf(" Fully serialized execution\t\t: %f GB/s\n", (nreps * (memsize * 2e-6))/ serial_time); printf(" Overlapped using %d streams\t\t: %f GB/s\n", STREAM_COUNT, (nreps * (memsize * 2e-6))/ overlap_time); // Verify the results, we will use the results for final output bool bResults = test(); // Free resources free(h_data_source); free(h_data_sink); for (int i =0; i<STREAM_COUNT; ++i) { cudaFreeHost(h_data_in[i]); cudaFree(d_data_in[i]); cudaFreeHost(h_data_out[i]); cudaFree(d_data_out[i]); cudaStreamDestroy(stream[i]); cudaEventDestroy(cycleDone[i]); } cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceReset(); // Test result exit(bResults ? EXIT_SUCCESS : EXIT_FAILURE); } float processWithStreams(int streams_used) { int current_stream = 0; float time; // Do processing in a loop // // Note: All memory commands are processed in the order they are issued, // independent of the stream they are enqueued in. Hence the pattern by // which the copy and kernel commands are enqueued in the stream // has an influence on the achieved overlap. 
cudaEventRecord(start, 0); for (int i=0; i<nreps; ++i) { int next_stream = (current_stream + 1) % streams_used; #ifdef SIMULATE_IO // Store the result memcpy(h_data_sink, h_data_out[current_stream],memsize); // Read new input memcpy(h_data_in[next_stream], h_data_source, memsize); #endif // Ensure that processing and copying of the last cycle has finished cudaEventSynchronize(cycleDone[next_stream]); // Process current frame incKernel<<<grid, block, 0, stream[current_stream]>>>( d_data_out[current_stream], d_data_in[current_stream], N, inner_reps); // Upload next frame checkCudaErrors(cudaMemcpyAsync( d_data_in[next_stream], h_data_in[next_stream], memsize, cudaMemcpyHostToDevice, stream[next_stream])); // Download current frame checkCudaErrors(cudaMemcpyAsync( h_data_out[current_stream], d_data_out[current_stream], memsize, cudaMemcpyDeviceToHost, stream[current_stream])); checkCudaErrors(cudaEventRecord( cycleDone[current_stream], stream[current_stream])); current_stream = next_stream; } cudaEventRecord(stop, 0); cudaDeviceSynchronize(); cudaEventElapsedTime(&time, start, stop); return time; } void init() { for (int i=0; i<N; ++i) { h_data_source[i] = 0; } for (int i =0; i<STREAM_COUNT; ++i) { memcpy(h_data_in[i], h_data_source, memsize); } } bool test() { bool passed = true; for (int j =0; j<STREAM_COUNT; ++j) { for (int i =0; i<N; ++i) { passed &= (h_data_out[j][i] == 1); } } return passed; }
cbcda169c26331ad486919123d239743b9de5c7f.hip
// !!! This is a file automatically generated by hipify!!! #include "fft_product2.cu" static int prod_fprop_complex(lua_State *L) { THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *weight = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); bool conjWeight = lua_toboolean(L,4); bool accumulate = false; luaL_argcheck(L, input->nDimension == 5, 2, "input should be 4D complex tensor"); luaL_argcheck(L, output->nDimension == 5, 2, "output should be 4D complex tensor"); luaL_argcheck(L, weight->nDimension == 5, 2, "kernel should be 4D complex tensor"); long nMinibatch = input->size[0]; long nOutputPlanes = weight->size[0]; long nInputPlanes = weight->size[1]; long nRows = input->size[2]; long nCols = input->size[3]; long planeSize = nRows*nCols; hipComplex *input_data = (hipComplex*)THCudaTensor_data(NULL,input); hipComplex *weight_data = (hipComplex*)THCudaTensor_data(NULL,weight); hipComplex *output_data = (hipComplex*)THCudaTensor_data(NULL,output); fft_product_call(input_data, weight_data, output_data, nRows, nCols, nMinibatch, nInputPlanes*planeSize, nOutputPlanes*planeSize, nInputPlanes, planeSize, planeSize, nOutputPlanes, nInputPlanes*planeSize, planeSize, accumulate, conjWeight); return 0; } static int prod_bprop_complex(lua_State *L) { THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *weight = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); bool conjWeight = lua_toboolean(L, 4); bool accumulate = false; luaL_argcheck(L, gradInput->nDimension == 5, 2, "gradInput should be 4D complex tensor"); luaL_argcheck(L, weight->nDimension == 5, 2, "weight should be 4D complex tensor"); luaL_argcheck(L, gradOutput->nDimension == 5, 2, "gradOutput should be 4D complex tensor"); long nMinibatch = 
gradInput->size[0]; long nOutputPlanes = weight->size[0]; long nInputPlanes = weight->size[1]; long nRows = gradInput->size[2]; long nCols = gradInput->size[3]; hipComplex *gradOutput_data = (hipComplex*)THCudaTensor_data(NULL,gradOutput); hipComplex *weight_data = (hipComplex*)THCudaTensor_data(NULL,weight); hipComplex *gradInput_data = (hipComplex*)THCudaTensor_data(NULL,gradInput); fft_product_call(gradOutput_data, weight_data, gradInput_data, nRows, nCols, nMinibatch, nOutputPlanes*nRows*nCols, nInputPlanes*nRows*nCols, nOutputPlanes, nRows*nCols, nRows*nCols*nInputPlanes, nInputPlanes, nRows*nCols, nRows*nCols, accumulate, conjWeight); return 0; } static int prod_accgrad_complex(lua_State *L) { THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradWeight = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); bool conjGradOutput = lua_toboolean(L, 4); bool accumulate = false; luaL_argcheck(L, input->nDimension == 5, 2, "input should be 4D complex tensor"); luaL_argcheck(L, gradOutput->nDimension == 5, 2, "gradOutput should be 4D complex tensor"); luaL_argcheck(L, gradWeight->nDimension == 5, 2, "gradWeight should be 4D complex tensor"); long nMinibatch = input->size[0]; long nOutputPlanes = gradWeight->size[0]; long nInputPlanes = gradWeight->size[1]; long nRows = input->size[2]; long nCols = input->size[3]; hipComplex *input_data = (hipComplex*)THCudaTensor_data(NULL,input); hipComplex *gradOutput_data = (hipComplex*)THCudaTensor_data(NULL,gradOutput); hipComplex *gradWeight_data = (hipComplex*)THCudaTensor_data(NULL,gradWeight); fft_product_call(input_data, gradOutput_data, gradWeight_data, nRows, nCols, nInputPlanes, nRows*nCols, nRows*nCols, nMinibatch, nInputPlanes*nRows*nCols, nOutputPlanes*nRows*nCols, nOutputPlanes, nRows*nCols, nInputPlanes*nRows*nCols, accumulate, conjGradOutput); return 0; }
cbcda169c26331ad486919123d239743b9de5c7f.cu
#include "fft_product2.cu" static int prod_fprop_complex(lua_State *L) { THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *weight = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); bool conjWeight = lua_toboolean(L,4); bool accumulate = false; luaL_argcheck(L, input->nDimension == 5, 2, "input should be 4D complex tensor"); luaL_argcheck(L, output->nDimension == 5, 2, "output should be 4D complex tensor"); luaL_argcheck(L, weight->nDimension == 5, 2, "kernel should be 4D complex tensor"); long nMinibatch = input->size[0]; long nOutputPlanes = weight->size[0]; long nInputPlanes = weight->size[1]; long nRows = input->size[2]; long nCols = input->size[3]; long planeSize = nRows*nCols; cuComplex *input_data = (cuComplex*)THCudaTensor_data(NULL,input); cuComplex *weight_data = (cuComplex*)THCudaTensor_data(NULL,weight); cuComplex *output_data = (cuComplex*)THCudaTensor_data(NULL,output); fft_product_call(input_data, weight_data, output_data, nRows, nCols, nMinibatch, nInputPlanes*planeSize, nOutputPlanes*planeSize, nInputPlanes, planeSize, planeSize, nOutputPlanes, nInputPlanes*planeSize, planeSize, accumulate, conjWeight); return 0; } static int prod_bprop_complex(lua_State *L) { THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *weight = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); bool conjWeight = lua_toboolean(L, 4); bool accumulate = false; luaL_argcheck(L, gradInput->nDimension == 5, 2, "gradInput should be 4D complex tensor"); luaL_argcheck(L, weight->nDimension == 5, 2, "weight should be 4D complex tensor"); luaL_argcheck(L, gradOutput->nDimension == 5, 2, "gradOutput should be 4D complex tensor"); long nMinibatch = gradInput->size[0]; long nOutputPlanes = weight->size[0]; long 
nInputPlanes = weight->size[1]; long nRows = gradInput->size[2]; long nCols = gradInput->size[3]; cuComplex *gradOutput_data = (cuComplex*)THCudaTensor_data(NULL,gradOutput); cuComplex *weight_data = (cuComplex*)THCudaTensor_data(NULL,weight); cuComplex *gradInput_data = (cuComplex*)THCudaTensor_data(NULL,gradInput); fft_product_call(gradOutput_data, weight_data, gradInput_data, nRows, nCols, nMinibatch, nOutputPlanes*nRows*nCols, nInputPlanes*nRows*nCols, nOutputPlanes, nRows*nCols, nRows*nCols*nInputPlanes, nInputPlanes, nRows*nCols, nRows*nCols, accumulate, conjWeight); return 0; } static int prod_accgrad_complex(lua_State *L) { THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradWeight = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); bool conjGradOutput = lua_toboolean(L, 4); bool accumulate = false; luaL_argcheck(L, input->nDimension == 5, 2, "input should be 4D complex tensor"); luaL_argcheck(L, gradOutput->nDimension == 5, 2, "gradOutput should be 4D complex tensor"); luaL_argcheck(L, gradWeight->nDimension == 5, 2, "gradWeight should be 4D complex tensor"); long nMinibatch = input->size[0]; long nOutputPlanes = gradWeight->size[0]; long nInputPlanes = gradWeight->size[1]; long nRows = input->size[2]; long nCols = input->size[3]; cuComplex *input_data = (cuComplex*)THCudaTensor_data(NULL,input); cuComplex *gradOutput_data = (cuComplex*)THCudaTensor_data(NULL,gradOutput); cuComplex *gradWeight_data = (cuComplex*)THCudaTensor_data(NULL,gradWeight); fft_product_call(input_data, gradOutput_data, gradWeight_data, nRows, nCols, nInputPlanes, nRows*nCols, nRows*nCols, nMinibatch, nInputPlanes*nRows*nCols, nOutputPlanes*nRows*nCols, nOutputPlanes, nRows*nCols, nInputPlanes*nRows*nCols, accumulate, conjGradOutput); return 0; }
a2194cfe9afebde4f75a97f32adc4448b9840336.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include "spamfilter_utils_hip.cuh" #include "spamfilter_timer.h" // adds two device vectors with CuBLAS and stores the results in the first one void p_add_vectors(hipblasHandle_t handle, float* a, float* b, const size_t size, const float scale_for_a) { hipblasSaxpy(handle, size, &scale_for_a, b, 1, a, 1); } // computes dot product with CuBLAS for two given vectors a and b float p_dot_product(hipblasHandle_t handle, float* d_a, float* d_b, const size_t num_elems) { float result[1]; hipblasSdot (handle, num_elems, d_a, 1, d_b, 1, result); hipDeviceSynchronize(); return *result; } // computes logistic function for a given parameter vector (theta) and a data point (x_i) double p_sigmoid(hipblasHandle_t handle, FeatureType* d_theta, FeatureType* d_x_i, const size_t num_feats) { return sigmoid(p_dot_product(handle, d_theta, d_x_i, num_feats)); } // updates the parameters (theta) void p_updateParameters(hipblasHandle_t handle, FeatureType* d_theta, FeatureType* d_gradient, size_t num_feats, float step_size, bool revert) { float sign = revert ? 1 : -1; step_size *= sign; hipblasSaxpy(handle, num_feats, &step_size, d_gradient, 1, d_theta, 1); }
a2194cfe9afebde4f75a97f32adc4448b9840336.cu
#include <math.h> #include "spamfilter_utils_cuda.cuh" #include "spamfilter_timer.h" // adds two device vectors with CuBLAS and stores the results in the first one void p_add_vectors(cublasHandle_t handle, float* a, float* b, const size_t size, const float scale_for_a) { cublasSaxpy(handle, size, &scale_for_a, b, 1, a, 1); } // computes dot product with CuBLAS for two given vectors a and b float p_dot_product(cublasHandle_t handle, float* d_a, float* d_b, const size_t num_elems) { float result[1]; cublasSdot (handle, num_elems, d_a, 1, d_b, 1, result); cudaDeviceSynchronize(); return *result; } // computes logistic function for a given parameter vector (theta) and a data point (x_i) double p_sigmoid(cublasHandle_t handle, FeatureType* d_theta, FeatureType* d_x_i, const size_t num_feats) { return sigmoid(p_dot_product(handle, d_theta, d_x_i, num_feats)); } // updates the parameters (theta) void p_updateParameters(cublasHandle_t handle, FeatureType* d_theta, FeatureType* d_gradient, size_t num_feats, float step_size, bool revert) { float sign = revert ? 1 : -1; step_size *= sign; cublasSaxpy(handle, num_feats, &step_size, d_gradient, 1, d_theta, 1); }
3ad7d0c8423558ad81d639d40bba32622c626dea.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020, the YACCLAB contributors, as // shown by the AUTHORS file. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include <opencv2/cudafeatures2d.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" // Rasmusson2013 #define BLOCK_SIZE 32 // this must be multiple of the warp size (leave it to 32) #define PATCH_SIZE (BLOCK_SIZE + 2) using namespace cv; using namespace std; namespace { // This kernel makes use of a (BLOCK_SIZE + 2) X (BLOCK_SIZE + 2) array in shared memory // The paper actually says (BLOCK_SIZE + 1) X (BLOCK_SIZE + 1), but I can't manage to make it work that way __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { const unsigned r = blockIdx.y * BLOCK_SIZE + threadIdx.y; const unsigned c = blockIdx.x * BLOCK_SIZE + threadIdx.x; const unsigned labels_index = r * (labels.step / labels.elem_size) + c; const unsigned img_patch_index = (threadIdx.y + 1) * PATCH_SIZE + threadIdx.x + 1; const unsigned local_linear_index = threadIdx.y * BLOCK_SIZE + threadIdx.x; __shared__ unsigned char img_patch[PATCH_SIZE * PATCH_SIZE]; const bool in_limits = r < img.rows&& c < img.cols; // Load 34 x 34 matrix from input image // Convert local_linear_index to coordinates of the 34 x 34 matrix // Round 1 const int patch_r1 = local_linear_index / PATCH_SIZE; const int patch_c1 = local_linear_index % PATCH_SIZE; const int patch_img_r1 = blockIdx.y * BLOCK_SIZE - 1 + patch_r1; const int patch_img_c1 = blockIdx.x * BLOCK_SIZE - 1 + patch_c1; const int patch_img_index1 = patch_img_r1 * img.step + patch_img_c1; const bool patch_in_limits1 = patch_img_r1 >= 0 && patch_img_c1 >= 0 && patch_img_r1 < img.rows&& patch_img_c1 < img.cols; img_patch[patch_r1 * PATCH_SIZE + patch_c1] = patch_in_limits1 ? 
img[patch_img_index1] : 0; // Round 2 const int patch_r2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) / PATCH_SIZE; const int patch_c2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) % PATCH_SIZE; if (patch_r2 < PATCH_SIZE) { const int patch_img_r2 = blockIdx.y * BLOCK_SIZE - 1 + patch_r2; const int patch_img_c2 = blockIdx.x * BLOCK_SIZE - 1 + patch_c2; const int patch_img_index2 = patch_img_r2 * img.step + patch_img_c2; const bool patch_in_limits2 = patch_img_r2 >= 0 && patch_img_c2 >= 0 && patch_img_r2 < img.rows&& patch_img_c2 < img.cols; img_patch[patch_r2 * PATCH_SIZE + patch_c2] = patch_in_limits2 ? img[patch_img_index2] : 0; } __syncthreads(); if (in_limits) { unsigned int connections = 0; unsigned label = 0; if (img_patch[img_patch_index]) { label = labels_index + 1; // Enrich label with connections information if (img_patch[img_patch_index - PATCH_SIZE - 1]) { connections |= (1u << 31); } if (img_patch[img_patch_index - PATCH_SIZE]) { connections |= (1u << 30); } if (img_patch[img_patch_index - PATCH_SIZE + 1]) { connections |= (1u << 29); } if (img_patch[img_patch_index + 1]) { connections |= (1u << 28); } label |= connections; } labels[labels_index] = label; } } __global__ void Propagate(cuda::PtrStepSzi labels, char* changed) { bool thread_changed = false; const unsigned r = blockIdx.y * BLOCK_SIZE + threadIdx.y; const unsigned c = blockIdx.x * BLOCK_SIZE + threadIdx.x; const unsigned labels_index = r * (labels.step / labels.elem_size) + c; const unsigned labels_patch_index = (threadIdx.y + 1) * PATCH_SIZE + threadIdx.x + 1; const unsigned local_linear_index = threadIdx.y * BLOCK_SIZE + threadIdx.x; __shared__ unsigned labels_patch[PATCH_SIZE * PATCH_SIZE]; __shared__ bool something_changed[1]; const bool in_limits = r < labels.rows&& c < labels.cols; // Load 34 x 34 matrix from input image // Convert local_linear_index to coordinates of the 34 x 34 matrix // 2 rounds are enough only for BLOCK_SIZE >= 5 // Round 1 const int patch_r1 = 
local_linear_index / PATCH_SIZE; const int patch_c1 = local_linear_index % PATCH_SIZE; const int patch_labels_r1 = blockIdx.y * BLOCK_SIZE - 1 + patch_r1; const int patch_labels_c1 = blockIdx.x * BLOCK_SIZE - 1 + patch_c1; const int patch_labels_index1 = patch_labels_r1 * (labels.step / labels.elem_size) + patch_labels_c1; const bool patch_in_limits1 = patch_labels_r1 >= 0 && patch_labels_c1 >= 0 && patch_labels_r1 < labels.rows&& patch_labels_c1 < labels.cols; labels_patch[patch_r1 * PATCH_SIZE + patch_c1] = patch_in_limits1 ? labels[patch_labels_index1] : 0; // Round 2 const int patch_r2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) / PATCH_SIZE; if (patch_r2 < PATCH_SIZE) { const int patch_c2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) % PATCH_SIZE; const int patch_labels_r2 = blockIdx.y * BLOCK_SIZE - 1 + patch_r2; const int patch_labels_c2 = blockIdx.x * BLOCK_SIZE - 1 + patch_c2; const int patch_labels_index2 = patch_labels_r2 * (labels.step / labels.elem_size) + patch_labels_c2; const bool patch_in_limits2 = patch_labels_r2 >= 0 && patch_labels_c2 >= 0 && patch_labels_r2 < labels.rows&& patch_labels_c2 < labels.cols; labels_patch[patch_r2 * PATCH_SIZE + patch_c2] = patch_in_limits2 ? labels[patch_labels_index2] : 0; } do { if (threadIdx.x == 0 && threadIdx.y == 0) { something_changed[0] = false; } __syncthreads(); thread_changed = false; // Primary/Secondary Optimization // Find the primary pixel of the sub-component, and add its label to the propagation. 
if (true) { const unsigned label = labels_patch[labels_patch_index]; unsigned min_label = label & 0x0FFFFFFF; if (min_label) { const int primary_r = ((label & 0x0FFFFFFF) - 1) / (labels.step / labels.elem_size); const int primary_c = ((label & 0x0FFFFFFF) - 1) % (labels.step / labels.elem_size); // Check if the primary pixel is in the same block // If it is, take its current label as the minimum if (primary_r >= (blockIdx.y * BLOCK_SIZE - 1) && primary_r <= (blockIdx.y + 1) * BLOCK_SIZE && primary_c >= (blockIdx.x * BLOCK_SIZE - 1) && primary_c <= (blockIdx.x + 1) * BLOCK_SIZE) { const int primary_local_r = primary_r - blockIdx.y * BLOCK_SIZE; const int primary_local_c = primary_c - blockIdx.x * BLOCK_SIZE; min_label = min(min_label, labels_patch[(primary_local_r + 1) * PATCH_SIZE + (primary_local_c + 1)] & 0x0FFFFFFF); } } if (min_label < (label & 0x0FFFFFFF)) { labels_patch[labels_patch_index] = min_label | (label & 0xF0000000); thread_changed = true; } } __syncthreads(); // Propagation sizes are calculated in every propagation step if (true) { // UP-LEFT // This is a bit convoluted, because we need pixels on a diagonal line // to be processed by threads belonging to the same warp. 
// The pixel-warp mapping is the following (for WARP_SIZE = BLOCK_SIZE = 4): // +---+---+---+---+ // | 0 | 1 | 2 | 3 | // +---+---+---+---+ // | 3 | 0 | 1 | 2 | // +---+---+---+---+ // | 2 | 3 | 0 | 1 | // +---+---+---+---+ // | 1 | 2 | 3 | 0 | // +---+---+---+---+ const unsigned patch_r_dir = threadIdx.x; const unsigned patch_c_dir = (threadIdx.x + threadIdx.y) % BLOCK_SIZE; const unsigned patch_index_dir = (patch_r_dir + 1) * PATCH_SIZE + patch_c_dir + 1; unsigned label_dir = labels_patch[patch_index_dir]; unsigned min_label = label_dir & 0x0FFFFFFF; unsigned prop = ((label_dir >> 31) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_up_sync(0xffffffff, prop, prop); if (static_cast<int>(patch_r_dir) - static_cast<int>(prop) >= 0 && static_cast<int>(patch_c_dir) - static_cast<int>(prop) >= 0) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir - PATCH_SIZE - 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir - prop * (PATCH_SIZE + 1)]; min_label = min(min_label, far_label & 0x0FFFFFFF); } // DOWN-RIGHT prop = ((labels_patch[patch_index_dir + PATCH_SIZE + 1] >> 31) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_down_sync(0xffffffff, prop, prop); if (patch_r_dir + prop < BLOCK_SIZE && patch_c_dir + prop < BLOCK_SIZE) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir + PATCH_SIZE + 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir + prop * (PATCH_SIZE + 1)]; min_label = 
min(min_label, far_label & 0x0FFFFFFF); } if (min_label < (label_dir & 0x0FFFFFFF)) { labels_patch[patch_index_dir] = min_label | (label_dir & 0xF0000000); thread_changed = true; } } __syncthreads(); if (true) { // UP-RIGHT // This is a bit convoluted, because we need pixels on a diagonal line // to be processed by threads belonging to the same warp. // The pixel-warp mapping is the following (for WARP_SIZE = BLOCK_SIZE = 4): // +---+---+---+---+ // | 1 | 2 | 3 | 0 | // +---+---+---+---+ // | 2 | 3 | 0 | 1 | // +---+---+---+---+ // | 3 | 0 | 1 | 2 | // +---+---+---+---+ // | 0 | 1 | 2 | 3 | // +---+---+---+---+ const unsigned patch_r_dir = threadIdx.x; const unsigned patch_c_dir = (BLOCK_SIZE - 1 - threadIdx.x + threadIdx.y) % BLOCK_SIZE; const unsigned patch_index_dir = (patch_r_dir + 1) * PATCH_SIZE + patch_c_dir + 1; unsigned label_dir = labels_patch[patch_index_dir]; unsigned min_label = label_dir & 0x0FFFFFFF; unsigned prop = ((label_dir >> 29) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_up_sync(0xffffffff, prop, prop); if (static_cast<int>(patch_r_dir) - static_cast<int>(prop) >= 0 && patch_c_dir + prop < BLOCK_SIZE) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir - PATCH_SIZE + 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir - prop * (PATCH_SIZE - 1)]; min_label = min(min_label, far_label & 0x0FFFFFFF); } // DOWN-LEFT prop = ((labels_patch[patch_index_dir + PATCH_SIZE - 1] >> 29) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_down_sync(0xffffffff, prop, prop); if (patch_r_dir + prop < BLOCK_SIZE && static_cast<int>(patch_c_dir) - 
static_cast<int>(prop) >= 0) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir + PATCH_SIZE - 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir + prop * (PATCH_SIZE - 1)]; min_label = min(min_label, far_label & 0x0FFFFFFF); } if (min_label < (label_dir & 0x0FFFFFFF)) { labels_patch[patch_index_dir] = min_label | (label_dir & 0xF0000000); thread_changed = true; } } __syncthreads(); if (true) { // UP // warp x takes care of COLUMN x, up to down unsigned patch_index_dir = (threadIdx.x + 1) * PATCH_SIZE + threadIdx.y + 1; unsigned label_dir = labels_patch[patch_index_dir]; unsigned min_label = label_dir & 0x0FFFFFFF; unsigned prop = ((label_dir >> 30) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_up_sync(0xffffffff, prop, prop); if (static_cast<int>(threadIdx.x) - static_cast<int>(prop) >= 0) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir - PATCH_SIZE]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir - prop * PATCH_SIZE]; min_label = min(min_label, far_label & 0x0FFFFFFF); } // DOWN prop = ((labels_patch[patch_index_dir + PATCH_SIZE] >> 30) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_down_sync(0xffffffff, prop, prop); if (threadIdx.x + prop < BLOCK_SIZE) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir + PATCH_SIZE]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The 
farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir + prop * PATCH_SIZE]; min_label = min(min_label, far_label & 0x0FFFFFFF); } if (min_label < (label_dir & 0x0FFFFFFF)) { labels_patch[patch_index_dir] = min_label | (label_dir & 0xF0000000); thread_changed = true; } } __syncthreads(); if (true) { // RIGHT // patch_index_dir changes for every direction unsigned patch_index_dir = (threadIdx.y + 1) * PATCH_SIZE + threadIdx.x + 1; unsigned label_dir = labels_patch[patch_index_dir]; unsigned min_label = label_dir & 0x0FFFFFFF; unsigned prop = ((label_dir >> 28) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_down_sync(0xffffffff, prop, prop); if (threadIdx.x + prop < BLOCK_SIZE) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir + 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir + prop]; min_label = min(min_label, far_label & 0x0FFFFFFF); } // LEFT prop = ((labels_patch[patch_index_dir - 1] >> 28) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_up_sync(0xffffffff, prop, prop); if (static_cast<int>(threadIdx.x) - static_cast<int>(prop) >= 0) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir - 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir - prop]; min_label = min(min_label, far_label & 0x0FFFFFFF); } if (min_label < (label_dir & 0x0FFFFFFF)) { labels_patch[patch_index_dir] = min_label | (label_dir & 0xF0000000); thread_changed = true; } } if 
(thread_changed) { something_changed[0] = true; } __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0 && something_changed[0]) { *changed = 1; } } //while (something_changed[0]); while (false); // change this with the previous line to add an internal loop - it doesn't seem efficient if (in_limits) { labels[labels_index] = labels_patch[labels_patch_index]; } } __global__ void End(cuda::PtrStepSzi labels) { unsigned global_row = blockIdx.y * BLOCK_SIZE + threadIdx.y; unsigned global_col = blockIdx.x * BLOCK_SIZE + threadIdx.x; unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col; if (global_row < labels.rows && global_col < labels.cols) { labels.data[labels_index] &= 0x0FFFFFFF; } } } class RASMUSSON : public GpuLabeling2D<Connectivity2D::CONN_8> { private: dim3 grid_size_; dim3 block_size_; char* d_changed_ptr_; public: RASMUSSON() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); grid_size_ = dim3((d_img_.cols + BLOCK_SIZE - 1) / BLOCK_SIZE, (d_img_.rows + BLOCK_SIZE - 1) / BLOCK_SIZE, 1); block_size_ = dim3(BLOCK_SIZE, BLOCK_SIZE, 1); char changed = 1; char* d_changed_ptr; hipMalloc(&d_changed_ptr, 1); Init << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); while (changed) { changed = 0; hipMemset(d_changed_ptr, 0, 1); Propagate << <grid_size_, block_size_ >> > (d_img_labels_, d_changed_ptr); hipMemcpy(&changed, d_changed_ptr, 1, hipMemcpyDeviceToHost); } End << <grid_size_, block_size_ >> > (d_img_labels_); hipFree(d_changed_ptr); hipDeviceSynchronize(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); grid_size_ = dim3((d_img_.cols + BLOCK_SIZE - 1) / BLOCK_SIZE, (d_img_.rows + BLOCK_SIZE - 1) / BLOCK_SIZE, 1); block_size_ = dim3(BLOCK_SIZE, BLOCK_SIZE, 1); hipMalloc(&d_changed_ptr_, 1); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); hipFree(d_changed_ptr_); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { 
perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { Init << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); char changed = 1; while (changed) { changed = 0; hipMemset(d_changed_ptr_, 0, 1); Propagate << <grid_size_, block_size_ >> > (d_img_labels_, d_changed_ptr_); hipMemcpy(&changed, d_changed_ptr_, 1, hipMemcpyDeviceToHost); } End << <grid_size_, block_size_ >> > (d_img_labels_); hipDeviceSynchronize(); } public: void PerformLabelingWithSteps() { // This doesn't really make sense, there are not two separate scans double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(RASMUSSON);
3ad7d0c8423558ad81d639d40bba32622c626dea.cu
// Copyright (c) 2020, the YACCLAB contributors, as // shown by the AUTHORS file. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" // Rasmusson2013 #define BLOCK_SIZE 32 // this must be multiple of the warp size (leave it to 32) #define PATCH_SIZE (BLOCK_SIZE + 2) using namespace cv; using namespace std; namespace { // This kernel makes use of a (BLOCK_SIZE + 2) X (BLOCK_SIZE + 2) array in shared memory // The paper actually says (BLOCK_SIZE + 1) X (BLOCK_SIZE + 1), but I can't manage to make it work that way __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { const unsigned r = blockIdx.y * BLOCK_SIZE + threadIdx.y; const unsigned c = blockIdx.x * BLOCK_SIZE + threadIdx.x; const unsigned labels_index = r * (labels.step / labels.elem_size) + c; const unsigned img_patch_index = (threadIdx.y + 1) * PATCH_SIZE + threadIdx.x + 1; const unsigned local_linear_index = threadIdx.y * BLOCK_SIZE + threadIdx.x; __shared__ unsigned char img_patch[PATCH_SIZE * PATCH_SIZE]; const bool in_limits = r < img.rows&& c < img.cols; // Load 34 x 34 matrix from input image // Convert local_linear_index to coordinates of the 34 x 34 matrix // Round 1 const int patch_r1 = local_linear_index / PATCH_SIZE; const int patch_c1 = local_linear_index % PATCH_SIZE; const int patch_img_r1 = blockIdx.y * BLOCK_SIZE - 1 + patch_r1; const int patch_img_c1 = blockIdx.x * BLOCK_SIZE - 1 + patch_c1; const int patch_img_index1 = patch_img_r1 * img.step + patch_img_c1; const bool patch_in_limits1 = patch_img_r1 >= 0 && patch_img_c1 >= 0 && patch_img_r1 < img.rows&& patch_img_c1 < img.cols; img_patch[patch_r1 * PATCH_SIZE + patch_c1] = patch_in_limits1 ? 
img[patch_img_index1] : 0; // Round 2 const int patch_r2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) / PATCH_SIZE; const int patch_c2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) % PATCH_SIZE; if (patch_r2 < PATCH_SIZE) { const int patch_img_r2 = blockIdx.y * BLOCK_SIZE - 1 + patch_r2; const int patch_img_c2 = blockIdx.x * BLOCK_SIZE - 1 + patch_c2; const int patch_img_index2 = patch_img_r2 * img.step + patch_img_c2; const bool patch_in_limits2 = patch_img_r2 >= 0 && patch_img_c2 >= 0 && patch_img_r2 < img.rows&& patch_img_c2 < img.cols; img_patch[patch_r2 * PATCH_SIZE + patch_c2] = patch_in_limits2 ? img[patch_img_index2] : 0; } __syncthreads(); if (in_limits) { unsigned int connections = 0; unsigned label = 0; if (img_patch[img_patch_index]) { label = labels_index + 1; // Enrich label with connections information if (img_patch[img_patch_index - PATCH_SIZE - 1]) { connections |= (1u << 31); } if (img_patch[img_patch_index - PATCH_SIZE]) { connections |= (1u << 30); } if (img_patch[img_patch_index - PATCH_SIZE + 1]) { connections |= (1u << 29); } if (img_patch[img_patch_index + 1]) { connections |= (1u << 28); } label |= connections; } labels[labels_index] = label; } } __global__ void Propagate(cuda::PtrStepSzi labels, char* changed) { bool thread_changed = false; const unsigned r = blockIdx.y * BLOCK_SIZE + threadIdx.y; const unsigned c = blockIdx.x * BLOCK_SIZE + threadIdx.x; const unsigned labels_index = r * (labels.step / labels.elem_size) + c; const unsigned labels_patch_index = (threadIdx.y + 1) * PATCH_SIZE + threadIdx.x + 1; const unsigned local_linear_index = threadIdx.y * BLOCK_SIZE + threadIdx.x; __shared__ unsigned labels_patch[PATCH_SIZE * PATCH_SIZE]; __shared__ bool something_changed[1]; const bool in_limits = r < labels.rows&& c < labels.cols; // Load 34 x 34 matrix from input image // Convert local_linear_index to coordinates of the 34 x 34 matrix // 2 rounds are enough only for BLOCK_SIZE >= 5 // Round 1 const int patch_r1 = 
local_linear_index / PATCH_SIZE; const int patch_c1 = local_linear_index % PATCH_SIZE; const int patch_labels_r1 = blockIdx.y * BLOCK_SIZE - 1 + patch_r1; const int patch_labels_c1 = blockIdx.x * BLOCK_SIZE - 1 + patch_c1; const int patch_labels_index1 = patch_labels_r1 * (labels.step / labels.elem_size) + patch_labels_c1; const bool patch_in_limits1 = patch_labels_r1 >= 0 && patch_labels_c1 >= 0 && patch_labels_r1 < labels.rows&& patch_labels_c1 < labels.cols; labels_patch[patch_r1 * PATCH_SIZE + patch_c1] = patch_in_limits1 ? labels[patch_labels_index1] : 0; // Round 2 const int patch_r2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) / PATCH_SIZE; if (patch_r2 < PATCH_SIZE) { const int patch_c2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) % PATCH_SIZE; const int patch_labels_r2 = blockIdx.y * BLOCK_SIZE - 1 + patch_r2; const int patch_labels_c2 = blockIdx.x * BLOCK_SIZE - 1 + patch_c2; const int patch_labels_index2 = patch_labels_r2 * (labels.step / labels.elem_size) + patch_labels_c2; const bool patch_in_limits2 = patch_labels_r2 >= 0 && patch_labels_c2 >= 0 && patch_labels_r2 < labels.rows&& patch_labels_c2 < labels.cols; labels_patch[patch_r2 * PATCH_SIZE + patch_c2] = patch_in_limits2 ? labels[patch_labels_index2] : 0; } do { if (threadIdx.x == 0 && threadIdx.y == 0) { something_changed[0] = false; } __syncthreads(); thread_changed = false; // Primary/Secondary Optimization // Find the primary pixel of the sub-component, and add its label to the propagation. 
if (true) { const unsigned label = labels_patch[labels_patch_index]; unsigned min_label = label & 0x0FFFFFFF; if (min_label) { const int primary_r = ((label & 0x0FFFFFFF) - 1) / (labels.step / labels.elem_size); const int primary_c = ((label & 0x0FFFFFFF) - 1) % (labels.step / labels.elem_size); // Check if the primary pixel is in the same block // If it is, take its current label as the minimum if (primary_r >= (blockIdx.y * BLOCK_SIZE - 1) && primary_r <= (blockIdx.y + 1) * BLOCK_SIZE && primary_c >= (blockIdx.x * BLOCK_SIZE - 1) && primary_c <= (blockIdx.x + 1) * BLOCK_SIZE) { const int primary_local_r = primary_r - blockIdx.y * BLOCK_SIZE; const int primary_local_c = primary_c - blockIdx.x * BLOCK_SIZE; min_label = min(min_label, labels_patch[(primary_local_r + 1) * PATCH_SIZE + (primary_local_c + 1)] & 0x0FFFFFFF); } } if (min_label < (label & 0x0FFFFFFF)) { labels_patch[labels_patch_index] = min_label | (label & 0xF0000000); thread_changed = true; } } __syncthreads(); // Propagation sizes are calculated in every propagation step if (true) { // UP-LEFT // This is a bit convoluted, because we need pixels on a diagonal line // to be processed by threads belonging to the same warp. 
// The pixel-warp mapping is the following (for WARP_SIZE = BLOCK_SIZE = 4): // +---+---+---+---+ // | 0 | 1 | 2 | 3 | // +---+---+---+---+ // | 3 | 0 | 1 | 2 | // +---+---+---+---+ // | 2 | 3 | 0 | 1 | // +---+---+---+---+ // | 1 | 2 | 3 | 0 | // +---+---+---+---+ const unsigned patch_r_dir = threadIdx.x; const unsigned patch_c_dir = (threadIdx.x + threadIdx.y) % BLOCK_SIZE; const unsigned patch_index_dir = (patch_r_dir + 1) * PATCH_SIZE + patch_c_dir + 1; unsigned label_dir = labels_patch[patch_index_dir]; unsigned min_label = label_dir & 0x0FFFFFFF; unsigned prop = ((label_dir >> 31) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_up_sync(0xffffffff, prop, prop); if (static_cast<int>(patch_r_dir) - static_cast<int>(prop) >= 0 && static_cast<int>(patch_c_dir) - static_cast<int>(prop) >= 0) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir - PATCH_SIZE - 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir - prop * (PATCH_SIZE + 1)]; min_label = min(min_label, far_label & 0x0FFFFFFF); } // DOWN-RIGHT prop = ((labels_patch[patch_index_dir + PATCH_SIZE + 1] >> 31) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_down_sync(0xffffffff, prop, prop); if (patch_r_dir + prop < BLOCK_SIZE && patch_c_dir + prop < BLOCK_SIZE) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir + PATCH_SIZE + 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir + prop * (PATCH_SIZE + 1)]; min_label = 
min(min_label, far_label & 0x0FFFFFFF); } if (min_label < (label_dir & 0x0FFFFFFF)) { labels_patch[patch_index_dir] = min_label | (label_dir & 0xF0000000); thread_changed = true; } } __syncthreads(); if (true) { // UP-RIGHT // This is a bit convoluted, because we need pixels on a diagonal line // to be processed by threads belonging to the same warp. // The pixel-warp mapping is the following (for WARP_SIZE = BLOCK_SIZE = 4): // +---+---+---+---+ // | 1 | 2 | 3 | 0 | // +---+---+---+---+ // | 2 | 3 | 0 | 1 | // +---+---+---+---+ // | 3 | 0 | 1 | 2 | // +---+---+---+---+ // | 0 | 1 | 2 | 3 | // +---+---+---+---+ const unsigned patch_r_dir = threadIdx.x; const unsigned patch_c_dir = (BLOCK_SIZE - 1 - threadIdx.x + threadIdx.y) % BLOCK_SIZE; const unsigned patch_index_dir = (patch_r_dir + 1) * PATCH_SIZE + patch_c_dir + 1; unsigned label_dir = labels_patch[patch_index_dir]; unsigned min_label = label_dir & 0x0FFFFFFF; unsigned prop = ((label_dir >> 29) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_up_sync(0xffffffff, prop, prop); if (static_cast<int>(patch_r_dir) - static_cast<int>(prop) >= 0 && patch_c_dir + prop < BLOCK_SIZE) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir - PATCH_SIZE + 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir - prop * (PATCH_SIZE - 1)]; min_label = min(min_label, far_label & 0x0FFFFFFF); } // DOWN-LEFT prop = ((labels_patch[patch_index_dir + PATCH_SIZE - 1] >> 29) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_down_sync(0xffffffff, prop, prop); if (patch_r_dir + prop < BLOCK_SIZE && static_cast<int>(patch_c_dir) - 
static_cast<int>(prop) >= 0) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir + PATCH_SIZE - 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir + prop * (PATCH_SIZE - 1)]; min_label = min(min_label, far_label & 0x0FFFFFFF); } if (min_label < (label_dir & 0x0FFFFFFF)) { labels_patch[patch_index_dir] = min_label | (label_dir & 0xF0000000); thread_changed = true; } } __syncthreads(); if (true) { // UP // warp x takes care of COLUMN x, up to down unsigned patch_index_dir = (threadIdx.x + 1) * PATCH_SIZE + threadIdx.y + 1; unsigned label_dir = labels_patch[patch_index_dir]; unsigned min_label = label_dir & 0x0FFFFFFF; unsigned prop = ((label_dir >> 30) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_up_sync(0xffffffff, prop, prop); if (static_cast<int>(threadIdx.x) - static_cast<int>(prop) >= 0) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir - PATCH_SIZE]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir - prop * PATCH_SIZE]; min_label = min(min_label, far_label & 0x0FFFFFFF); } // DOWN prop = ((labels_patch[patch_index_dir + PATCH_SIZE] >> 30) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_down_sync(0xffffffff, prop, prop); if (threadIdx.x + prop < BLOCK_SIZE) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir + PATCH_SIZE]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The 
farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir + prop * PATCH_SIZE]; min_label = min(min_label, far_label & 0x0FFFFFFF); } if (min_label < (label_dir & 0x0FFFFFFF)) { labels_patch[patch_index_dir] = min_label | (label_dir & 0xF0000000); thread_changed = true; } } __syncthreads(); if (true) { // RIGHT // patch_index_dir changes for every direction unsigned patch_index_dir = (threadIdx.y + 1) * PATCH_SIZE + threadIdx.x + 1; unsigned label_dir = labels_patch[patch_index_dir]; unsigned min_label = label_dir & 0x0FFFFFFF; unsigned prop = ((label_dir >> 28) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_down_sync(0xffffffff, prop, prop); if (threadIdx.x + prop < BLOCK_SIZE) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir + 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir + prop]; min_label = min(min_label, far_label & 0x0FFFFFFF); } // LEFT prop = ((labels_patch[patch_index_dir - 1] >> 28) & 1); // 5 iterations are enough for the longest propagation // Maybe there is a way to end the cycle sooner for (int i = 0; i < 5; ++i) { unsigned delta = __shfl_up_sync(0xffffffff, prop, prop); if (static_cast<int>(threadIdx.x) - static_cast<int>(prop) >= 0) { prop += delta; } } if (prop > 0) { // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_index_dir - 1]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_index_dir - prop]; min_label = min(min_label, far_label & 0x0FFFFFFF); } if (min_label < (label_dir & 0x0FFFFFFF)) { labels_patch[patch_index_dir] = min_label | (label_dir & 0xF0000000); thread_changed = true; } } if 
(thread_changed) { something_changed[0] = true; } __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0 && something_changed[0]) { *changed = 1; } } //while (something_changed[0]); while (false); // change this with the previous line to add an internal loop - it doesn't seem efficient if (in_limits) { labels[labels_index] = labels_patch[labels_patch_index]; } } __global__ void End(cuda::PtrStepSzi labels) { unsigned global_row = blockIdx.y * BLOCK_SIZE + threadIdx.y; unsigned global_col = blockIdx.x * BLOCK_SIZE + threadIdx.x; unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col; if (global_row < labels.rows && global_col < labels.cols) { labels.data[labels_index] &= 0x0FFFFFFF; } } } class RASMUSSON : public GpuLabeling2D<Connectivity2D::CONN_8> { private: dim3 grid_size_; dim3 block_size_; char* d_changed_ptr_; public: RASMUSSON() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); grid_size_ = dim3((d_img_.cols + BLOCK_SIZE - 1) / BLOCK_SIZE, (d_img_.rows + BLOCK_SIZE - 1) / BLOCK_SIZE, 1); block_size_ = dim3(BLOCK_SIZE, BLOCK_SIZE, 1); char changed = 1; char* d_changed_ptr; cudaMalloc(&d_changed_ptr, 1); Init << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); while (changed) { changed = 0; cudaMemset(d_changed_ptr, 0, 1); Propagate << <grid_size_, block_size_ >> > (d_img_labels_, d_changed_ptr); cudaMemcpy(&changed, d_changed_ptr, 1, cudaMemcpyDeviceToHost); } End << <grid_size_, block_size_ >> > (d_img_labels_); cudaFree(d_changed_ptr); cudaDeviceSynchronize(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); grid_size_ = dim3((d_img_.cols + BLOCK_SIZE - 1) / BLOCK_SIZE, (d_img_.rows + BLOCK_SIZE - 1) / BLOCK_SIZE, 1); block_size_ = dim3(BLOCK_SIZE, BLOCK_SIZE, 1); cudaMalloc(&d_changed_ptr_, 1); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); cudaFree(d_changed_ptr_); perf_.stop(); return perf_.last(); } double 
MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { Init << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); char changed = 1; while (changed) { changed = 0; cudaMemset(d_changed_ptr_, 0, 1); Propagate << <grid_size_, block_size_ >> > (d_img_labels_, d_changed_ptr_); cudaMemcpy(&changed, d_changed_ptr_, 1, cudaMemcpyDeviceToHost); } End << <grid_size_, block_size_ >> > (d_img_labels_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { // This doesn't really make sense, there are not two separate scans double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(RASMUSSON);
e46ee6756ec60afef4fe347fa8692bcc18a5dbcc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "Instance.cuh" __global__ void d_testFunctions(MemoryManagerBulkAlloc memory_manager) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid > 0) return; int* test_array = reinterpret_cast<int*>(memory_manager.malloc(sizeof(int) * 16)); for(int i = 0; i < 16; ++i) { test_array[i] = i; } memory_manager.free(test_array); printf("It worked!\n"); return; } int main(int argc, char* argv[]) { std::cout << "Simple BulkAlloc Testcase\n"; MemoryManagerBulkAlloc memory_manager(8192ULL * 1024ULL * 1024ULL); // d_testFunctions <<<1,1>>>(memory_manager); hipDeviceSynchronize(); printf("Testcase done!\n"); return 0; }
e46ee6756ec60afef4fe347fa8692bcc18a5dbcc.cu
#include <iostream> #include "Instance.cuh" __global__ void d_testFunctions(MemoryManagerBulkAlloc memory_manager) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid > 0) return; int* test_array = reinterpret_cast<int*>(memory_manager.malloc(sizeof(int) * 16)); for(int i = 0; i < 16; ++i) { test_array[i] = i; } memory_manager.free(test_array); printf("It worked!\n"); return; } int main(int argc, char* argv[]) { std::cout << "Simple BulkAlloc Testcase\n"; MemoryManagerBulkAlloc memory_manager(8192ULL * 1024ULL * 1024ULL); // d_testFunctions <<<1,1>>>(memory_manager); cudaDeviceSynchronize(); printf("Testcase done!\n"); return 0; }
6852d1c4ae5368a6a0c73bbf0950db4bd49cd605.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> #include <MurmurHash.hpp> #if __CUDA_ARCH__ >= 300 #define MAXXGRID 2147483647 #else #define MAXXGRID 65535 #endif __forceinline__ __device__ int solve1(int j) { float v = sqrtf((float)j); #pragma unroll for (int k = 0; k < 5; k++) { v = v - (v*(v+1)-2*j)/(2*v+1); // Newton iterations to find first index. } return (int)(v+2e-5f); } __forceinline__ __device__ void solvex(int n, int v, int &i, int &j) { int n1 = ((n >> 1) << 1) + 1; int n2 = (n + 1) >> 1; int even = (n1 != n); j = v / n1; i = v - n1 * j; if (j > i - even) { i = n1 - i - 1; j = n2 + n2 - j + 1; } else { i = i - even; } } // Feature hashing multiply and multiply-transpose. // This one enumerates, hashes and multiplies all pairs of features. // // NOTE: The single-matrix version (hashmult) uses a fast lookup recurrence which is only valid up to 3000 base features per column (approx 4.5 million pairs) // Given dense A and sparse B, for each column of B, enumerate all pairs of features, hash to a single feature index, and multiply by A into C __global__ void __hashmult(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) { bool doit = false; int istart = ((long long)blockIdx.x) * ncols/ gridDim.x; int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; for (int i = istart; i < iend ; i++) { // i is the column index int jstart = Bjc[i]; // Range of nz rows in this column int jend = Bjc[i+1]; int nr = jend - jstart; // Number of nz rows int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs) for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column int j1 = solve1(j); // Compute the first and second indices int j2 = j - j1*(j1+1)/2; // int j1, j2; // solvex(todo, j, j1, j2); float f1 = Bdata[jstart + 
j1]; // Get the two features float f2 = Bdata[jstart + j2]; int r1 = Bir[jstart + j1]; // And their row indices int r2 = Bir[jstart + j2]; int ind = mmhash2(r1, r2, nfeats); // Hash the indices long long rank = r1 + 1; float prod = f1; if (j1 == j2) { doit = (rank < bound1); } else { prod *= f2; rank *= r2 + 1; doit = (rank < bound2); } if (doit) { if (transpose > 0) { float sum = A[threadIdx.x + nrows * i] * prod; // Do the product atomicAdd(&C[threadIdx.x + nrows * ind], sum); } else { float sum = A[threadIdx.x + nrows * ind] * prod; // Do the product atomicAdd(&C[threadIdx.x + nrows * i], sum); } } } } } __forceinline__ __device__ int hash2(int a, int b, int modulus) { return (((a * 453423453) + b) * 34143242142) % modulus; } #if __CUDA_ARCH__ >= 300 // This version is designed for few (or one) row in A. It allocates one warp per column __global__ void __hashmult2(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) { bool doit = false; int istart = ((long long)blockIdx.x) * ncols / gridDim.x; int iend = ((long long)(blockIdx.x+1)) * ncols / gridDim.x; for (int i = istart; i < iend ; i++) { // i is the column index int jstart = Bjc[i]; // Range of nz rows in this column int jend = Bjc[i+1]; int nr = jend - jstart; // Number of nz rows for (int j1 = 0; j1 < nr; j1 += blockDim.x) { // work on a block of data float f1 = 0; int r1 = -1; if (j1 + threadIdx.x < nr) { f1 = Bdata[jstart + j1 + threadIdx.x]; // Get the two features r1 = Bir[jstart + j1 + threadIdx.x]; // And their row indices } for (int j2 = j1; j2 < nr; j2 += blockDim.x) { // work on a block of data float f2 = 0; int r2 = -1; if (j2 + threadIdx.x < nr) { f2 = Bdata[jstart + j2 + threadIdx.x]; r2 = Bir[jstart + j2 + threadIdx.x]; } for (int k = 0; k < 32; k++) { float f2shift = __shfl(f2, k); int r2shift = __shfl(r2, k); if (j2 + k < nr && r1 >= 0) { int ind = mmhash2(r1, r2shift, nfeats); // Hash the indices long long rank = r1 + 
1; float prod = f1; doit = false; if (j1 + threadIdx.x == j2 + k) { doit = (rank < bound1); } else if (j1 + threadIdx.x < j2 + k) { prod *= f2shift; rank *= r2shift + 1; doit = (rank < bound2); } if (doit) { if (transpose > 0) { for (int m = 0; m < nrows; m++) { float sum = A[m + nrows * i] * prod; // Do the product atomicAdd(&C[m + nrows * ind], sum); // atomicAdd(&C[0], sum); } } else { for (int m = 0; m < nrows; m++) { float sum = A[m + nrows * ind] * prod; // Do the product atomicAdd(&C[m + nrows * i], sum); // atomicAdd(&C[0], sum); } } } } } } } } } #else __global__ void __hashmult2(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {} #endif int hashmult(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) { if (nrows >= 0) { int nt = max(1, 256/nrows); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, ncols); hipLaunchKernelGGL(( __hashmult), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nfeats, ncols, bound1, bound2, A, Bdata, Bir, Bjc, C, transpose); } else { dim3 threadDim(32, 1, 1); int nblocks = min(MAXXGRID, ncols); hipLaunchKernelGGL(( __hashmult2), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nfeats, ncols, bound1, bound2, A, Bdata, Bir, Bjc, C, transpose); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __forceinline__ __device__ void __gupdate(float grad, int i, int ithere, int jthere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) { float lr, ve, te, pve, ste, ngrad; Sumsq[ithere] += grad * grad + epsilon; if (addgrad) { lr = (lrlen > 1) ? lrate[i] : lrate[0]; ve = (vexplen > 1) ? vexp[i] : vexp[0]; te = (texplen > 1) ? texp[i] : texp[0]; pve = (ve == 0) ? 
1.0f : pow(Sumsq[ithere] * istep, ve); ste = pow(istep, te); ngrad = grad * lr * ste / pve; atomicAdd(&MM[ithere], ngrad); } if (Mask != NULL) { if (maskrows > 1) { if (Mask[ithere] == 0) MM[ithere] = 0; } else { if (Mask[jthere] == 0) MM[ithere] = 0; } } } __global__ void __hashmultADAGrad(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, int transpose, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) { bool doit = false; int ihere, ithere, jthere; float grad; int istart = ((long long)blockIdx.x) * ncols/ gridDim.x; int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; for (int i = istart; i < iend ; i++) { // i is the column index int jstart = Bjc[i]; // Range of nz rows in this column int jend = Bjc[i+1]; int nr = jend - jstart; // Number of nz rows int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs) for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column int j1 = solve1(j); // Compute the first and second indices int j2 = j - j1*(j1+1)/2; // int j1, j2; // solvex(todo, j, j1, j2); float f1 = Bdata[jstart + j1]; // Get the two features float f2 = Bdata[jstart + j2]; int r1 = Bir[jstart + j1]; // And their row indices int r2 = Bir[jstart + j2]; int ind = mmhash2(r1, r2, nfeats); // Hash the indices long long rank = r1 + 1; float prod = f1; if (j1 == j2) { doit = (rank < bound1); } else { prod *= f2; rank *= r2 + 1; doit = (rank < bound2); } if (doit) { if (transpose > 0) { ihere = threadIdx.x + nrows * i; ithere = threadIdx.x + nrows * ind; jthere = ind; } else { ithere = threadIdx.x + nrows * i; jthere = i; ihere = threadIdx.x + nrows * ind; } grad = A[ihere] * prod; // raw gradient __gupdate(grad, threadIdx.x, ithere, jthere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, 
epsilon); } } } } int hashmultADAGrad(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, int transpose, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) { int nt = max(1, 256/nrows); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, ncols); hipLaunchKernelGGL(( __hashmultADAGrad), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nfeats, ncols, bound1, bound2, A, Bdata, Bir, Bjc, transpose, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __hashcross(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *Cdata, int *Cir, int *Cjc, float *D, int transpose) { int r1, r2, ind; int istart = ((long long)blockIdx.x) * ncols/ gridDim.x; int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; for (int i = istart; i < iend ; i++) { // i is the column index int jstart1 = Bjc[i]; // Range of nz rows in this column of B int jend1 = Bjc[i+1]; int jstart2 = Cjc[i]; // Range of nz rows in this column of C int jend2 = Cjc[i+1]; int nr1 = jend1 - jstart1; // Number of nz rows int nr2 = jend2 - jstart2; // Number of nz rows int todo = (nr1+1) * (nr2+1) - 1; // Number of pairs + singletons to process for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column int j1 = j / nr2; int j2 = j - j1 * nr2; float prod = 1.0f; int hash = seed; if (j1 < nr1) { prod *= Bdata[jstart1 + j1]; // Get the two features r1 = Bir[jstart1 + j1]; // And their row indices hash = h1(r1, hash); } if (j2 < nr2) { prod *= Cdata[jstart2 + j2]; r2 = Cir[jstart2 + j2]; hash = h1(r2, hash); // Hash the indices } ind = mmhashend(hash, nfeats); if (transpose > 0) { float sum = A[threadIdx.x + nrows * i] * prod; // Do the product 
atomicAdd(&D[threadIdx.x + nrows * ind], sum); } else { float sum = A[threadIdx.x + nrows * ind] * prod; atomicAdd(&D[threadIdx.x + nrows * i], sum); } } } } int hashcross(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *Cdata, int *Cir, int *Cjc, float *D, int transpose) { int nt = max(1, 256/nrows); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, ncols); hipLaunchKernelGGL(( __hashcross), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nfeats, ncols, A, Bdata, Bir, Bjc, Cdata, Cir, Cjc, D, transpose); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; }
6852d1c4ae5368a6a0c73bbf0950db4bd49cd605.cu
#include <cuda_runtime.h> #include <curand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> #include <MurmurHash.hpp> #if __CUDA_ARCH__ >= 300 #define MAXXGRID 2147483647 #else #define MAXXGRID 65535 #endif __forceinline__ __device__ int solve1(int j) { float v = sqrtf((float)j); #pragma unroll for (int k = 0; k < 5; k++) { v = v - (v*(v+1)-2*j)/(2*v+1); // Newton iterations to find first index. } return (int)(v+2e-5f); } __forceinline__ __device__ void solvex(int n, int v, int &i, int &j) { int n1 = ((n >> 1) << 1) + 1; int n2 = (n + 1) >> 1; int even = (n1 != n); j = v / n1; i = v - n1 * j; if (j > i - even) { i = n1 - i - 1; j = n2 + n2 - j + 1; } else { i = i - even; } } // Feature hashing multiply and multiply-transpose. // This one enumerates, hashes and multiplies all pairs of features. // // NOTE: The single-matrix version (hashmult) uses a fast lookup recurrence which is only valid up to 3000 base features per column (approx 4.5 million pairs) // Given dense A and sparse B, for each column of B, enumerate all pairs of features, hash to a single feature index, and multiply by A into C __global__ void __hashmult(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) { bool doit = false; int istart = ((long long)blockIdx.x) * ncols/ gridDim.x; int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; for (int i = istart; i < iend ; i++) { // i is the column index int jstart = Bjc[i]; // Range of nz rows in this column int jend = Bjc[i+1]; int nr = jend - jstart; // Number of nz rows int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs) for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column int j1 = solve1(j); // Compute the first and second indices int j2 = j - j1*(j1+1)/2; // int j1, j2; // solvex(todo, j, j1, j2); float f1 = Bdata[jstart + j1]; // Get the two features float f2 = Bdata[jstart + j2]; int r1 = 
Bir[jstart + j1]; // And their row indices int r2 = Bir[jstart + j2]; int ind = mmhash2(r1, r2, nfeats); // Hash the indices long long rank = r1 + 1; float prod = f1; if (j1 == j2) { doit = (rank < bound1); } else { prod *= f2; rank *= r2 + 1; doit = (rank < bound2); } if (doit) { if (transpose > 0) { float sum = A[threadIdx.x + nrows * i] * prod; // Do the product atomicAdd(&C[threadIdx.x + nrows * ind], sum); } else { float sum = A[threadIdx.x + nrows * ind] * prod; // Do the product atomicAdd(&C[threadIdx.x + nrows * i], sum); } } } } } __forceinline__ __device__ int hash2(int a, int b, int modulus) { return (((a * 453423453) + b) * 34143242142) % modulus; } #if __CUDA_ARCH__ >= 300 // This version is designed for few (or one) row in A. It allocates one warp per column __global__ void __hashmult2(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) { bool doit = false; int istart = ((long long)blockIdx.x) * ncols / gridDim.x; int iend = ((long long)(blockIdx.x+1)) * ncols / gridDim.x; for (int i = istart; i < iend ; i++) { // i is the column index int jstart = Bjc[i]; // Range of nz rows in this column int jend = Bjc[i+1]; int nr = jend - jstart; // Number of nz rows for (int j1 = 0; j1 < nr; j1 += blockDim.x) { // work on a block of data float f1 = 0; int r1 = -1; if (j1 + threadIdx.x < nr) { f1 = Bdata[jstart + j1 + threadIdx.x]; // Get the two features r1 = Bir[jstart + j1 + threadIdx.x]; // And their row indices } for (int j2 = j1; j2 < nr; j2 += blockDim.x) { // work on a block of data float f2 = 0; int r2 = -1; if (j2 + threadIdx.x < nr) { f2 = Bdata[jstart + j2 + threadIdx.x]; r2 = Bir[jstart + j2 + threadIdx.x]; } for (int k = 0; k < 32; k++) { float f2shift = __shfl(f2, k); int r2shift = __shfl(r2, k); if (j2 + k < nr && r1 >= 0) { int ind = mmhash2(r1, r2shift, nfeats); // Hash the indices long long rank = r1 + 1; float prod = f1; doit = false; if (j1 + threadIdx.x == j2 + k) { 
doit = (rank < bound1); } else if (j1 + threadIdx.x < j2 + k) { prod *= f2shift; rank *= r2shift + 1; doit = (rank < bound2); } if (doit) { if (transpose > 0) { for (int m = 0; m < nrows; m++) { float sum = A[m + nrows * i] * prod; // Do the product atomicAdd(&C[m + nrows * ind], sum); // atomicAdd(&C[0], sum); } } else { for (int m = 0; m < nrows; m++) { float sum = A[m + nrows * ind] * prod; // Do the product atomicAdd(&C[m + nrows * i], sum); // atomicAdd(&C[0], sum); } } } } } } } } } #else __global__ void __hashmult2(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {} #endif int hashmult(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) { if (nrows >= 0) { int nt = max(1, 256/nrows); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, ncols); __hashmult<<<nblocks,threadDim>>>(nrows, nfeats, ncols, bound1, bound2, A, Bdata, Bir, Bjc, C, transpose); } else { dim3 threadDim(32, 1, 1); int nblocks = min(MAXXGRID, ncols); __hashmult2<<<nblocks,threadDim>>>(nrows, nfeats, ncols, bound1, bound2, A, Bdata, Bir, Bjc, C, transpose); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __forceinline__ __device__ void __gupdate(float grad, int i, int ithere, int jthere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) { float lr, ve, te, pve, ste, ngrad; Sumsq[ithere] += grad * grad + epsilon; if (addgrad) { lr = (lrlen > 1) ? lrate[i] : lrate[0]; ve = (vexplen > 1) ? vexp[i] : vexp[0]; te = (texplen > 1) ? texp[i] : texp[0]; pve = (ve == 0) ? 
1.0f : pow(Sumsq[ithere] * istep, ve); ste = pow(istep, te); ngrad = grad * lr * ste / pve; atomicAdd(&MM[ithere], ngrad); } if (Mask != NULL) { if (maskrows > 1) { if (Mask[ithere] == 0) MM[ithere] = 0; } else { if (Mask[jthere] == 0) MM[ithere] = 0; } } } __global__ void __hashmultADAGrad(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, int transpose, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) { bool doit = false; int ihere, ithere, jthere; float grad; int istart = ((long long)blockIdx.x) * ncols/ gridDim.x; int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; for (int i = istart; i < iend ; i++) { // i is the column index int jstart = Bjc[i]; // Range of nz rows in this column int jend = Bjc[i+1]; int nr = jend - jstart; // Number of nz rows int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs) for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column int j1 = solve1(j); // Compute the first and second indices int j2 = j - j1*(j1+1)/2; // int j1, j2; // solvex(todo, j, j1, j2); float f1 = Bdata[jstart + j1]; // Get the two features float f2 = Bdata[jstart + j2]; int r1 = Bir[jstart + j1]; // And their row indices int r2 = Bir[jstart + j2]; int ind = mmhash2(r1, r2, nfeats); // Hash the indices long long rank = r1 + 1; float prod = f1; if (j1 == j2) { doit = (rank < bound1); } else { prod *= f2; rank *= r2 + 1; doit = (rank < bound2); } if (doit) { if (transpose > 0) { ihere = threadIdx.x + nrows * i; ithere = threadIdx.x + nrows * ind; jthere = ind; } else { ithere = threadIdx.x + nrows * i; jthere = i; ihere = threadIdx.x + nrows * ind; } grad = A[ihere] * prod; // raw gradient __gupdate(grad, threadIdx.x, ithere, jthere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, 
epsilon); } } } } int hashmultADAGrad(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, int transpose, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) { int nt = max(1, 256/nrows); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, ncols); __hashmultADAGrad<<<nblocks,threadDim>>>(nrows, nfeats, ncols, bound1, bound2, A, Bdata, Bir, Bjc, transpose, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __hashcross(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *Cdata, int *Cir, int *Cjc, float *D, int transpose) { int r1, r2, ind; int istart = ((long long)blockIdx.x) * ncols/ gridDim.x; int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; for (int i = istart; i < iend ; i++) { // i is the column index int jstart1 = Bjc[i]; // Range of nz rows in this column of B int jend1 = Bjc[i+1]; int jstart2 = Cjc[i]; // Range of nz rows in this column of C int jend2 = Cjc[i+1]; int nr1 = jend1 - jstart1; // Number of nz rows int nr2 = jend2 - jstart2; // Number of nz rows int todo = (nr1+1) * (nr2+1) - 1; // Number of pairs + singletons to process for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column int j1 = j / nr2; int j2 = j - j1 * nr2; float prod = 1.0f; int hash = seed; if (j1 < nr1) { prod *= Bdata[jstart1 + j1]; // Get the two features r1 = Bir[jstart1 + j1]; // And their row indices hash = h1(r1, hash); } if (j2 < nr2) { prod *= Cdata[jstart2 + j2]; r2 = Cir[jstart2 + j2]; hash = h1(r2, hash); // Hash the indices } ind = mmhashend(hash, nfeats); if (transpose > 0) { float sum = A[threadIdx.x + nrows * i] * prod; // Do the product atomicAdd(&D[threadIdx.x + nrows * ind], sum); 
} else { float sum = A[threadIdx.x + nrows * ind] * prod; atomicAdd(&D[threadIdx.x + nrows * i], sum); } } } } int hashcross(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *Cdata, int *Cir, int *Cjc, float *D, int transpose) { int nt = max(1, 256/nrows); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, ncols); __hashcross<<<nblocks,threadDim>>>(nrows, nfeats, ncols, A, Bdata, Bir, Bjc, Cdata, Cir, Cjc, D, transpose); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; }
1a9d185dde7694f251074bc691bcbf552815e790.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <wrapper/app/mad/mad_enactor.cuh> #include <wrapper/app/mad/mad_problem.cuh> #include <wrapper/app/mad/mad_functor.cuh> #include <wrapper/util/util.h> using namespace wrapper; using namespace wrapper::util; using namespace wrapper::cuda; using namespace wrapper::app::mad; template< typename Value> void RunTests( Value *origin_elements, int num_elements) { typedef MADProblem< Value> Problem; Value *h_results = (Value*)malloc(sizeof(Value)*num_elements); MADEnactor mad_enactor; Problem *simple_problem = new Problem; simple_problem->Init(num_elements, origin_elements); mad_enactor.Enact<Problem>(simple_problem); simple_problem->Extract(h_results); printf("results:"); for (int i = 0; i < num_elements; ++i) { printf("%5f, ", h_results[i]); } printf("\n"); if (h_results) free(h_results); if (simple_problem) delete simple_problem; hipDeviceSynchronize(); } int main(int argc, char** argv) { CommandLineArgs args(argc, argv); DeviceInit(args); hipSetDeviceFlags(hipDeviceMapHost); int num_elements = 1; args.GetCmdLineArgument("num", num_elements); typedef float Value; Value *h_origins = (Value*)malloc(sizeof(Value)*num_elements); printf("origin data:"); for (int i = 0; i < num_elements; ++i) { h_origins[i] = i; printf("%5f, ", h_origins[i]); } printf("\n"); RunTests(h_origins, num_elements); return 0; }
1a9d185dde7694f251074bc691bcbf552815e790.cu
#include <stdio.h> #include <math.h> #include <wrapper/app/mad/mad_enactor.cuh> #include <wrapper/app/mad/mad_problem.cuh> #include <wrapper/app/mad/mad_functor.cuh> #include <wrapper/util/util.h> using namespace wrapper; using namespace wrapper::util; using namespace wrapper::cuda; using namespace wrapper::app::mad; template< typename Value> void RunTests( Value *origin_elements, int num_elements) { typedef MADProblem< Value> Problem; Value *h_results = (Value*)malloc(sizeof(Value)*num_elements); MADEnactor mad_enactor; Problem *simple_problem = new Problem; simple_problem->Init(num_elements, origin_elements); mad_enactor.Enact<Problem>(simple_problem); simple_problem->Extract(h_results); printf("results:"); for (int i = 0; i < num_elements; ++i) { printf("%5f, ", h_results[i]); } printf("\n"); if (h_results) free(h_results); if (simple_problem) delete simple_problem; cudaDeviceSynchronize(); } int main(int argc, char** argv) { CommandLineArgs args(argc, argv); DeviceInit(args); cudaSetDeviceFlags(cudaDeviceMapHost); int num_elements = 1; args.GetCmdLineArgument("num", num_elements); typedef float Value; Value *h_origins = (Value*)malloc(sizeof(Value)*num_elements); printf("origin data:"); for (int i = 0; i < num_elements; ++i) { h_origins[i] = i; printf("%5f, ", h_origins[i]); } printf("\n"); RunTests(h_origins, num_elements); return 0; }
397b932153b05ac78ccf7fdacfd73be95a39f09d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <ATen/DeviceGuard.h> #include <vector> #include <iostream> int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left, 0.f), height = max(bottom - top, 0.f); float interS = width * height; float Sa = (a[2] - a[0]) * (a[3] - a[1]); float Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == 
col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 5 tensor at::Tensor nms_gpu(const at::Tensor& boxes, float nms_overlap_thresh, int64_t top_k) { at::DeviceGuard guard(boxes.device()); using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); int boxes_num = ::min(boxes.size(0), top_k); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = reinterpret_cast<unsigned long long*>(THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long))); // unsigned long long* mask_dev = NULL; // THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; 
int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)}).sort(0, false)); }
397b932153b05ac78ccf7fdacfd73be95a39f09d.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>

#include <ATen/DeviceGuard.h>

#include <vector>
#include <iostream>

// One bit of the suppression mask per box, so each mask word / thread block
// tile covers 64 boxes (8 * sizeof(unsigned long long) bits).
int const threadsPerBlock = sizeof(unsigned long long) * 8;

// Intersection-over-union of two boxes laid out as [x1, y1, x2, y2, ...].
// Assumes well-formed boxes (x2 >= x1, y2 >= y1); degenerate overlaps clamp
// to zero area via the max(..., 0.f) terms.
__device__ inline float devIoU(float const * const a, float const * const b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  float width = max(right - left, 0.f), height = max(bottom - top, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0]) * (a[3] - a[1]);
  float Sb = (b[2] - b[0]) * (b[3] - b[1]);
  return interS / (Sa + Sb - interS);
}

// Pairwise-suppression kernel. Block (col_start, row_start) compares up to
// threadsPerBlock "row" boxes against up to threadsPerBlock "col" boxes and
// writes, for each row box, a 64-bit mask of the col boxes it overlaps with
// IoU > nms_overlap_thresh. dev_boxes is N x 5 ([x1, y1, x2, y2, score]),
// sorted by descending score; dev_mask is N x ceil(N / threadsPerBlock).
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size =
        min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
        min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  // Stage this tile's column boxes in shared memory (5 floats per box).
  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    if (row_start == col_start) {
      // Within a diagonal tile only compare against later (lower-scored)
      // boxes, so a box never suppresses itself or a higher-scored box.
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

// boxes is a N x 5 tensor ([x1, y1, x2, y2, score]) on the GPU.
// Performs hard NMS on at most the top_k highest-scoring boxes and returns
// the sorted indices (into the original `boxes`) of the kept boxes.
at::Tensor nms_gpu(const at::Tensor& boxes, float nms_overlap_thresh, int64_t top_k) {
  at::DeviceGuard guard(boxes.device());
  using scalar_t = float;
  AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
  int boxes_num = std::min(boxes.size(0), top_k);

  // Robustness: a zero-sized grid is an invalid launch configuration, so
  // handle the empty input explicitly.
  if (boxes_num == 0) {
    return at::empty({0}, boxes.options().dtype(at::kLong));
  }

  auto scores = boxes.select(1, 4);
  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
  auto boxes_sorted = boxes.index_select(0, order_t);

  const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);

  scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();

  THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState

  unsigned long long* mask_dev = reinterpret_cast<unsigned long long*>(
    THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)));
  // unsigned long long* mask_dev = NULL;
  // THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
  //                          boxes_num * col_blocks * sizeof(unsigned long long)));

  dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
              THCCeilDiv(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  nms_kernel<<<blocks, threads>>>(boxes_num,
                                  nms_overlap_thresh,
                                  boxes_dev,
                                  mask_dev);
  // Surface launch-configuration errors immediately instead of at the next
  // synchronizing call.
  THCudaCheck(hipGetLastError());

  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  // BUGFIX: this call was left as cudaMemcpy/cudaMemcpyDeviceToHost by the
  // hipify conversion; those symbols do not exist on a HIP/ROCm build.
  THCudaCheck(hipMemcpy(&mask_host[0],
                        mask_dev,
                        sizeof(unsigned long long) * boxes_num * col_blocks,
                        hipMemcpyDeviceToHost));

  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

  at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
  int64_t* keep_out = keep.data<int64_t>();

  // Greedy sweep in descending score order: keep a box unless an earlier
  // kept box already suppressed it, then OR its mask into the removal set.
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;

    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long *p = &mask_host[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }

  THCudaFree(state, mask_dev);
  // TODO improve this part
  return std::get<0>(order_t.index({
                       keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)}).sort(0, false));
}
cba1065c260848c3c2aba1d710197b5299337acc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Unit tests for FIL (Forest Inference Library): each fixture builds a random
// forest, runs a reference prediction on the CPU, runs FIL on the GPU, and
// compares the two within a tolerance.

#include <cuml/fil/fil.h>
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <treelite/c_api.h>
#include <treelite/frontend.h>
#include <treelite/tree.h>
#include <cmath>
#include <cstdio>
#include <limits>
#include <memory>
#include <raft/cuda_utils.cuh>
#include <raft/random/rng.cuh>
#include <utility>
#include "../../src/fil/internal.cuh"

// Treelite C++ calls report failure via negative return codes.
#define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error")

namespace ML {

using namespace MLCommon;
namespace tl  = treelite;
namespace tlf = treelite::frontend;
using namespace fil;

// One parameter combination for a FIL test case; defaults are overridden per
// case via the FIL_TEST_PARAMS macro below.
struct FilTestParams {
  // input data parameters
  int num_rows   = 20'000;
  int num_cols   = 50;
  float nan_prob = 0.05;
  // forest parameters
  int depth       = 8;
  int num_trees   = 50;
  float leaf_prob = 0.05;
  // output parameters
  output_t output   = output_t::RAW;
  float threshold   = 0.0f;
  float global_bias = 0.0f;
  // runtime parameters
  int blocks_per_sm = 0;
  algo_t algo       = algo_t::NAIVE;
  int seed          = 42;
  float tolerance   = 2e-3f;
  // treelite parameters, only used for treelite tests
  tl::Operator op       = tl::Operator::kLT;
  leaf_algo_t leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY;
  // when FLOAT_UNARY_BINARY == leaf_algo:
  // num_classes = 1 means it's regression
  // num_classes = 2 means it's binary classification
  // (complement probabilities, then use threshold)
  // when GROVE_PER_CLASS == leaf_algo:
  // it's multiclass classification (num_classes must be > 2),
  // done by splitting the forest in num_classes groups,
  // each of which computes one-vs-all probability for its class.
  // when CATEGORICAL_LEAF == leaf_algo:
  // num_classes must be > 1 and it's multiclass classification.
  // done by storing the class label in each leaf and voting.
  // it's used in treelite ModelBuilder initialization
  int num_classes = 1;

  // predict_proba always emits at least two columns (complemented binary)
  size_t num_proba_outputs() { return num_rows * ::max(num_classes, 2); }
  size_t num_preds_outputs() { return num_rows; }
};

// Human-readable rendering of the output_t bit flags, for test logs.
std::string output2str(fil::output_t output)
{
  if (output == fil::RAW) return "RAW";
  std::string s = "";
  if (output & fil::AVG) s += "| AVG";
  if (output & fil::CLASS) s += "| CLASS";
  if (output & fil::SIGMOID) s += "| SIGMOID";
  return s;
}

// Pretty-printer used by gtest when a parameterized case fails.
std::ostream& operator<<(std::ostream& os, const FilTestParams& ps)
{
  os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols
     << ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth
     << ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob
     << ", output = " << output2str(ps.output)
     << ", threshold = " << ps.threshold
     << ", blocks_per_sm = " << ps.blocks_per_sm << ", algo = " << ps.algo
     << ", seed = " << ps.seed << ", tolerance = " << ps.tolerance
     << ", op = " << tl::OpName(ps.op) << ", global_bias = " << ps.global_bias
     << ", leaf_algo = " << ps.leaf_algo
     << ", num_classes = " << ps.num_classes;
  return os;
}

// Overwrites data[i] with `nan` wherever mask[i] is false; used to inject
// missing values into the generated input matrix.
__global__ void nan_kernel(float* data, const bool* mask, int len, float nan)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= len) return;
  if (!mask[tid]) data[tid] = nan;
}

float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); }

// Base fixture: generates a random dense forest and random (partly-NaN) data,
// computes reference predictions on the CPU, and lets subclasses decide how
// the forest is imported into FIL (dense, sparse, or via treelite).
class BaseFilTest : public testing::TestWithParam<FilTestParams> {
 protected:
  void setup_helper()
  {
    // setup
    ps = testing::TestWithParam<FilTestParams>::GetParam();
    CUDA_CHECK(hipStreamCreate(&stream));
    handle.set_stream(stream);
    generate_forest();
    generate_data();
    predict_on_cpu();
    predict_on_gpu();
  }

  void SetUp() override { setup_helper(); }

  void TearDown() override
  {
    CUDA_CHECK(hipFree(preds_d));
    CUDA_CHECK(hipFree(want_preds_d));
    CUDA_CHECK(hipFree(data_d));
    CUDA_CHECK(hipFree(want_proba_d));
    CUDA_CHECK(hipFree(proba_d));
  }

  // Fills `nodes` with a random complete binary forest: random weights,
  // thresholds, feature ids and default directions, with the bottom half of
  // every tree forced to be leaves.
  void generate_forest()
  {
    size_t num_nodes = forest_num_nodes();

    // helper data
    /// weights, used as float* or int*
    int* weights_d     = nullptr;
    float* thresholds_d = nullptr;
    int* fids_d        = nullptr;
    bool* def_lefts_d  = nullptr;
    bool* is_leafs_d   = nullptr;
    bool* def_lefts_h  = nullptr;
    bool* is_leafs_h   = nullptr;

    // allocate GPU data
    raft::allocate(weights_d, num_nodes);
    // sizeof(float) == sizeof(int)
    raft::allocate(thresholds_d, num_nodes);
    raft::allocate(fids_d, num_nodes);
    raft::allocate(def_lefts_d, num_nodes);
    raft::allocate(is_leafs_d, num_nodes);

    // generate on-GPU random data
    raft::random::Rng r(ps.seed);
    if (ps.leaf_algo != fil::leaf_algo_t::CATEGORICAL_LEAF) {
      r.uniform((float*)weights_d, num_nodes, -1.0f, 1.0f, stream);
    } else {
      // [0..num_classes)
      r.uniformInt((int*)weights_d, num_nodes, 0, ps.num_classes, stream);
    }
    r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream);
    r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream);
    r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream);
    r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream);

    // copy data to host
    std::vector<float> thresholds_h(num_nodes);
    std::vector<int> weights_h(num_nodes), fids_h(num_nodes);
    def_lefts_h = new bool[num_nodes];
    is_leafs_h  = new bool[num_nodes];

    raft::update_host(weights_h.data(), (int*)weights_d, num_nodes, stream);
    raft::update_host(thresholds_h.data(), thresholds_d, num_nodes, stream);
    raft::update_host(fids_h.data(), fids_d, num_nodes, stream);
    raft::update_host(def_lefts_h, def_lefts_d, num_nodes, stream);
    raft::update_host(is_leafs_h, is_leafs_d, num_nodes, stream);
    CUDA_CHECK(hipStreamSynchronize(stream));

    // mark leaves
    for (size_t i = 0; i < ps.num_trees; ++i) {
      int num_tree_nodes = tree_num_nodes();
      size_t leaf_start  = num_tree_nodes * i + num_tree_nodes / 2;
      size_t leaf_end    = num_tree_nodes * (i + 1);
      for (size_t j = leaf_start; j < leaf_end; ++j) {
        is_leafs_h[j] = true;
      }
    }

    // initialize nodes
    nodes.resize(num_nodes);
    for (size_t i = 0; i < num_nodes; ++i) {
      fil::val_t w;
      switch (ps.leaf_algo) {
        case fil::leaf_algo_t::CATEGORICAL_LEAF: w.idx = weights_h[i]; break;
        case fil::leaf_algo_t::FLOAT_UNARY_BINARY:
        case fil::leaf_algo_t::GROVE_PER_CLASS:
          // not relying on fil::val_t internals
          // merely that we copied floats into weights_h earlier
          std::memcpy(&w.f, &weights_h[i], sizeof w.f);
          break;
        default: ASSERT(false, "internal error: invalid ps.leaf_algo");
      }
      nodes[i] =
        fil::dense_node(w, thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]);
    }

    // clean up
    delete[] def_lefts_h;
    delete[] is_leafs_h;
    CUDA_CHECK(hipFree(is_leafs_d));
    CUDA_CHECK(hipFree(def_lefts_d));
    CUDA_CHECK(hipFree(fids_d));
    CUDA_CHECK(hipFree(thresholds_d));
    CUDA_CHECK(hipFree(weights_d));
  }

  // Generates a uniform random input matrix on the GPU, injects NaNs with
  // probability (1 - nan_prob per the bernoulli mask), and mirrors it to host.
  void generate_data()
  {
    // allocate arrays
    size_t num_data = ps.num_rows * ps.num_cols;
    raft::allocate(data_d, num_data);
    bool* mask_d = nullptr;
    raft::allocate(mask_d, num_data);

    // generate random data
    raft::random::Rng r(ps.seed);
    r.uniform(data_d, num_data, -1.0f, 1.0f, stream);
    r.bernoulli(mask_d, num_data, ps.nan_prob, stream);
    int tpb = 256;
    hipLaunchKernelGGL(( nan_kernel), dim3(raft::ceildiv(int(num_data), tpb)), dim3(tpb), 0, stream,
      data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN());
    CUDA_CHECK(hipPeekAtLastError());

    // copy to host
    data_h.resize(num_data);
    raft::update_host(data_h.data(), data_d, num_data, stream);
    CUDA_CHECK(hipStreamSynchronize(stream));

    // clean up
    CUDA_CHECK(hipFree(mask_d));
  }

  // Applies the configured output transform (AVG, bias, SIGMOID, CLASS) to a
  // raw forest score; writes the pre-threshold value to `proba` and the final
  // value to `output`.
  void transform(float f, float& proba, float& output)
  {
    if ((ps.output & fil::output_t::AVG) != 0) { f *= (1.0f / ps.num_trees); }
    f += ps.global_bias;
    if ((ps.output & fil::output_t::SIGMOID) != 0) { f = sigmoid(f); }
    proba = f;
    if ((ps.output & fil::output_t::CLASS) != 0) { f = f > ps.threshold ? 1.0f : 0.0f; }
    output = f;
  }

  // For binary classification: class-0 probability complements class-1.
  void complement(float* proba) { proba[0] = 1.0f - proba[1]; }

  // Reference implementation: walks every tree on the host and fills
  // want_preds_d / want_proba_d for later comparison against FIL.
  void predict_on_cpu()
  {
    // predict on host
    std::vector<float> want_preds_h(ps.num_preds_outputs());
    std::vector<float> want_proba_h(ps.num_proba_outputs());
    int num_nodes = tree_num_nodes();
    std::vector<float> class_scores(ps.num_classes);
    switch (ps.leaf_algo) {
      case fil::leaf_algo_t::FLOAT_UNARY_BINARY:
        for (int i = 0; i < ps.num_rows; ++i) {
          float pred = 0.0f;
          for (int j = 0; j < ps.num_trees; ++j) {
            pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]).f;
          }
          transform(pred, want_proba_h[i * 2 + 1], want_preds_h[i]);
          complement(&(want_proba_h[i * 2]));
        }
        break;
      case fil::leaf_algo_t::GROVE_PER_CLASS:
        for (int row = 0; row < ps.num_rows; ++row) {
          std::fill(class_scores.begin(), class_scores.end(), 0.0f);
          for (int tree = 0; tree < ps.num_trees; ++tree) {
            // tree `tree` contributes to class (tree % num_classes)
            class_scores[tree % ps.num_classes] +=
              infer_one_tree(&nodes[tree * num_nodes], &data_h[row * ps.num_cols]).f;
          }
          // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models)
          want_preds_h[row] =
            std::max_element(class_scores.begin(), class_scores.end()) - class_scores.begin();
        }
        break;
      case fil::leaf_algo_t::CATEGORICAL_LEAF:
        std::vector<int> class_votes(ps.num_classes);
        for (int r = 0; r < ps.num_rows; ++r) {
          std::fill(class_votes.begin(), class_votes.end(), 0);
          for (int j = 0; j < ps.num_trees; ++j) {
            int class_label =
              infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols]).idx;
            ++class_votes[class_label];
          }
          for (int c = 0; c < ps.num_classes; ++c) {
            float thresholded_proba;  // not used; do argmax instead
            transform(class_votes[c], want_proba_h[r * ps.num_classes + c], thresholded_proba);
          }
          want_preds_h[r] =
            std::max_element(class_votes.begin(), class_votes.end()) - class_votes.begin();
        }
        break;
    }

    // copy to GPU
    raft::allocate(want_preds_d, ps.num_preds_outputs());
    raft::allocate(want_proba_d, ps.num_proba_outputs());
    raft::update_device(want_preds_d, want_preds_h.data(), ps.num_preds_outputs(), stream);
    raft::update_device(want_proba_d, want_proba_h.data(), ps.num_proba_outputs(), stream);
    CUDA_CHECK(hipStreamSynchronize(stream));
  }

  // Subclasses build the FIL forest (dense / sparse / treelite import) here.
  virtual void init_forest(fil::forest_t* pforest) = 0;

  // Runs FIL on the GPU, producing preds_d and (where supported) proba_d.
  void predict_on_gpu()
  {
    fil::forest_t forest = nullptr;
    init_forest(&forest);

    // predict
    raft::allocate(preds_d, ps.num_preds_outputs());
    raft::allocate(proba_d, ps.num_proba_outputs());
    fil::predict(handle, forest, preds_d, data_d, ps.num_rows);
    // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models)
    if (ps.leaf_algo != fil::leaf_algo_t::GROVE_PER_CLASS)
      fil::predict(handle, forest, proba_d, data_d, ps.num_rows, true);
    CUDA_CHECK(hipStreamSynchronize(stream));

    // cleanup
    fil::free(handle, forest);
  }

  // Compares GPU results against the CPU reference within tolerance.
  void compare()
  {
    // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models)
    if (ps.leaf_algo != fil::leaf_algo_t::GROVE_PER_CLASS) {
      ASSERT_TRUE(raft::devArrMatch(want_proba_d,
                                    proba_d,
                                    ps.num_proba_outputs(),
                                    raft::CompareApprox<float>(ps.tolerance),
                                    stream));
    }
    float tolerance = ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY
                        ? ps.tolerance
                        : std::numeric_limits<float>::epsilon();
    // in multi-class prediction, floats represent the most likely class
    // and would be generated by converting an int to float
    ASSERT_TRUE(raft::devArrMatch(
      want_preds_d, preds_d, ps.num_rows, raft::CompareApprox<float>(tolerance), stream));
  }

  // Descends one complete binary tree for a single row; NaN features follow
  // the node's default direction.
  fil::val_t infer_one_tree(fil::dense_node* root, float* data)
  {
    int curr = 0;
    fil::val_t output{.f = 0.0f};
    for (;;) {
      const fil::dense_node& node = root[curr];
      if (node.is_leaf()) return node.base_node::output<val_t>();
      float val = data[node.fid()];
      bool cond = isnan(val) ? !node.def_left() : val >= node.thresh();
      curr      = (curr << 1) + 1 + (cond ? 1 : 0);
    }
    return output;
  }

  // nodes in one complete binary tree of the configured depth
  int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; }

  int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; }

  // predictions
  float* preds_d      = nullptr;
  float* proba_d      = nullptr;
  float* want_preds_d = nullptr;
  float* want_proba_d = nullptr;

  // input data
  float* data_d = nullptr;
  std::vector<float> data_h;

  // forest data
  std::vector<fil::dense_node> nodes;

  // parameters
  hipStream_t stream;
  raft::handle_t handle;
  FilTestParams ps;
};

// Imports the generated forest directly as a dense FIL forest.
class PredictDenseFilTest : public BaseFilTest {
 protected:
  void init_forest(fil::forest_t* pforest) override
  {
    // init FIL model
    fil::forest_params_t fil_ps;
    fil_ps.depth         = ps.depth;
    fil_ps.num_trees     = ps.num_trees;
    fil_ps.num_cols      = ps.num_cols;
    fil_ps.algo          = ps.algo;
    fil_ps.output        = ps.output;
    fil_ps.threshold     = ps.threshold;
    fil_ps.global_bias   = ps.global_bias;
    fil_ps.leaf_algo     = ps.leaf_algo;
    fil_ps.num_classes   = ps.num_classes;
    fil_ps.blocks_per_sm = ps.blocks_per_sm;
    fil::init_dense(handle, pforest, nodes.data(), &fil_ps);
  }
};

// Converts the dense forest into FIL's sparse node layout (node type is the
// template parameter) before import.
template <typename fil_node_t>
class BasePredictSparseFilTest : public BaseFilTest {
 protected:
  // Recursively copies the dense subtree rooted at i_dense into sparse_nodes;
  // children of an inner node occupy consecutive slots, addressed relative to
  // the tree's sparse root.
  void dense2sparse_node(const fil::dense_node* dense_root,
                         int i_dense,
                         int i_sparse_root,
                         int i_sparse)
  {
    const fil::dense_node& node = dense_root[i_dense];
    if (node.is_leaf()) {
      // leaf sparse node
      sparse_nodes[i_sparse] = fil_node_t(node.base_node::output<val_t>(),
                                          node.thresh(),
                                          node.fid(),
                                          node.def_left(),
                                          node.is_leaf(),
                                          0);
      return;
    }
    // inner sparse node
    // reserve space for children
    int left_index = sparse_nodes.size();
    sparse_nodes.push_back(fil_node_t());
    sparse_nodes.push_back(fil_node_t());
    sparse_nodes[i_sparse] = fil_node_t(node.base_node::output<val_t>(),
                                        node.thresh(),
                                        node.fid(),
                                        node.def_left(),
                                        node.is_leaf(),
                                        left_index - i_sparse_root);
    dense2sparse_node(dense_root, 2 * i_dense + 1, i_sparse_root, left_index);
    dense2sparse_node(dense_root, 2 * i_dense + 2, i_sparse_root, left_index + 1);
  }

  void dense2sparse_tree(const fil::dense_node* dense_root)
  {
    int i_sparse_root = sparse_nodes.size();
    sparse_nodes.push_back(fil_node_t());
    dense2sparse_node(dense_root, 0, i_sparse_root, i_sparse_root);
    trees.push_back(i_sparse_root);
  }

  void dense2sparse()
  {
    for (int tree = 0; tree < ps.num_trees; ++tree) {
      dense2sparse_tree(&nodes[tree * tree_num_nodes()]);
    }
  }

  void init_forest(fil::forest_t* pforest) override
  {
    // init FIL model
    fil::forest_params_t fil_params;
    fil_params.num_trees     = ps.num_trees;
    fil_params.num_cols      = ps.num_cols;
    fil_params.algo          = ps.algo;
    fil_params.output        = ps.output;
    fil_params.threshold     = ps.threshold;
    fil_params.global_bias   = ps.global_bias;
    fil_params.leaf_algo     = ps.leaf_algo;
    fil_params.num_classes   = ps.num_classes;
    fil_params.blocks_per_sm = ps.blocks_per_sm;
    dense2sparse();
    fil_params.num_nodes = sparse_nodes.size();
    fil::init_sparse(handle, pforest, trees.data(), sparse_nodes.data(), &fil_params);
  }
  std::vector<fil_node_t> sparse_nodes;
  std::vector<int> trees;
};

typedef BasePredictSparseFilTest<fil::sparse_node16> PredictSparse16FilTest;
typedef BasePredictSparseFilTest<fil::sparse_node8> PredictSparse8FilTest;

// Rebuilds the generated forest as a treelite model and imports it through
// fil::from_treelite(), exercising the treelite conversion path.
class TreeliteFilTest : public BaseFilTest {
 protected:
  /** adds nodes[node] of tree starting at index root to builder
      at index at *pkey, increments *pkey,
      and returns the treelite key of the node */
  int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node)
  {
    int key = (*pkey)++;
    builder->CreateNode(key);
    const fil::dense_node& dense_node = nodes[node];
    if (dense_node.is_leaf()) {
      switch (ps.leaf_algo) {
        case fil::leaf_algo_t::FLOAT_UNARY_BINARY:
        case fil::leaf_algo_t::GROVE_PER_CLASS:
          // default is fil::FLOAT_UNARY_BINARY
          builder->SetLeafNode(key,
                               tlf::Value::Create(dense_node.base_node::output<val_t>().f));
          break;
        case fil::leaf_algo_t::CATEGORICAL_LEAF:
          // one-hot leaf vector: 1.0 for the stored class label, 0.0 elsewhere
          std::vector<tlf::Value> vec(ps.num_classes);
          for (int i = 0; i < ps.num_classes; ++i) {
            vec[i] = tlf::Value::Create(
              i == dense_node.base_node::output<val_t>().idx ? 1.0f : 0.0f);
          }
          builder->SetLeafVectorNode(key, vec);
      }
    } else {
      int left          = root + 2 * (node - root) + 1;
      int right         = root + 2 * (node - root) + 2;
      float threshold   = dense_node.thresh();
      bool default_left = dense_node.def_left();
      // FIL evaluates `val >= thresh`; emulate the other comparison
      // operators by nudging the threshold and/or swapping children.
      switch (ps.op) {
        case tl::Operator::kLT: break;
        case tl::Operator::kLE:
          // adjust the threshold
          threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
          break;
        case tl::Operator::kGT:
          // adjust the threshold; left and right still need to be swapped
          threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
        case tl::Operator::kGE:
          // swap left and right
          std::swap(left, right);
          default_left = !default_left;
          break;
        default: ASSERT(false, "comparison operator must be <, >, <= or >=");
      }
      int left_key  = node_to_treelite(builder, pkey, root, left);
      int right_key = node_to_treelite(builder, pkey, root, right);
      builder->SetNumericalTestNode(key,
                                    dense_node.fid(),
                                    ps.op,
                                    tlf::Value::Create(threshold),
                                    default_left,
                                    left_key,
                                    right_key);
    }
    return key;
  }

  // Builds the treelite model from `nodes` and imports it into FIL with the
  // requested storage type.
  void init_forest_impl(fil::forest_t* pforest, fil::storage_type_t storage_type)
  {
    bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0;
    int treelite_num_classes =
      ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY ? 1 : ps.num_classes;
    std::unique_ptr<tlf::ModelBuilder> model_builder(
      new tlf::ModelBuilder(ps.num_cols,
                            treelite_num_classes,
                            random_forest_flag,
                            tl::TypeInfo::kFloat32,
                            tl::TypeInfo::kFloat32));

    // prediction transform
    if ((ps.output & fil::output_t::SIGMOID) != 0) {
      model_builder->SetModelParam("pred_transform", "sigmoid");
    } else if (ps.leaf_algo != fil::leaf_algo_t::FLOAT_UNARY_BINARY) {
      model_builder->SetModelParam("pred_transform", "max_index");
      ps.output = fil::output_t(ps.output | fil::output_t::CLASS);
    } else {
      model_builder->SetModelParam("pred_transform", "identity");
    }

    // global bias
    char* global_bias_str = nullptr;
    ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0,
           "cannot convert global_bias into a string");
    model_builder->SetModelParam("global_bias", global_bias_str);
    ::free(global_bias_str);

    // build the trees
    for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) {
      tlf::TreeBuilder* tree_builder =
        new tlf::TreeBuilder(tl::TypeInfo::kFloat32, tl::TypeInfo::kFloat32);
      int key_counter = 0;
      int root        = i_tree * tree_num_nodes();
      int root_key    = node_to_treelite(tree_builder, &key_counter, root, root);
      tree_builder->SetRootNode(root_key);
      // InsertTree() consumes tree_builder
      TL_CPP_CHECK(model_builder->InsertTree(tree_builder));
    }

    // commit the model
    std::unique_ptr<tl::Model> model = model_builder->CommitModel();

    // init FIL forest with the model
    fil::treelite_params_t params;
    params.algo          = ps.algo;
    params.threshold     = ps.threshold;
    params.output_class  = (ps.output & fil::output_t::CLASS) != 0;
    params.storage_type  = storage_type;
    params.blocks_per_sm = ps.blocks_per_sm;
    fil::from_treelite(handle, pforest, (ModelHandle)model.get(), &params);
    CUDA_CHECK(hipStreamSynchronize(stream));
  }
};

class TreeliteDenseFilTest : public TreeliteFilTest {
 protected:
  void init_forest(fil::forest_t* pforest) override
  {
    init_forest_impl(pforest, fil::storage_type_t::DENSE);
  }
};

class TreeliteSparse16FilTest : public TreeliteFilTest {
 protected:
  void init_forest(fil::forest_t* pforest) override
  {
    init_forest_impl(pforest, fil::storage_type_t::SPARSE);
  }
};

class TreeliteSparse8FilTest : public TreeliteFilTest {
 protected:
  void init_forest(fil::forest_t* pforest) override
  {
    init_forest_impl(pforest, fil::storage_type_t::SPARSE8);
  }
};

class TreeliteAutoFilTest : public TreeliteFilTest {
 protected:
  void init_forest(fil::forest_t* pforest) override
  {
    init_forest_impl(pforest, fil::storage_type_t::AUTO);
  }
};

// test for failures; currently only supported for sparse8 nodes
class TreeliteThrowSparse8FilTest : public TreeliteSparse8FilTest {
 protected:
  // model import happens in check(), so this function is empty
  void SetUp() override {}
  void check() { ASSERT_THROW(setup_helper(), raft::exception); }
};

/** mechanism to use named aggregate initialization before C++20, and also use
    the struct defaults. Using it directly only works if all defaulted
    members come after ones explicitly mentioned.
**/
#define FIL_TEST_PARAMS(...)                                  \
  []() {                                                      \
    struct NonDefaultFilTestParams : public FilTestParams {   \
      NonDefaultFilTestParams() { __VA_ARGS__; }              \
    };                                                        \
    return FilTestParams(NonDefaultFilTestParams());          \
  }()

// kEQ is intentionally unused, and kLT is default
static const tl::Operator kLE = tl::Operator::kLE;
static const tl::Operator kGT = tl::Operator::kGT;
static const tl::Operator kGE = tl::Operator::kGE;

std::vector<FilTestParams> predict_dense_inputs = {
  FIL_TEST_PARAMS(),
  FIL_TEST_PARAMS(algo = TREE_REORG),
  FIL_TEST_PARAMS(algo = BATCH_TREE_REORG),
  FIL_TEST_PARAMS(output = SIGMOID),
  FIL_TEST_PARAMS(output = SIGMOID, algo = TREE_REORG),
  FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, num_classes = 2),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = TREE_REORG, num_classes = 2),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG),
  FIL_TEST_PARAMS(output = AVG, algo = TREE_REORG),
  FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG),
  FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG_CLASS, algo = TREE_REORG, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, num_classes = 2),
  FIL_TEST_PARAMS(global_bias = 0.5, algo = TREE_REORG),
  FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, algo = BATCH_TREE_REORG),
  FIL_TEST_PARAMS(output = AVG, global_bias = 0.5),
  FIL_TEST_PARAMS(output = AVG_CLASS,
                  threshold = 1.0,
                  global_bias = 0.5,
                  algo = TREE_REORG,
                  num_classes = 2),
  FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO),
  FIL_TEST_PARAMS(output = AVG_CLASS,
                  algo = BATCH_TREE_REORG,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 5),
  FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
  FIL_TEST_PARAMS(algo = TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
  FIL_TEST_PARAMS(output = SIGMOID, leaf_algo = CATEGORICAL_LEAF, num_classes = 7),
  FIL_TEST_PARAMS(global_bias = 0.5,
                  algo = TREE_REORG,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 4),
  FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 4),
  FIL_TEST_PARAMS(output = AVG_CLASS,
                  algo = BATCH_TREE_REORG,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 5),
  FIL_TEST_PARAMS(algo = TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
  FIL_TEST_PARAMS(num_trees = 49, output = SIGMOID, leaf_algo = GROVE_PER_CLASS, num_classes = 7),
  FIL_TEST_PARAMS(num_trees = 52,
                  global_bias = 0.5,
                  algo = TREE_REORG,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 4),
  FIL_TEST_PARAMS(num_trees = 52,
                  output = AVG,
                  global_bias = 0.5,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 4),
  FIL_TEST_PARAMS(blocks_per_sm = 1),
  FIL_TEST_PARAMS(blocks_per_sm = 4),
  FIL_TEST_PARAMS(num_classes = 3, blocks_per_sm = 1, leaf_algo = CATEGORICAL_LEAF),
  FIL_TEST_PARAMS(num_classes = 3, blocks_per_sm = 4, leaf_algo = CATEGORICAL_LEAF),
  FIL_TEST_PARAMS(num_classes = 5, blocks_per_sm = 1, leaf_algo = GROVE_PER_CLASS),
  FIL_TEST_PARAMS(num_classes = 5, blocks_per_sm = 4, leaf_algo = GROVE_PER_CLASS),
  FIL_TEST_PARAMS(leaf_algo = GROVE_PER_CLASS,
                  blocks_per_sm = 1,
                  num_trees = 512,
                  num_classes = 512),
  FIL_TEST_PARAMS(leaf_algo = GROVE_PER_CLASS,
                  blocks_per_sm = 4,
                  num_trees = 512,
                  num_classes = 512),
  FIL_TEST_PARAMS(num_cols = 100'000, depth = 5, num_trees = 1, leaf_algo = FLOAT_UNARY_BINARY),
  FIL_TEST_PARAMS(num_rows = 101,
                  num_cols = 100'000,
                  depth = 5,
                  num_trees = 3,
                  algo = BATCH_TREE_REORG,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 3),
  FIL_TEST_PARAMS(num_rows = 102,
                  num_cols = 100'000,
                  depth = 5,
                  num_trees = FIL_TPB + 1,
                  algo = BATCH_TREE_REORG,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = FIL_TPB + 1),
  FIL_TEST_PARAMS(num_rows = 103,
                  num_cols = 100'000,
                  depth = 5,
                  num_trees = 1,
                  algo = BATCH_TREE_REORG,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 3),
};

TEST_P(PredictDenseFilTest, Predict) { compare(); }

INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest, testing::ValuesIn(predict_dense_inputs));

std::vector<FilTestParams> predict_sparse_inputs = {
  FIL_TEST_PARAMS(),
  FIL_TEST_PARAMS(output = SIGMOID),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG),
  FIL_TEST_PARAMS(output = AVG_CLASS, global_bias = 0.5, num_classes = 2),
  FIL_TEST_PARAMS(global_bias = 0.5),
  FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5),
  FIL_TEST_PARAMS(output = AVG, global_bias = 0.5),
  FIL_TEST_PARAMS(output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, num_classes = 2),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = ALGO_AUTO, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG_CLASS,
                  threshold = 1.0,
                  global_bias = 0.5,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 5000),
  FIL_TEST_PARAMS(global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
  FIL_TEST_PARAMS(output = CLASS, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
  FIL_TEST_PARAMS(leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
  FIL_TEST_PARAMS(depth = 2,
                  num_trees = 5000,
                  output = AVG_CLASS,
                  threshold = 1.0,
                  global_bias = 0.5,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 5000),
  FIL_TEST_PARAMS(num_trees = 60, global_bias = 0.5, leaf_algo = GROVE_PER_CLASS, num_classes = 6),
  FIL_TEST_PARAMS(num_trees = 51, output = CLASS, leaf_algo = GROVE_PER_CLASS, num_classes = 3),
  FIL_TEST_PARAMS(num_trees = 51, leaf_algo = GROVE_PER_CLASS, num_classes = 3),
};

TEST_P(PredictSparse16FilTest, Predict) { compare(); }

// Temporarily disabled, see https://github.com/rapidsai/cuml/issues/3205
INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse16FilTest, testing::ValuesIn(predict_sparse_inputs));

TEST_P(PredictSparse8FilTest, Predict) { compare(); }

INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse8FilTest, testing::ValuesIn(predict_sparse_inputs));

std::vector<FilTestParams> import_dense_inputs = {
  FIL_TEST_PARAMS(),
  FIL_TEST_PARAMS(output = SIGMOID, op = kLE),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, op = kGT, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG, op = kGE),
  FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
  FIL_TEST_PARAMS(algo = TREE_REORG, op = kLE),
  FIL_TEST_PARAMS(output = SIGMOID, algo = TREE_REORG, op = kGT),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = TREE_REORG, op = kGE, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG, algo = TREE_REORG),
  FIL_TEST_PARAMS(output = AVG_CLASS, algo = TREE_REORG, op = kLE, num_classes = 2),
  FIL_TEST_PARAMS(algo = BATCH_TREE_REORG),
  FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG),
  FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kLE),
  FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kLE),
  FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kGT),
  FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kGT),
  FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kGE),
  FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kGE),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, num_classes = 2),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, op = kLE, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG),
  FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG, op = kLE),
  FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, op = kGT, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, op = kGE, num_classes = 2),
  FIL_TEST_PARAMS(global_bias = 0.5, algo = TREE_REORG),
  FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, algo = BATCH_TREE_REORG, op = kLE),
  FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, op = kGT),
  FIL_TEST_PARAMS(output = AVG_CLASS,
                  threshold = 1.0,
                  global_bias = 0.5,
                  algo = TREE_REORG,
                  op = kGE,
                  num_classes = 2),
  FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO, op = kLE),
  FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO, op = kLE),
  FIL_TEST_PARAMS(output = AVG,
                  algo = BATCH_TREE_REORG,
                  op = kGE,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 5),
  FIL_TEST_PARAMS(output = AVG,
                  algo = BATCH_TREE_REORG,
                  op = kGT,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 6),
  FIL_TEST_PARAMS(output = AVG,
                  algo = BATCH_TREE_REORG,
                  op = kLE,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 3),
  FIL_TEST_PARAMS(output = AVG,
                  algo = BATCH_TREE_REORG,
                  op = kLE,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 5),
  FIL_TEST_PARAMS(output = AVG_CLASS,
                  algo = TREE_REORG,
                  op = kLE,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 5),
  FIL_TEST_PARAMS(output = AVG,
                  algo = TREE_REORG,
                  op = kLE,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 7),
  FIL_TEST_PARAMS(output = AVG, leaf_algo = CATEGORICAL_LEAF, num_classes = 6),
  FIL_TEST_PARAMS(output = CLASS,
                  algo = BATCH_TREE_REORG,
                  op = kGE,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 5),
  FIL_TEST_PARAMS(num_trees = 48,
                  output = CLASS,
                  algo = BATCH_TREE_REORG,
                  op = kGT,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 6),
  FIL_TEST_PARAMS(num_trees = 51,
                  output = CLASS,
                  algo = BATCH_TREE_REORG,
                  op = kLE,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 3),
  FIL_TEST_PARAMS(output = CLASS,
                  algo = BATCH_TREE_REORG,
                  op = kLE,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 5),
  FIL_TEST_PARAMS(output = CLASS,
                  algo = TREE_REORG,
                  op = kLE,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 5),
  FIL_TEST_PARAMS(num_trees = 49,
                  output = CLASS,
                  algo = TREE_REORG,
                  op = kLE,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 7),
  FIL_TEST_PARAMS(num_trees = 48, output = CLASS, leaf_algo = GROVE_PER_CLASS, num_classes = 6),
};

TEST_P(TreeliteDenseFilTest, Import) { compare(); }

INSTANTIATE_TEST_CASE_P(FilTests, TreeliteDenseFilTest, testing::ValuesIn(import_dense_inputs));

std::vector<FilTestParams> import_sparse_inputs = {
  FIL_TEST_PARAMS(),
  FIL_TEST_PARAMS(output = SIGMOID, op = kLE),
  FIL_TEST_PARAMS(output = SIGMOID_CLASS, op = kGT, num_classes = 2),
  FIL_TEST_PARAMS(output = AVG, op = kGE),
  FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2),
  FIL_TEST_PARAMS(global_bias = 0.5),
  FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, op = kLE),
  FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, op = kGT),
  FIL_TEST_PARAMS(output = AVG_CLASS,
                  threshold = 1.0,
                  global_bias = 0.5,
                  op = kGE,
                  num_classes = 2),
  FIL_TEST_PARAMS(algo = ALGO_AUTO),
  FIL_TEST_PARAMS(output = AVG_CLASS,
                  threshold = 1.0,
                  global_bias = 0.5,
                  op = kGE,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 10),
  FIL_TEST_PARAMS(output = AVG, algo = ALGO_AUTO, leaf_algo = CATEGORICAL_LEAF, num_classes = 4),
  FIL_TEST_PARAMS(output = AVG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5),
  FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 3),
  FIL_TEST_PARAMS(output = CLASS,
                  threshold = 1.0,
                  global_bias = 0.5,
                  op = kGE,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 10),
  FIL_TEST_PARAMS(num_trees = 52,
                  output = CLASS,
                  algo = ALGO_AUTO,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 4),
  FIL_TEST_PARAMS(output = CLASS, op = kLE, leaf_algo = GROVE_PER_CLASS, num_classes = 5),
  FIL_TEST_PARAMS(num_trees = 51,
                  output = CLASS,
                  global_bias = 0.5,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 3),
};

TEST_P(TreeliteSparse16FilTest, Import) { compare(); }

INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse16FilTest, testing::ValuesIn(import_sparse_inputs));

TEST_P(TreeliteSparse8FilTest, Import) { compare(); }

INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse8FilTest, testing::ValuesIn(import_sparse_inputs));

std::vector<FilTestParams> import_auto_inputs = {
  FIL_TEST_PARAMS(depth = 10, algo = ALGO_AUTO),
  FIL_TEST_PARAMS(depth = 15, algo = ALGO_AUTO),
  FIL_TEST_PARAMS(depth = 19, algo = ALGO_AUTO),
  FIL_TEST_PARAMS(depth = 19, algo = BATCH_TREE_REORG),
  FIL_TEST_PARAMS(depth = 10,
                  output = AVG,
                  algo = ALGO_AUTO,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 3),
  FIL_TEST_PARAMS(depth = 10,
                  num_trees = 51,
                  output = CLASS,
                  algo = ALGO_AUTO,
                  leaf_algo = GROVE_PER_CLASS,
                  num_classes = 3),
#if 0
  FIL_TEST_PARAMS(depth = 19,
                  output = AVG,
                  algo = BATCH_TREE_REORG,
                  leaf_algo = CATEGORICAL_LEAF,
                  num_classes = 6),
#endif
};

TEST_P(TreeliteAutoFilTest, Import) { compare(); }

INSTANTIATE_TEST_CASE_P(FilTests, TreeliteAutoFilTest, testing::ValuesIn(import_auto_inputs));

// adjust test parameters if the sparse8 format changes
std::vector<FilTestParams> import_throw_sparse8_inputs = {
  // too many features
  FIL_TEST_PARAMS(num_rows = 100, num_cols = 20000, depth = 10),
  // too many tree nodes
  FIL_TEST_PARAMS(depth = 16, num_trees = 5, leaf_prob = 0),
};

TEST_P(TreeliteThrowSparse8FilTest, Import) { check(); }

INSTANTIATE_TEST_CASE_P(FilTests,
                        TreeliteThrowSparse8FilTest,
                        testing::ValuesIn(import_throw_sparse8_inputs));

}  // namespace ML
cba1065c260848c3c2aba1d710197b5299337acc.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/fil/fil.h> #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <test_utils.h> #include <treelite/c_api.h> #include <treelite/frontend.h> #include <treelite/tree.h> #include <cmath> #include <cstdio> #include <limits> #include <memory> #include <raft/cuda_utils.cuh> #include <raft/random/rng.cuh> #include <utility> #include "../../src/fil/internal.cuh" #define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error") namespace ML { using namespace MLCommon; namespace tl = treelite; namespace tlf = treelite::frontend; using namespace fil; struct FilTestParams { // input data parameters int num_rows = 20'000; int num_cols = 50; float nan_prob = 0.05; // forest parameters int depth = 8; int num_trees = 50; float leaf_prob = 0.05; // output parameters output_t output = output_t::RAW; float threshold = 0.0f; float global_bias = 0.0f; // runtime parameters int blocks_per_sm = 0; algo_t algo = algo_t::NAIVE; int seed = 42; float tolerance = 2e-3f; // treelite parameters, only used for treelite tests tl::Operator op = tl::Operator::kLT; leaf_algo_t leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY; // when FLOAT_UNARY_BINARY == leaf_algo: // num_classes = 1 means it's regression // num_classes = 2 means it's binary classification // (complement probabilities, then use threshold) // when GROVE_PER_CLASS == leaf_algo: // it's multiclass classification (num_classes 
must be > 2), // done by splitting the forest in num_classes groups, // each of which computes one-vs-all probability for its class. // when CATEGORICAL_LEAF == leaf_algo: // num_classes must be > 1 and it's multiclass classification. // done by storing the class label in each leaf and voting. // it's used in treelite ModelBuilder initialization int num_classes = 1; size_t num_proba_outputs() { return num_rows * std::max(num_classes, 2); } size_t num_preds_outputs() { return num_rows; } }; std::string output2str(fil::output_t output) { if (output == fil::RAW) return "RAW"; std::string s = ""; if (output & fil::AVG) s += "| AVG"; if (output & fil::CLASS) s += "| CLASS"; if (output & fil::SIGMOID) s += "| SIGMOID"; return s; } std::ostream& operator<<(std::ostream& os, const FilTestParams& ps) { os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols << ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth << ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob << ", output = " << output2str(ps.output) << ", threshold = " << ps.threshold << ", blocks_per_sm = " << ps.blocks_per_sm << ", algo = " << ps.algo << ", seed = " << ps.seed << ", tolerance = " << ps.tolerance << ", op = " << tl::OpName(ps.op) << ", global_bias = " << ps.global_bias << ", leaf_algo = " << ps.leaf_algo << ", num_classes = " << ps.num_classes; return os; } __global__ void nan_kernel(float* data, const bool* mask, int len, float nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } class BaseFilTest : public testing::TestWithParam<FilTestParams> { protected: void setup_helper() { // setup ps = testing::TestWithParam<FilTestParams>::GetParam(); CUDA_CHECK(cudaStreamCreate(&stream)); handle.set_stream(stream); generate_forest(); generate_data(); predict_on_cpu(); predict_on_gpu(); } void SetUp() override { setup_helper(); } void TearDown() 
override { CUDA_CHECK(cudaFree(preds_d)); CUDA_CHECK(cudaFree(want_preds_d)); CUDA_CHECK(cudaFree(data_d)); CUDA_CHECK(cudaFree(want_proba_d)); CUDA_CHECK(cudaFree(proba_d)); } void generate_forest() { size_t num_nodes = forest_num_nodes(); // helper data /// weights, used as float* or int* int* weights_d = nullptr; float* thresholds_d = nullptr; int* fids_d = nullptr; bool* def_lefts_d = nullptr; bool* is_leafs_d = nullptr; bool* def_lefts_h = nullptr; bool* is_leafs_h = nullptr; // allocate GPU data raft::allocate(weights_d, num_nodes); // sizeof(float) == sizeof(int) raft::allocate(thresholds_d, num_nodes); raft::allocate(fids_d, num_nodes); raft::allocate(def_lefts_d, num_nodes); raft::allocate(is_leafs_d, num_nodes); // generate on-GPU random data raft::random::Rng r(ps.seed); if (ps.leaf_algo != fil::leaf_algo_t::CATEGORICAL_LEAF) { r.uniform((float*)weights_d, num_nodes, -1.0f, 1.0f, stream); } else { // [0..num_classes) r.uniformInt((int*)weights_d, num_nodes, 0, ps.num_classes, stream); } r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream); r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream); r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream); r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream); // copy data to host std::vector<float> thresholds_h(num_nodes); std::vector<int> weights_h(num_nodes), fids_h(num_nodes); def_lefts_h = new bool[num_nodes]; is_leafs_h = new bool[num_nodes]; raft::update_host(weights_h.data(), (int*)weights_d, num_nodes, stream); raft::update_host(thresholds_h.data(), thresholds_d, num_nodes, stream); raft::update_host(fids_h.data(), fids_d, num_nodes, stream); raft::update_host(def_lefts_h, def_lefts_d, num_nodes, stream); raft::update_host(is_leafs_h, is_leafs_d, num_nodes, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); // mark leaves for (size_t i = 0; i < ps.num_trees; ++i) { int num_tree_nodes = tree_num_nodes(); size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2; size_t leaf_end = 
num_tree_nodes * (i + 1); for (size_t j = leaf_start; j < leaf_end; ++j) { is_leafs_h[j] = true; } } // initialize nodes nodes.resize(num_nodes); for (size_t i = 0; i < num_nodes; ++i) { fil::val_t w; switch (ps.leaf_algo) { case fil::leaf_algo_t::CATEGORICAL_LEAF: w.idx = weights_h[i]; break; case fil::leaf_algo_t::FLOAT_UNARY_BINARY: case fil::leaf_algo_t::GROVE_PER_CLASS: // not relying on fil::val_t internals // merely that we copied floats into weights_h earlier std::memcpy(&w.f, &weights_h[i], sizeof w.f); break; default: ASSERT(false, "internal error: invalid ps.leaf_algo"); } nodes[i] = fil::dense_node(w, thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]); } // clean up delete[] def_lefts_h; delete[] is_leafs_h; CUDA_CHECK(cudaFree(is_leafs_d)); CUDA_CHECK(cudaFree(def_lefts_d)); CUDA_CHECK(cudaFree(fids_d)); CUDA_CHECK(cudaFree(thresholds_d)); CUDA_CHECK(cudaFree(weights_d)); } void generate_data() { // allocate arrays size_t num_data = ps.num_rows * ps.num_cols; raft::allocate(data_d, num_data); bool* mask_d = nullptr; raft::allocate(mask_d, num_data); // generate random data raft::random::Rng r(ps.seed); r.uniform(data_d, num_data, -1.0f, 1.0f, stream); r.bernoulli(mask_d, num_data, ps.nan_prob, stream); int tpb = 256; nan_kernel<<<raft::ceildiv(int(num_data), tpb), tpb, 0, stream>>>( data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN()); CUDA_CHECK(cudaPeekAtLastError()); // copy to host data_h.resize(num_data); raft::update_host(data_h.data(), data_d, num_data, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); // clean up CUDA_CHECK(cudaFree(mask_d)); } void transform(float f, float& proba, float& output) { if ((ps.output & fil::output_t::AVG) != 0) { f *= (1.0f / ps.num_trees); } f += ps.global_bias; if ((ps.output & fil::output_t::SIGMOID) != 0) { f = sigmoid(f); } proba = f; if ((ps.output & fil::output_t::CLASS) != 0) { f = f > ps.threshold ? 
1.0f : 0.0f; } output = f; } void complement(float* proba) { proba[0] = 1.0f - proba[1]; } void predict_on_cpu() { // predict on host std::vector<float> want_preds_h(ps.num_preds_outputs()); std::vector<float> want_proba_h(ps.num_proba_outputs()); int num_nodes = tree_num_nodes(); std::vector<float> class_scores(ps.num_classes); switch (ps.leaf_algo) { case fil::leaf_algo_t::FLOAT_UNARY_BINARY: for (int i = 0; i < ps.num_rows; ++i) { float pred = 0.0f; for (int j = 0; j < ps.num_trees; ++j) { pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]).f; } transform(pred, want_proba_h[i * 2 + 1], want_preds_h[i]); complement(&(want_proba_h[i * 2])); } break; case fil::leaf_algo_t::GROVE_PER_CLASS: for (int row = 0; row < ps.num_rows; ++row) { std::fill(class_scores.begin(), class_scores.end(), 0.0f); for (int tree = 0; tree < ps.num_trees; ++tree) { class_scores[tree % ps.num_classes] += infer_one_tree(&nodes[tree * num_nodes], &data_h[row * ps.num_cols]) .f; } // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models) want_preds_h[row] = std::max_element(class_scores.begin(), class_scores.end()) - class_scores.begin(); } break; case fil::leaf_algo_t::CATEGORICAL_LEAF: std::vector<int> class_votes(ps.num_classes); for (int r = 0; r < ps.num_rows; ++r) { std::fill(class_votes.begin(), class_votes.end(), 0); for (int j = 0; j < ps.num_trees; ++j) { int class_label = infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols]) .idx; ++class_votes[class_label]; } for (int c = 0; c < ps.num_classes; ++c) { float thresholded_proba; // not used; do argmax instead transform(class_votes[c], want_proba_h[r * ps.num_classes + c], thresholded_proba); } want_preds_h[r] = std::max_element(class_votes.begin(), class_votes.end()) - class_votes.begin(); } break; } // copy to GPU raft::allocate(want_preds_d, ps.num_preds_outputs()); raft::allocate(want_proba_d, ps.num_proba_outputs()); raft::update_device(want_preds_d, want_preds_h.data(), 
ps.num_preds_outputs(), stream); raft::update_device(want_proba_d, want_proba_h.data(), ps.num_proba_outputs(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); } virtual void init_forest(fil::forest_t* pforest) = 0; void predict_on_gpu() { fil::forest_t forest = nullptr; init_forest(&forest); // predict raft::allocate(preds_d, ps.num_preds_outputs()); raft::allocate(proba_d, ps.num_proba_outputs()); fil::predict(handle, forest, preds_d, data_d, ps.num_rows); // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models) if (ps.leaf_algo != fil::leaf_algo_t::GROVE_PER_CLASS) fil::predict(handle, forest, proba_d, data_d, ps.num_rows, true); CUDA_CHECK(cudaStreamSynchronize(stream)); // cleanup fil::free(handle, forest); } void compare() { // not supporting predict_proba() with GROVE_PER_CLASS (xgboost-style models) if (ps.leaf_algo != fil::leaf_algo_t::GROVE_PER_CLASS) { ASSERT_TRUE( raft::devArrMatch(want_proba_d, proba_d, ps.num_proba_outputs(), raft::CompareApprox<float>(ps.tolerance), stream)); } float tolerance = ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY ? ps.tolerance : std::numeric_limits<float>::epsilon(); // in multi-class prediction, floats represent the most likely class // and would be generated by converting an int to float ASSERT_TRUE(raft::devArrMatch(want_preds_d, preds_d, ps.num_rows, raft::CompareApprox<float>(tolerance), stream)); } fil::val_t infer_one_tree(fil::dense_node* root, float* data) { int curr = 0; fil::val_t output{.f = 0.0f}; for (;;) { const fil::dense_node& node = root[curr]; if (node.is_leaf()) return node.base_node::output<val_t>(); float val = data[node.fid()]; bool cond = isnan(val) ? !node.def_left() : val >= node.thresh(); curr = (curr << 1) + 1 + (cond ? 
1 : 0); } return output; } int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; } int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; } // predictions float* preds_d = nullptr; float* proba_d = nullptr; float* want_preds_d = nullptr; float* want_proba_d = nullptr; // input data float* data_d = nullptr; std::vector<float> data_h; // forest data std::vector<fil::dense_node> nodes; // parameters cudaStream_t stream; raft::handle_t handle; FilTestParams ps; }; class PredictDenseFilTest : public BaseFilTest { protected: void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_ps; fil_ps.depth = ps.depth; fil_ps.num_trees = ps.num_trees; fil_ps.num_cols = ps.num_cols; fil_ps.algo = ps.algo; fil_ps.output = ps.output; fil_ps.threshold = ps.threshold; fil_ps.global_bias = ps.global_bias; fil_ps.leaf_algo = ps.leaf_algo; fil_ps.num_classes = ps.num_classes; fil_ps.blocks_per_sm = ps.blocks_per_sm; fil::init_dense(handle, pforest, nodes.data(), &fil_ps); } }; template <typename fil_node_t> class BasePredictSparseFilTest : public BaseFilTest { protected: void dense2sparse_node(const fil::dense_node* dense_root, int i_dense, int i_sparse_root, int i_sparse) { const fil::dense_node& node = dense_root[i_dense]; if (node.is_leaf()) { // leaf sparse node sparse_nodes[i_sparse] = fil_node_t(node.base_node::output<val_t>(), node.thresh(), node.fid(), node.def_left(), node.is_leaf(), 0); return; } // inner sparse node // reserve space for children int left_index = sparse_nodes.size(); sparse_nodes.push_back(fil_node_t()); sparse_nodes.push_back(fil_node_t()); sparse_nodes[i_sparse] = fil_node_t(node.base_node::output<val_t>(), node.thresh(), node.fid(), node.def_left(), node.is_leaf(), left_index - i_sparse_root); dense2sparse_node(dense_root, 2 * i_dense + 1, i_sparse_root, left_index); dense2sparse_node(dense_root, 2 * i_dense + 2, i_sparse_root, left_index + 1); } void dense2sparse_tree(const fil::dense_node* dense_root) { int 
i_sparse_root = sparse_nodes.size(); sparse_nodes.push_back(fil_node_t()); dense2sparse_node(dense_root, 0, i_sparse_root, i_sparse_root); trees.push_back(i_sparse_root); } void dense2sparse() { for (int tree = 0; tree < ps.num_trees; ++tree) { dense2sparse_tree(&nodes[tree * tree_num_nodes()]); } } void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_params; fil_params.num_trees = ps.num_trees; fil_params.num_cols = ps.num_cols; fil_params.algo = ps.algo; fil_params.output = ps.output; fil_params.threshold = ps.threshold; fil_params.global_bias = ps.global_bias; fil_params.leaf_algo = ps.leaf_algo; fil_params.num_classes = ps.num_classes; fil_params.blocks_per_sm = ps.blocks_per_sm; dense2sparse(); fil_params.num_nodes = sparse_nodes.size(); fil::init_sparse(handle, pforest, trees.data(), sparse_nodes.data(), &fil_params); } std::vector<fil_node_t> sparse_nodes; std::vector<int> trees; }; typedef BasePredictSparseFilTest<fil::sparse_node16> PredictSparse16FilTest; typedef BasePredictSparseFilTest<fil::sparse_node8> PredictSparse8FilTest; class TreeliteFilTest : public BaseFilTest { protected: /** adds nodes[node] of tree starting at index root to builder at index at *pkey, increments *pkey, and returns the treelite key of the node */ int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node) { int key = (*pkey)++; builder->CreateNode(key); const fil::dense_node& dense_node = nodes[node]; if (dense_node.is_leaf()) { switch (ps.leaf_algo) { case fil::leaf_algo_t::FLOAT_UNARY_BINARY: case fil::leaf_algo_t::GROVE_PER_CLASS: // default is fil::FLOAT_UNARY_BINARY builder->SetLeafNode( key, tlf::Value::Create(dense_node.base_node::output<val_t>().f)); break; case fil::leaf_algo_t::CATEGORICAL_LEAF: std::vector<tlf::Value> vec(ps.num_classes); for (int i = 0; i < ps.num_classes; ++i) { vec[i] = tlf::Value::Create( i == dense_node.base_node::output<val_t>().idx ? 
1.0f : 0.0f); } builder->SetLeafVectorNode(key, vec); } } else { int left = root + 2 * (node - root) + 1; int right = root + 2 * (node - root) + 2; float threshold = dense_node.thresh(); bool default_left = dense_node.def_left(); switch (ps.op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // adjust the threshold threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // adjust the threshold; left and right still need to be swapped threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(left, right); default_left = !default_left; break; default: ASSERT(false, "comparison operator must be <, >, <= or >="); } int left_key = node_to_treelite(builder, pkey, root, left); int right_key = node_to_treelite(builder, pkey, root, right); builder->SetNumericalTestNode(key, dense_node.fid(), ps.op, tlf::Value::Create(threshold), default_left, left_key, right_key); } return key; } void init_forest_impl(fil::forest_t* pforest, fil::storage_type_t storage_type) { bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0; int treelite_num_classes = ps.leaf_algo == fil::leaf_algo_t::FLOAT_UNARY_BINARY ? 
1 : ps.num_classes; std::unique_ptr<tlf::ModelBuilder> model_builder(new tlf::ModelBuilder( ps.num_cols, treelite_num_classes, random_forest_flag, tl::TypeInfo::kFloat32, tl::TypeInfo::kFloat32)); // prediction transform if ((ps.output & fil::output_t::SIGMOID) != 0) { model_builder->SetModelParam("pred_transform", "sigmoid"); } else if (ps.leaf_algo != fil::leaf_algo_t::FLOAT_UNARY_BINARY) { model_builder->SetModelParam("pred_transform", "max_index"); ps.output = fil::output_t(ps.output | fil::output_t::CLASS); } else { model_builder->SetModelParam("pred_transform", "identity"); } // global bias char* global_bias_str = nullptr; ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0, "cannot convert global_bias into a string"); model_builder->SetModelParam("global_bias", global_bias_str); ::free(global_bias_str); // build the trees for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) { tlf::TreeBuilder* tree_builder = new tlf::TreeBuilder(tl::TypeInfo::kFloat32, tl::TypeInfo::kFloat32); int key_counter = 0; int root = i_tree * tree_num_nodes(); int root_key = node_to_treelite(tree_builder, &key_counter, root, root); tree_builder->SetRootNode(root_key); // InsertTree() consumes tree_builder TL_CPP_CHECK(model_builder->InsertTree(tree_builder)); } // commit the model std::unique_ptr<tl::Model> model = model_builder->CommitModel(); // init FIL forest with the model fil::treelite_params_t params; params.algo = ps.algo; params.threshold = ps.threshold; params.output_class = (ps.output & fil::output_t::CLASS) != 0; params.storage_type = storage_type; params.blocks_per_sm = ps.blocks_per_sm; fil::from_treelite(handle, pforest, (ModelHandle)model.get(), &params); CUDA_CHECK(cudaStreamSynchronize(stream)); } }; class TreeliteDenseFilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::DENSE); } }; class TreeliteSparse16FilTest : public TreeliteFilTest { protected: void 
init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::SPARSE); } }; class TreeliteSparse8FilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::SPARSE8); } }; class TreeliteAutoFilTest : public TreeliteFilTest { protected: void init_forest(fil::forest_t* pforest) override { init_forest_impl(pforest, fil::storage_type_t::AUTO); } }; // test for failures; currently only supported for sparse8 nodes class TreeliteThrowSparse8FilTest : public TreeliteSparse8FilTest { protected: // model import happens in check(), so this function is empty void SetUp() override {} void check() { ASSERT_THROW(setup_helper(), raft::exception); } }; /** mechanism to use named aggregate initialization before C++20, and also use the struct defaults. Using it directly only works if all defaulted members come after ones explicitly mentioned. **/ #define FIL_TEST_PARAMS(...) \ []() { \ struct NonDefaultFilTestParams : public FilTestParams { \ NonDefaultFilTestParams() { __VA_ARGS__; } \ }; \ return FilTestParams(NonDefaultFilTestParams()); \ }() // kEQ is intentionally unused, and kLT is default static const tl::Operator kLE = tl::Operator::kLE; static const tl::Operator kGT = tl::Operator::kGT; static const tl::Operator kGE = tl::Operator::kGE; std::vector<FilTestParams> predict_dense_inputs = { FIL_TEST_PARAMS(), FIL_TEST_PARAMS(algo = TREE_REORG), FIL_TEST_PARAMS(algo = BATCH_TREE_REORG), FIL_TEST_PARAMS(output = SIGMOID), FIL_TEST_PARAMS(output = SIGMOID, algo = TREE_REORG), FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG), FIL_TEST_PARAMS(output = SIGMOID_CLASS, num_classes = 2), FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = TREE_REORG, num_classes = 2), FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, num_classes = 2), FIL_TEST_PARAMS(output = AVG), FIL_TEST_PARAMS(output = AVG, algo = TREE_REORG), FIL_TEST_PARAMS(output = AVG, algo = 
BATCH_TREE_REORG), FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2), FIL_TEST_PARAMS(output = AVG_CLASS, algo = TREE_REORG, num_classes = 2), FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, num_classes = 2), FIL_TEST_PARAMS(global_bias = 0.5, algo = TREE_REORG), FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, algo = BATCH_TREE_REORG), FIL_TEST_PARAMS(output = AVG, global_bias = 0.5), FIL_TEST_PARAMS(output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, algo = TREE_REORG, num_classes = 2), FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO), FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 5), FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2), FIL_TEST_PARAMS(algo = TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 5), FIL_TEST_PARAMS(output = SIGMOID, leaf_algo = CATEGORICAL_LEAF, num_classes = 7), FIL_TEST_PARAMS(global_bias = 0.5, algo = TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 4), FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 4), FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = 5), FIL_TEST_PARAMS(algo = TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = 5), FIL_TEST_PARAMS(num_trees = 49, output = SIGMOID, leaf_algo = GROVE_PER_CLASS, num_classes = 7), FIL_TEST_PARAMS(num_trees = 52, global_bias = 0.5, algo = TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = 4), FIL_TEST_PARAMS(num_trees = 52, output = AVG, global_bias = 0.5, leaf_algo = GROVE_PER_CLASS, num_classes = 4), FIL_TEST_PARAMS(blocks_per_sm = 1), FIL_TEST_PARAMS(blocks_per_sm = 4), FIL_TEST_PARAMS(num_classes = 3, blocks_per_sm = 1, leaf_algo = CATEGORICAL_LEAF), FIL_TEST_PARAMS(num_classes = 3, blocks_per_sm = 4, leaf_algo = CATEGORICAL_LEAF), FIL_TEST_PARAMS(num_classes = 5, blocks_per_sm = 1, leaf_algo = GROVE_PER_CLASS), FIL_TEST_PARAMS(num_classes = 5, blocks_per_sm = 4, leaf_algo = 
GROVE_PER_CLASS), FIL_TEST_PARAMS(leaf_algo = GROVE_PER_CLASS, blocks_per_sm = 1, num_trees = 512, num_classes = 512), FIL_TEST_PARAMS(leaf_algo = GROVE_PER_CLASS, blocks_per_sm = 4, num_trees = 512, num_classes = 512), FIL_TEST_PARAMS(num_cols = 100'000, depth = 5, num_trees = 1, leaf_algo = FLOAT_UNARY_BINARY), FIL_TEST_PARAMS(num_rows = 101, num_cols = 100'000, depth = 5, num_trees = 3, algo = BATCH_TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = 3), FIL_TEST_PARAMS(num_rows = 102, num_cols = 100'000, depth = 5, num_trees = FIL_TPB + 1, algo = BATCH_TREE_REORG, leaf_algo = GROVE_PER_CLASS, num_classes = FIL_TPB + 1), FIL_TEST_PARAMS(num_rows = 103, num_cols = 100'000, depth = 5, num_trees = 1, algo = BATCH_TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 3), }; TEST_P(PredictDenseFilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest, testing::ValuesIn(predict_dense_inputs)); std::vector<FilTestParams> predict_sparse_inputs = { FIL_TEST_PARAMS(), FIL_TEST_PARAMS(output = SIGMOID), FIL_TEST_PARAMS(output = SIGMOID_CLASS, num_classes = 2), FIL_TEST_PARAMS(output = AVG), FIL_TEST_PARAMS(output = AVG_CLASS, global_bias = 0.5, num_classes = 2), FIL_TEST_PARAMS(global_bias = 0.5), FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5), FIL_TEST_PARAMS(output = AVG, global_bias = 0.5), FIL_TEST_PARAMS(output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, num_classes = 2), FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = ALGO_AUTO, num_classes = 2), FIL_TEST_PARAMS(output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 5000), FIL_TEST_PARAMS(global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 6), FIL_TEST_PARAMS(output = CLASS, leaf_algo = CATEGORICAL_LEAF, num_classes = 3), FIL_TEST_PARAMS(leaf_algo = CATEGORICAL_LEAF, num_classes = 3), FIL_TEST_PARAMS(depth = 2, num_trees = 5000, output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, leaf_algo = GROVE_PER_CLASS, 
num_classes = 5000), FIL_TEST_PARAMS(num_trees = 60, global_bias = 0.5, leaf_algo = GROVE_PER_CLASS, num_classes = 6), FIL_TEST_PARAMS(num_trees = 51, output = CLASS, leaf_algo = GROVE_PER_CLASS, num_classes = 3), FIL_TEST_PARAMS(num_trees = 51, leaf_algo = GROVE_PER_CLASS, num_classes = 3), }; TEST_P(PredictSparse16FilTest, Predict) { compare(); } // Temporarily disabled, see https://github.com/rapidsai/cuml/issues/3205 INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse16FilTest, testing::ValuesIn(predict_sparse_inputs)); TEST_P(PredictSparse8FilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse8FilTest, testing::ValuesIn(predict_sparse_inputs)); std::vector<FilTestParams> import_dense_inputs = { FIL_TEST_PARAMS(), FIL_TEST_PARAMS(output = SIGMOID, op = kLE), FIL_TEST_PARAMS(output = SIGMOID_CLASS, op = kGT, num_classes = 2), FIL_TEST_PARAMS(output = AVG, op = kGE), FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2), FIL_TEST_PARAMS(algo = TREE_REORG, op = kLE), FIL_TEST_PARAMS(output = SIGMOID, algo = TREE_REORG, op = kGT), FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = TREE_REORG, op = kGE, num_classes = 2), FIL_TEST_PARAMS(output = AVG, algo = TREE_REORG), FIL_TEST_PARAMS(output = AVG_CLASS, algo = TREE_REORG, op = kLE, num_classes = 2), FIL_TEST_PARAMS(algo = BATCH_TREE_REORG), FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG), FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kLE), FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kLE), FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kGT), FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kGT), FIL_TEST_PARAMS(algo = BATCH_TREE_REORG, op = kGE), FIL_TEST_PARAMS(output = SIGMOID, algo = BATCH_TREE_REORG, op = kGE), FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, num_classes = 2), FIL_TEST_PARAMS(output = SIGMOID_CLASS, algo = BATCH_TREE_REORG, op = kLE, num_classes = 2), FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG), 
FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG, op = kLE), FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, op = kGT, num_classes = 2), FIL_TEST_PARAMS(output = AVG_CLASS, algo = BATCH_TREE_REORG, op = kGE, num_classes = 2), FIL_TEST_PARAMS(global_bias = 0.5, algo = TREE_REORG), FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, algo = BATCH_TREE_REORG, op = kLE), FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, op = kGT), FIL_TEST_PARAMS(output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, algo = TREE_REORG, op = kGE, num_classes = 2), FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO, op = kLE), FIL_TEST_PARAMS(output = SIGMOID, algo = ALGO_AUTO, op = kLE), FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG, op = kGE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5), FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG, op = kGT, leaf_algo = CATEGORICAL_LEAF, num_classes = 6), FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 3), FIL_TEST_PARAMS(output = AVG, algo = BATCH_TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5), FIL_TEST_PARAMS(output = AVG_CLASS, algo = TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5), FIL_TEST_PARAMS(output = AVG, algo = TREE_REORG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 7), FIL_TEST_PARAMS(output = AVG, leaf_algo = CATEGORICAL_LEAF, num_classes = 6), FIL_TEST_PARAMS(output = CLASS, algo = BATCH_TREE_REORG, op = kGE, leaf_algo = GROVE_PER_CLASS, num_classes = 5), FIL_TEST_PARAMS(num_trees = 48, output = CLASS, algo = BATCH_TREE_REORG, op = kGT, leaf_algo = GROVE_PER_CLASS, num_classes = 6), FIL_TEST_PARAMS(num_trees = 51, output = CLASS, algo = BATCH_TREE_REORG, op = kLE, leaf_algo = GROVE_PER_CLASS, num_classes = 3), FIL_TEST_PARAMS(output = CLASS, algo = BATCH_TREE_REORG, op = kLE, leaf_algo = GROVE_PER_CLASS, num_classes = 5), FIL_TEST_PARAMS(output = CLASS, algo = TREE_REORG, op = kLE, 
leaf_algo = GROVE_PER_CLASS, num_classes = 5), FIL_TEST_PARAMS(num_trees = 49, output = CLASS, algo = TREE_REORG, op = kLE, leaf_algo = GROVE_PER_CLASS, num_classes = 7), FIL_TEST_PARAMS(num_trees = 48, output = CLASS, leaf_algo = GROVE_PER_CLASS, num_classes = 6), }; TEST_P(TreeliteDenseFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteDenseFilTest, testing::ValuesIn(import_dense_inputs)); std::vector<FilTestParams> import_sparse_inputs = { FIL_TEST_PARAMS(), FIL_TEST_PARAMS(output = SIGMOID, op = kLE), FIL_TEST_PARAMS(output = SIGMOID_CLASS, op = kGT, num_classes = 2), FIL_TEST_PARAMS(output = AVG, op = kGE), FIL_TEST_PARAMS(output = AVG_CLASS, num_classes = 2), FIL_TEST_PARAMS(global_bias = 0.5), FIL_TEST_PARAMS(output = SIGMOID, global_bias = 0.5, op = kLE), FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, op = kGT), FIL_TEST_PARAMS(output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, op = kGE, num_classes = 2), FIL_TEST_PARAMS(algo = ALGO_AUTO), FIL_TEST_PARAMS(output = AVG_CLASS, threshold = 1.0, global_bias = 0.5, op = kGE, leaf_algo = CATEGORICAL_LEAF, num_classes = 10), FIL_TEST_PARAMS(output = AVG, algo = ALGO_AUTO, leaf_algo = CATEGORICAL_LEAF, num_classes = 4), FIL_TEST_PARAMS(output = AVG, op = kLE, leaf_algo = CATEGORICAL_LEAF, num_classes = 5), FIL_TEST_PARAMS(output = AVG, global_bias = 0.5, leaf_algo = CATEGORICAL_LEAF, num_classes = 3), FIL_TEST_PARAMS(output = CLASS, threshold = 1.0, global_bias = 0.5, op = kGE, leaf_algo = GROVE_PER_CLASS, num_classes = 10), FIL_TEST_PARAMS(num_trees = 52, output = CLASS, algo = ALGO_AUTO, leaf_algo = GROVE_PER_CLASS, num_classes = 4), FIL_TEST_PARAMS(output = CLASS, op = kLE, leaf_algo = GROVE_PER_CLASS, num_classes = 5), FIL_TEST_PARAMS(num_trees = 51, output = CLASS, global_bias = 0.5, leaf_algo = GROVE_PER_CLASS, num_classes = 3), }; TEST_P(TreeliteSparse16FilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse16FilTest, 
testing::ValuesIn(import_sparse_inputs)); TEST_P(TreeliteSparse8FilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse8FilTest, testing::ValuesIn(import_sparse_inputs)); std::vector<FilTestParams> import_auto_inputs = { FIL_TEST_PARAMS(depth = 10, algo = ALGO_AUTO), FIL_TEST_PARAMS(depth = 15, algo = ALGO_AUTO), FIL_TEST_PARAMS(depth = 19, algo = ALGO_AUTO), FIL_TEST_PARAMS(depth = 19, algo = BATCH_TREE_REORG), FIL_TEST_PARAMS(depth = 10, output = AVG, algo = ALGO_AUTO, leaf_algo = CATEGORICAL_LEAF, num_classes = 3), FIL_TEST_PARAMS(depth = 10, num_trees = 51, output = CLASS, algo = ALGO_AUTO, leaf_algo = GROVE_PER_CLASS, num_classes = 3), #if 0 FIL_TEST_PARAMS(depth = 19, output = AVG, algo = BATCH_TREE_REORG, leaf_algo = CATEGORICAL_LEAF, num_classes = 6), #endif }; TEST_P(TreeliteAutoFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteAutoFilTest, testing::ValuesIn(import_auto_inputs)); // adjust test parameters if the sparse8 format changes std::vector<FilTestParams> import_throw_sparse8_inputs = { // too many features FIL_TEST_PARAMS(num_rows = 100, num_cols = 20000, depth = 10), // too many tree nodes FIL_TEST_PARAMS(depth = 16, num_trees = 5, leaf_prob = 0), }; TEST_P(TreeliteThrowSparse8FilTest, Import) { check(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteThrowSparse8FilTest, testing::ValuesIn(import_throw_sparse8_inputs)); } // namespace ML
8ced22a1fe6b9b83d29d5d367dadd24f5ec09044.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>

// Abort with a readable message when a HIP runtime call fails.
// The original checked no return codes at all.
#define HIP_CHECK(call)                                                      \
    do {                                                                     \
        hipError_t err_ = (call);                                            \
        if (err_ != hipSuccess) {                                            \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,     \
                    hipGetErrorString(err_));                                 \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// CPU reference multiply: C (hA x wB) = A (hA x wA) * B (wA x wB).
void Mul(float *A, float *B, int hA, int wA, int wB, float *C)
{
    for (int i = 0; i < hA; i++)
        for (int j = 0; j < wB; j++) {
            C[i*wB + j] = 0.0f;
            for (int k = 0; k < wA; k++)
                C[i*wB + j] += A[i*wA + k] * B[k*wB + j];
        }
}

// GPU multiply: one thread per output element.
// Thread x-index i selects the row of A, y-index j the column of B;
// the guard handles grids that overshoot the matrix extents.
__global__ void MulGpu(float *A, float *B, float *C, int hA, int wA, int wB)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    if (i < hA && j < wB)
        for (int k = 0; k < wA; k++)
            C[i*wB + j] += A[i*wA + k] * B[k*wB + j];
}

// Fill M (hM x wM): k on the diagonal, -1/wM everywhere else.
void init_matrix(float *M, int hM, int wM, float k)
{
    for (int i = 0; i < hM; i++)
        for (int j = 0; j < wM; j++)
            M[i*wM + j] = (i == j) ? k * 1.0f : -1.0f / (float)wM;
}

void print_matrix(float *M, int hM, int wM)
{
    for (int i = 0; i < hM; i++) {
        for (int j = 0; j < wM; j++)
            printf("%4.1f ", M[i*wM + j]);
        printf("\n");
    }
}

// Recompute A*B on the CPU and compare against C element-wise.
// Returns 1 when every element matches within 1e-5, 0 otherwise
// (prints the first mismatch, like the original).
int diff(float *A, float *B, int hA, int wA, int wB, float *C)
{
    float *C_cpu = (float *)malloc((size_t)wB * hA * sizeof(float));
    if (C_cpu == NULL)
        return 0;
    Mul(A, B, hA, wA, wB, C_cpu);
    int ok = 1;
    for (int i = 0; i < hA && ok; i++)
        for (int j = 0; j < wB; j++)
            if (fabsf(C_cpu[i*wB + j] - C[i*wB + j]) > 1e-5f) {
                printf("[%i,%i]: %f!=%f\n", i, j, C_cpu[i*wB + j], C[i*wB + j]);
                ok = 0;
                break;
            }
    free(C_cpu);  // was leaked in the original
    return ok;
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    setbuf(stdout, NULL);
    if (argc != 4) {
        printf("./exec hA hB/WA wB\n");
        exit(-1);
    }
    int hA = atoi(argv[1]);
    int wA = atoi(argv[2]);
    int hB = wA;                  // inner dimensions must agree
    int wB = atoi(argv[3]);

    // Init A and B, zero C.
    float *A = (float *)malloc((size_t)hA * wA * sizeof(float));
    float *B = (float *)malloc((size_t)hB * wB * sizeof(float));
    float *C = (float *)malloc((size_t)hA * wB * sizeof(float));
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "host allocation failed\n");
        exit(EXIT_FAILURE);
    }
    init_matrix(A, hA, wA, 1.0f);
    init_matrix(B, hB, wB, 2.0f);
    for (int i = 0; i < hA * wB; i++)
        C[i] = 0.0f;

    float *A_G, *B_G, *C_G;
    HIP_CHECK(hipMalloc((void **)&A_G, sizeof(float) * hA * wA));
    HIP_CHECK(hipMalloc((void **)&B_G, sizeof(float) * hB * wB));
    HIP_CHECK(hipMalloc((void **)&C_G, sizeof(float) * hA * wB));
    HIP_CHECK(hipMemcpy(A_G, A, sizeof(float) * hA * wA, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(B_G, B, sizeof(float) * hB * wB, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(C_G, C, sizeof(float) * hA * wB, hipMemcpyHostToDevice));

    // One 32x32 thread block per 32x32 tile of C.  The original sized the
    // grid as ((hA*wB)/32, (hA*wB)/32), launching far more blocks than
    // needed and overflowing grid-dimension limits for moderate matrices.
    dim3 nThreads_per_block(32, 32);
    dim3 nBlocks((hA + 31) / 32, (wB + 31) / 32);
    hipLaunchKernelGGL(MulGpu, nBlocks, nThreads_per_block, 0, 0,
                       A_G, B_G, C_G, hA, wA, wB);
    HIP_CHECK(hipGetLastError());

    HIP_CHECK(hipMemcpy(C, C_G, sizeof(float) * hA * wB, hipMemcpyDeviceToHost));

    int ok = diff(A, B, hA, wA, wB, C);
    if (!ok)
        printf("ERROR=GPU.vs.CPU matrix mult differs\n");

    free(A);
    free(B);
    free(C);
    HIP_CHECK(hipFree(A_G));
    HIP_CHECK(hipFree(B_G));
    HIP_CHECK(hipFree(C_G));

    // The original returned 1 unconditionally, signalling failure to the
    // shell even on success.
    return ok ? 0 : 1;
}
8ced22a1fe6b9b83d29d5d367dadd24f5ec09044.cu
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>

// Abort with a readable message when a CUDA runtime call fails.
// The original checked no return codes at all.
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// CPU reference multiply: C (hA x wB) = A (hA x wA) * B (wA x wB).
void Mul(float *A, float *B, int hA, int wA, int wB, float *C)
{
    for (int i = 0; i < hA; i++)
        for (int j = 0; j < wB; j++) {
            C[i*wB + j] = 0.0f;
            for (int k = 0; k < wA; k++)
                C[i*wB + j] += A[i*wA + k] * B[k*wB + j];
        }
}

// GPU multiply: one thread per output element.
// Thread x-index i selects the row of A, y-index j the column of B;
// the guard handles grids that overshoot the matrix extents.
__global__ void MulGpu(float *A, float *B, float *C, int hA, int wA, int wB)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    if (i < hA && j < wB)
        for (int k = 0; k < wA; k++)
            C[i*wB + j] += A[i*wA + k] * B[k*wB + j];
}

// Fill M (hM x wM): k on the diagonal, -1/wM everywhere else.
void init_matrix(float *M, int hM, int wM, float k)
{
    for (int i = 0; i < hM; i++)
        for (int j = 0; j < wM; j++)
            M[i*wM + j] = (i == j) ? k * 1.0f : -1.0f / (float)wM;
}

void print_matrix(float *M, int hM, int wM)
{
    for (int i = 0; i < hM; i++) {
        for (int j = 0; j < wM; j++)
            printf("%4.1f ", M[i*wM + j]);
        printf("\n");
    }
}

// Recompute A*B on the CPU and compare against C element-wise.
// Returns 1 when every element matches within 1e-5, 0 otherwise
// (prints the first mismatch, like the original).
int diff(float *A, float *B, int hA, int wA, int wB, float *C)
{
    float *C_cpu = (float *)malloc((size_t)wB * hA * sizeof(float));
    if (C_cpu == NULL)
        return 0;
    Mul(A, B, hA, wA, wB, C_cpu);
    int ok = 1;
    for (int i = 0; i < hA && ok; i++)
        for (int j = 0; j < wB; j++)
            if (fabsf(C_cpu[i*wB + j] - C[i*wB + j]) > 1e-5f) {
                printf("[%i,%i]: %f!=%f\n", i, j, C_cpu[i*wB + j], C[i*wB + j]);
                ok = 0;
                break;
            }
    free(C_cpu);  // was leaked in the original
    return ok;
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    setbuf(stdout, NULL);
    if (argc != 4) {
        printf("./exec hA hB/WA wB\n");
        exit(-1);
    }
    int hA = atoi(argv[1]);
    int wA = atoi(argv[2]);
    int hB = wA;                  // inner dimensions must agree
    int wB = atoi(argv[3]);

    // Init A and B, zero C.
    float *A = (float *)malloc((size_t)hA * wA * sizeof(float));
    float *B = (float *)malloc((size_t)hB * wB * sizeof(float));
    float *C = (float *)malloc((size_t)hA * wB * sizeof(float));
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "host allocation failed\n");
        exit(EXIT_FAILURE);
    }
    init_matrix(A, hA, wA, 1.0f);
    init_matrix(B, hB, wB, 2.0f);
    for (int i = 0; i < hA * wB; i++)
        C[i] = 0.0f;

    float *A_G, *B_G, *C_G;
    CUDA_CHECK(cudaMalloc((void **)&A_G, sizeof(float) * hA * wA));
    CUDA_CHECK(cudaMalloc((void **)&B_G, sizeof(float) * hB * wB));
    CUDA_CHECK(cudaMalloc((void **)&C_G, sizeof(float) * hA * wB));
    CUDA_CHECK(cudaMemcpy(A_G, A, sizeof(float) * hA * wA, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(B_G, B, sizeof(float) * hB * wB, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(C_G, C, sizeof(float) * hA * wB, cudaMemcpyHostToDevice));

    // One 32x32 thread block per 32x32 tile of C.  The original sized the
    // grid as ((hA*wB)/32, (hA*wB)/32), launching far more blocks than
    // needed and overflowing grid-dimension limits for moderate matrices.
    dim3 nThreads_per_block(32, 32);
    dim3 nBlocks((hA + 31) / 32, (wB + 31) / 32);
    MulGpu<<<nBlocks, nThreads_per_block>>>(A_G, B_G, C_G, hA, wA, wB);
    CUDA_CHECK(cudaGetLastError());

    CUDA_CHECK(cudaMemcpy(C, C_G, sizeof(float) * hA * wB, cudaMemcpyDeviceToHost));

    int ok = diff(A, B, hA, wA, wB, C);
    if (!ok)
        printf("ERROR=GPU.vs.CPU matrix mult differs\n");

    free(A);
    free(B);
    free(C);
    CUDA_CHECK(cudaFree(A_G));
    CUDA_CHECK(cudaFree(B_G));
    CUDA_CHECK(cudaFree(C_G));

    // The original returned 1 unconditionally, signalling failure to the
    // shell even on success.
    return ok ? 0 : 1;
}
2d418100143a772f2220b40e97c6f0739430f4ad.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <iomanip>
#include <stdio.h>
#include <vector>
#include <fstream>
#include "random"

#define space 15
// Half-extent of the sampled volume along each axis.  Renamed from
// dim1/dim2/dim3: an object-like macro named `dim3` collides with HIP's
// dim3 launch-geometry type and breaks the hipLaunchKernelGGL expansion.
#define DIM_X 100 //X
#define DIM_Y 100 //Y
#define DIM_Z 100 //Z
#define incr 1
#define rad_circles 1
#define num_circles 1000

using namespace std;

// A 3-D point; also used for metaball ("circle") centres.
struct point3D {
    double x, y, z;
    point3D(double _x, double _y, double _z)
    {
        x = _x;
        y = _y;
        z = _z;
    }
};

vector<point3D> v_point;    // vector of points
vector<point3D> v_circles;  // vector of circles' center
ofstream mb_file("data.obj");

// For each sample point, accumulate the metaball field contribution
// (1 - d^2/r^2)^2 of every centre within rad_circles.  A point whose
// accumulated field exceeds the threshold is copied into `intersection`
// (which the caller pre-fills with the sentinel (-1,-1,-1)).
// `total` bounds-checks the tail when the grid overshoots the point count
// (the original kernel had no guard).
__global__ void add(point3D *points, point3D *circles, point3D *intersection, int total)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= total)
        return;
    double sum = 0.0;
    double thresh = 0.2;
    for (int i = 0; i < num_circles; i++) {
        double dif_x = points[index].x - circles[i].x;
        double dif_y = points[index].y - circles[i].y;
        double dif_z = points[index].z - circles[i].z;
        double value = sqrt(dif_x*dif_x + dif_y*dif_y + dif_z*dif_z);
        if (value <= rad_circles) {
            value = 1 - ((value * value) / ((double)rad_circles * rad_circles));
            sum = sum + (value * value);
            if (sum > thresh) {
                intersection[index].x = points[index].x;
                intersection[index].y = points[index].y;
                intersection[index].z = points[index].z;
            }
        }
    }
}

// Set `size` entries to the sentinel value (-1,-1,-1).
void init_ppoint3D(point3D *&points, int size)
{
    for (int i = 0; i < size; i++)
        points[i] = point3D(-1, -1, -1);
    cout << "vector initialized \n";
}

void print_flag(string message, int type)
{
    if (type == 1)
        cout << message << endl;
    else if (type == 2)
        cout << message << "\t";
}

int main(void)
{
    time_t timer = time(0);

    // Number of sample points: (2*DIM_X/incr) * (2*DIM_Y/incr) * (2*DIM_Z/incr).
    int size_matrix = 8 * (DIM_X/incr) * (DIM_Y/incr) * (DIM_Z/incr);
    cout << "# elements: " << size_matrix << endl;
    int size = size_matrix * sizeof(point3D);
    cout << "size of matrix: " << size << endl;

    // Host buffers.  `circles` only ever holds num_circles centres; the
    // original allocated (and copied) the full dataset size for it.
    point3D *points = (point3D *)malloc(size);
    point3D *circles = (point3D *)malloc(num_circles * sizeof(point3D));
    point3D *intersections = (point3D *)malloc(size);
    init_ppoint3D(intersections, size_matrix);

    // Random metaball centres inside the sampled volume.
    default_random_engine rng(random_device{}());
    uniform_int_distribution<int> dist(-DIM_X, DIM_X);
    for (int i = 0; i < num_circles; i++)
        circles[i] = point3D(dist(rng), dist(rng), dist(rng));

    int index = 0;
    cout << "Filling vectors" << endl;
    for (double i = -DIM_X; i < DIM_X; i = i + incr)
        for (double j = -DIM_Y; j < DIM_Y; j = j + incr)
            for (double k = -DIM_Z; k < DIM_Z; k = k + incr) {
                points[index].x = i;
                points[index].y = j;
                points[index].z = k;
                index++;
            }
    cout << index << "\n";
    cout << "vectors ready" << endl;

    // Device copies.
    point3D *d_points, *d_circles, *d_intersections;
    hipMalloc((void **)&d_points, size);
    hipMalloc((void **)&d_circles, num_circles * sizeof(point3D));
    hipMalloc((void **)&d_intersections, size);
    hipMemcpy(d_points, points, size, hipMemcpyHostToDevice);
    hipMemcpy(d_circles, circles, num_circles * sizeof(point3D), hipMemcpyHostToDevice);
    hipMemcpy(d_intersections, intersections, size, hipMemcpyHostToDevice);

    // Ceil-divide so the tail is covered when `index` is not a multiple of
    // the block size (the original floor-divided and silently skipped it);
    // the kernel bounds-checks against `index`.
    int num_threadsxblock = 1000;
    int num_blocks = (index + num_threadsxblock - 1) / num_threadsxblock;
    hipLaunchKernelGGL(add, dim3(num_blocks), dim3(num_threadsxblock), 0, 0,
                       d_points, d_circles, d_intersections, index);

    hipMemcpy(intersections, d_intersections, size, hipMemcpyDeviceToHost);

    // Emit every point whose field crossed the threshold as an OBJ vertex.
    for (int i = 0; i < size_matrix; ++i) {
        if (intersections[i].x != -1 && intersections[i].y != -1 && intersections[i].z != -1) {
            mb_file << "v" << setw(space) << intersections[i].x
                    << setw(space) << intersections[i].y
                    << setw(space) << intersections[i].z << endl;
        }
    }
    cout << std::endl;

    // Cleanup
    free(points);
    free(circles);
    free(intersections);
    hipFree(d_points);
    hipFree(d_circles);
    hipFree(d_intersections);
    mb_file.close();

    time_t timer2 = time(0);
    cout << "Tiempo total: " << difftime(timer2, timer) << endl;
    return 0;
}
2d418100143a772f2220b40e97c6f0739430f4ad.cu
#include <iostream>
#include <math.h>
#include <iomanip>
#include <stdio.h>
#include <vector>
#include <fstream>
#include "random"

#define space 15
// Half-extent of the sampled volume along each axis.  Renamed from
// dim1/dim2/dim3: an object-like macro named `dim3` shadows CUDA's dim3
// launch-geometry type and is a latent compile hazard.
#define DIM_X 100 //X
#define DIM_Y 100 //Y
#define DIM_Z 100 //Z
#define incr 1
#define rad_circles 1
#define num_circles 1000

using namespace std;

// A 3-D point; also used for metaball ("circle") centres.
struct point3D {
    double x, y, z;
    point3D(double _x, double _y, double _z)
    {
        x = _x;
        y = _y;
        z = _z;
    }
};

vector<point3D> v_point;    // vector of points
vector<point3D> v_circles;  // vector of circles' center
ofstream mb_file("data.obj");

// For each sample point, accumulate the metaball field contribution
// (1 - d^2/r^2)^2 of every centre within rad_circles.  A point whose
// accumulated field exceeds the threshold is copied into `intersection`
// (which the caller pre-fills with the sentinel (-1,-1,-1)).
// `total` bounds-checks the tail when the grid overshoots the point count
// (the original kernel had no guard).
__global__ void add(point3D *points, point3D *circles, point3D *intersection, int total)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= total)
        return;
    double sum = 0.0;
    double thresh = 0.2;
    for (int i = 0; i < num_circles; i++) {
        double dif_x = points[index].x - circles[i].x;
        double dif_y = points[index].y - circles[i].y;
        double dif_z = points[index].z - circles[i].z;
        double value = sqrt(dif_x*dif_x + dif_y*dif_y + dif_z*dif_z);
        if (value <= rad_circles) {
            value = 1 - ((value * value) / ((double)rad_circles * rad_circles));
            sum = sum + (value * value);
            if (sum > thresh) {
                intersection[index].x = points[index].x;
                intersection[index].y = points[index].y;
                intersection[index].z = points[index].z;
            }
        }
    }
}

// Set `size` entries to the sentinel value (-1,-1,-1).
void init_ppoint3D(point3D *&points, int size)
{
    for (int i = 0; i < size; i++)
        points[i] = point3D(-1, -1, -1);
    cout << "vector initialized \n";
}

void print_flag(string message, int type)
{
    if (type == 1)
        cout << message << endl;
    else if (type == 2)
        cout << message << "\t";
}

int main(void)
{
    time_t timer = time(0);

    // Number of sample points: (2*DIM_X/incr) * (2*DIM_Y/incr) * (2*DIM_Z/incr).
    int size_matrix = 8 * (DIM_X/incr) * (DIM_Y/incr) * (DIM_Z/incr);
    cout << "# elements: " << size_matrix << endl;
    int size = size_matrix * sizeof(point3D);
    cout << "size of matrix: " << size << endl;

    // Host buffers.  `circles` only ever holds num_circles centres; the
    // original allocated (and copied) the full dataset size for it.
    point3D *points = (point3D *)malloc(size);
    point3D *circles = (point3D *)malloc(num_circles * sizeof(point3D));
    point3D *intersections = (point3D *)malloc(size);
    init_ppoint3D(intersections, size_matrix);

    // Random metaball centres inside the sampled volume.
    default_random_engine rng(random_device{}());
    uniform_int_distribution<int> dist(-DIM_X, DIM_X);
    for (int i = 0; i < num_circles; i++)
        circles[i] = point3D(dist(rng), dist(rng), dist(rng));

    int index = 0;
    cout << "Filling vectors" << endl;
    for (double i = -DIM_X; i < DIM_X; i = i + incr)
        for (double j = -DIM_Y; j < DIM_Y; j = j + incr)
            for (double k = -DIM_Z; k < DIM_Z; k = k + incr) {
                points[index].x = i;
                points[index].y = j;
                points[index].z = k;
                index++;
            }
    cout << index << "\n";
    cout << "vectors ready" << endl;

    // Device copies.
    point3D *d_points, *d_circles, *d_intersections;
    cudaMalloc((void **)&d_points, size);
    cudaMalloc((void **)&d_circles, num_circles * sizeof(point3D));
    cudaMalloc((void **)&d_intersections, size);
    cudaMemcpy(d_points, points, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_circles, circles, num_circles * sizeof(point3D), cudaMemcpyHostToDevice);
    cudaMemcpy(d_intersections, intersections, size, cudaMemcpyHostToDevice);

    // Ceil-divide so the tail is covered when `index` is not a multiple of
    // the block size (the original floor-divided and silently skipped it);
    // the kernel bounds-checks against `index`.
    int num_threadsxblock = 1000;
    int num_blocks = (index + num_threadsxblock - 1) / num_threadsxblock;
    add<<<num_blocks, num_threadsxblock>>>(d_points, d_circles, d_intersections, index);

    cudaMemcpy(intersections, d_intersections, size, cudaMemcpyDeviceToHost);

    // Emit every point whose field crossed the threshold as an OBJ vertex.
    for (int i = 0; i < size_matrix; ++i) {
        if (intersections[i].x != -1 && intersections[i].y != -1 && intersections[i].z != -1) {
            mb_file << "v" << setw(space) << intersections[i].x
                    << setw(space) << intersections[i].y
                    << setw(space) << intersections[i].z << endl;
        }
    }
    cout << std::endl;

    // Cleanup
    free(points);
    free(circles);
    free(intersections);
    cudaFree(d_points);
    cudaFree(d_circles);
    cudaFree(d_intersections);
    mb_file.close();

    time_t timer2 = time(0);
    cout << "Tiempo total: " << difftime(timer2, timer) << endl;
    return 0;
}
5ebbc5232288f2bd081c7f03c8eefdd4e76cc1aa.hip
// !!! This is a file automatically generated by hipify!!!
//************************** kmeansgpu.cu ***************************
//*******************Developed by Jose M. Cecilia*******************
//************************* October 2018************************
#include "hip/hip_runtime.h"
#include "kmeansgpu.h"
#include <hiprand/hiprand.h>
#include "hip/device_functions.h"
#include <hiprand/hiprand_kernel.h>
#include <math.h>

#define TAMBLOCK 32

// Integer ceil-division for launch geometry.  The original wrote
// ceil(k/TAMBLOCK) on ints, which truncates BEFORE rounding and yields
// zero blocks whenever k < TAMBLOCK.
#define CEIL_DIV(a, b) (((int)(a) + (int)(b) - 1) / (int)(b))

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
    if (code != hipSuccess) {
        const char *error = hipGetErrorString(code);
        fprintf(stderr, "GPUassert: %s %s %d\n", error, file, line);
        if (abort) exit(code);
    }
}

// One hiprand state per centroid thread (launched with k threads).
__global__ void setup_kernel(hiprandState_t *state, unsigned long seed)
{
    int index = threadIdx.x;
    hiprand_init(seed, index, 0, &state[index]);
}

// Seed each centroid with a uniformly random data row.
// Fixes vs. original: the row index is clamped to [0, rows) (ceil() of
// uniform*rows could reach `rows`, one past the end), and the source
// offset is row*columns — the original indexed dataSetMatrix[random+i],
// reading a misaligned slice of unrelated rows.
__global__ void random_init_centroidCUDA(float *cluster_centro_id, float *dataSetMatrix,
                                         int clusters, float rows, int columns,
                                         hiprandState_t *D_state)
{
    int tx = threadIdx.x;
    int pos = tx * columns;
    int row = (int)(hiprand_uniform(&D_state[tx]) * rows);
    if (row >= (int)rows)
        row = (int)rows - 1;
    for (int i = 0; i < columns; i++)
        cluster_centro_id[pos + i] = dataSetMatrix[row * columns + i];
}

// Squared Euclidean distance between two dim-vectors (sqr() comes from
// kmeansgpu.h).
__device__ float calc_distances(int dim, float *p1, float *p2)
{
    float distance_sq_sum = 0;
    for (int i = 0; i < dim; ++i)
        distance_sq_sum += sqr(p1[i] - p2[i]);
    return distance_sq_sum;
}

// Legacy single-block variant: thread tx handles point tx.
// (Unused by kmeansCUDA; kept for compatibility.  Point offset fixed to
// tx*dim — the original scaled it by k as well.)
__global__ void calc_all_distancesCUDA(int dim, int k, float *d_X, float *centroid, float *dist)
{
    int tx = threadIdx.x;
    for (int i = 0; i < k; ++i)
        dist[tx*k + i] = calc_distances(dim, &d_X[tx*dim], &centroid[i*dim]);
}

// 2-D grid: x indexes centroids, y indexes points.
// Fixes the original's addressing: it pre-multiplied the point index by k
// and then used that both for the dist offset AND for d_X, reading far
// out of bounds for k > 1 and mis-guarding against n.
__global__ void calc_all_distancesCUDA2(int k, float n, int dim, float *d_X,
                                        float *centroid, float *dist)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;   // centroid index
    int y = blockIdx.y * blockDim.y + threadIdx.y;   // point index
    if (x < k && y < (int)n)
        dist[y*k + x] = calc_distances(dim, &d_X[y*dim], &centroid[x*dim]);
}

// For each point, pick the centroid with the smallest distance.
__global__ void choose_all_clusters_from_distancesCUDA(float n, int k, float *dist,
                                                       int *cluster_assignment_index)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x < n) {
        int best_index = -1;
        float closest_distance = INFINITY;
        for (int i = 0; i < k; i++) {
            float cur_distance = dist[x*k + i];
            if (cur_distance < closest_distance) {
                best_index = i;
                closest_distance = cur_distance;
            }
        }
        cluster_assignment_index[x] = best_index;
    }
}

// Bounds-checked element copy.  The original had no guard and wrote past
// `n` in the rounded-up grid tail.
__global__ void copy_assignment_arrayCUDA(int *src, int *tgt, int n)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x < n)
        tgt[x] = src[x];
}

// Gather, per point, the distance to its assigned centroid.
__global__ void calc_short_distanceCUDA(float n, int k, float *dist,
                                        int *cluster_assignment_index, float *d_short_dist)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x < n) {
        int active_cluster = cluster_assignment_index[x];
        if (active_cluster != -1)
            d_short_dist[x] = dist[x*k + active_cluster];
    }
}

// Sum d_short_dist[0..n) into d_short_dist[0].
// Rewritten as a single-block grid-stride accumulation + shared-memory
// tree reduction: the original used a non-existent syncthreads() call,
// synchronized inside divergent control flow, and raced across blocks
// with no grid-wide barrier.  Launch with <<<1, TAMBLOCK>>>.
__global__ void suma_arrayCUDA(float n, float *d_short_dist)
{
    __shared__ float partial[TAMBLOCK];
    int num = (int)n;
    float acc = 0.0f;
    for (int i = threadIdx.x; i < num; i += blockDim.x)
        acc += d_short_dist[i];
    partial[threadIdx.x] = acc;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s)
            partial[threadIdx.x] += partial[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        d_short_dist[0] = partial[0];
}

// Host wrapper: after this, d_short_dist[0] holds the total distance of
// all points to their assigned centroids.
void calc_total_distance(float n, int k, float *dist, int *cluster_assignment_index,
                         float *d_short_dist)
{
    dim3 block(CEIL_DIV(n, TAMBLOCK));
    dim3 thread(TAMBLOCK);
    hipLaunchKernelGGL(calc_short_distanceCUDA, block, thread, 0, 0,
                       n, k, dist, cluster_assignment_index, d_short_dist);
    hipLaunchKernelGGL(suma_arrayCUDA, dim3(1), dim3(TAMBLOCK), 0, 0, n, d_short_dist);
}

// Zero the per-cluster counters and centroid accumulators (k threads).
__global__ void init_cluster_centroid(int dim, int *cluster_member_count,
                                      float *new_cluster_centroid)
{
    int x = threadIdx.x;
    cluster_member_count[x] = 0;
    for (int i = 0; i < dim; i++)
        new_cluster_centroid[x*dim + i] = 0;
}

// Atomically accumulate each point into its assigned cluster's sum/count.
__global__ void sum_all_points_of_cluster(float n, int dim, float *X,
                                          int *cluster_assignment_index,
                                          int *cluster_member_count,
                                          float *new_cluster_centroid)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x < n) {
        int active_cluster = cluster_assignment_index[x];
        atomicAdd(&cluster_member_count[active_cluster], 1);
        for (int i = 0; i < dim; i++)
            atomicAdd(&new_cluster_centroid[active_cluster*dim + i], X[x*dim + i]);
    }
}

// Divide each accumulated centroid by its member count.  Empty clusters
// keep a zero centroid; the original assigned 0.00005 to an int counter
// (which truncates to 0) and then divided by it.
__global__ void media_points_of_cluster(int dim, int *cluster_member_count,
                                        float *new_cluster_centroid)
{
    int x = threadIdx.x;
    int count = cluster_member_count[x];
    if (count == 0)
        return;
    for (int i = 0; i < dim; i++)
        new_cluster_centroid[x*dim + i] /= count;
}

// Recompute every centroid as the mean of its assigned points.
void calc_cluster_centroidsCUDA(int dim, float n, int k, float *X,
                                int *cluster_assignment_index, float *new_cluster_centroid)
{
    int *cluster_member_count;
    gpuErrchk(hipMalloc(&cluster_member_count, sizeof(int) * k));
    hipLaunchKernelGGL(init_cluster_centroid, dim3(1), dim3(k), 0, 0,
                       dim, cluster_member_count, new_cluster_centroid);
    hipLaunchKernelGGL(sum_all_points_of_cluster, dim3(CEIL_DIV(n, TAMBLOCK)), dim3(TAMBLOCK), 0, 0,
                       n, dim, X, cluster_assignment_index, cluster_member_count,
                       new_cluster_centroid);
    hipLaunchKernelGGL(media_points_of_cluster, dim3(1), dim3(k), 0, 0,
                       dim, cluster_member_count, new_cluster_centroid);
    hipFree(cluster_member_count);  // was leaked on every iteration
}

// Debug helper: print the member count of each cluster.
void mostrar_puntos_clusters(int dim, float n, int k, float *X,
                             int *cluster_assignment_index, float *new_cluster_centroid)
{
    int *cluster_member_count;
    int memsize = sizeof(int) * k;
    gpuErrchk(hipMalloc(&cluster_member_count, memsize));
    hipLaunchKernelGGL(init_cluster_centroid, dim3(1), dim3(k), 0, 0,
                       dim, cluster_member_count, new_cluster_centroid);
    hipLaunchKernelGGL(sum_all_points_of_cluster, dim3(CEIL_DIV(n, TAMBLOCK)), dim3(TAMBLOCK), 0, 0,
                       n, dim, X, cluster_assignment_index, cluster_member_count,
                       new_cluster_centroid);
    int *member = (int *)malloc(memsize);
    gpuErrchk(hipMemcpy(member, cluster_member_count, memsize, hipMemcpyDeviceToHost));
    for (int i = 0; i < k; i++)
        printf("%d - %d\n", i + 1, member[i]);
    free(member);                   // both buffers were leaked before
    hipFree(cluster_member_count);
}

// Entry point: Lloyd's k-means with random restarts when an iteration does
// not improve the total distance.  H_cluster_assignment_final receives the
// final per-point cluster index.  Returns 0.
extern "C" int kmeansCUDA(int dim, float *H_X, float n, int k, float *H_cluster_centroid,
                          int iterations, int *H_cluster_assignment_final)
{
    float *d_cluster_centroid, *d_X, *d_dist, *d_short_dist;
    int *d_cluster_assignment_final, *d_cluster_assignment_cur, *d_cluster_assignment_prev;
    int num = (int)n;

    gpuErrchk(hipMalloc(&d_cluster_centroid, sizeof(float) * k * dim));
    gpuErrchk(hipMalloc(&d_X, sizeof(float) * num * dim));
    gpuErrchk(hipMemcpy(d_X, H_X, sizeof(float) * num * dim, hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&d_dist, sizeof(float) * num * k));
    gpuErrchk(hipMalloc(&d_cluster_assignment_final, sizeof(int) * num));
    gpuErrchk(hipMemcpy(d_cluster_assignment_final, H_cluster_assignment_final,
                        sizeof(int) * num, hipMemcpyHostToDevice));
    gpuErrchk(hipMalloc(&d_cluster_assignment_cur, sizeof(int) * num));
    gpuErrchk(hipMalloc(&d_cluster_assignment_prev, sizeof(int) * num));
    gpuErrchk(hipMalloc(&d_short_dist, sizeof(float) * num));

    hiprandState_t *devStates;
    gpuErrchk(hipMalloc(&devStates, k * sizeof(hiprandState_t)));

    // Initial random centroids (one RNG state / centroid per thread).
    time_t t;
    time(&t);
    hipLaunchKernelGGL(setup_kernel, dim3(1), dim3(k), 0, 0, devStates, (unsigned long)t);
    hipLaunchKernelGGL(random_init_centroidCUDA, dim3(1), dim3(k), 0, 0,
                       d_cluster_centroid, d_X, k, n, dim, devStates);

    // Reusable launch geometries: 2-D (centroids x points) and 1-D (points).
    dim3 block2(CEIL_DIV(k, TAMBLOCK), CEIL_DIV(n, TAMBLOCK));
    dim3 thread2(TAMBLOCK, TAMBLOCK);
    dim3 block1(CEIL_DIV(n, TAMBLOCK));
    dim3 thread1(TAMBLOCK);

    hipLaunchKernelGGL(calc_all_distancesCUDA2, block2, thread2, 0, 0,
                       k, n, dim, d_X, d_cluster_centroid, d_dist);
    hipLaunchKernelGGL(choose_all_clusters_from_distancesCUDA, block1, thread1, 0, 0,
                       n, k, d_dist, d_cluster_assignment_cur);
    hipLaunchKernelGGL(copy_assignment_arrayCUDA, block1, thread1, 0, 0,
                       d_cluster_assignment_cur, d_cluster_assignment_prev, num);

    // Only the reduced total in element 0 is needed on the host; the
    // original copied the whole n-element array back and forth (and leaked
    // the host staging buffer).
    calc_total_distance(n, k, d_dist, d_cluster_assignment_cur, d_short_dist);
    float prev_totD;
    gpuErrchk(hipMemcpy(&prev_totD, d_short_dist, sizeof(float), hipMemcpyDeviceToHost));

    for (int batch = 0; batch < iterations; ++batch) {
        calc_cluster_centroidsCUDA(dim, n, k, d_X, d_cluster_assignment_cur,
                                   d_cluster_centroid);
        calc_total_distance(n, k, d_dist, d_cluster_assignment_cur, d_short_dist);
        float totD;
        gpuErrchk(hipMemcpy(&totD, d_short_dist, sizeof(float), hipMemcpyDeviceToHost));

        if (totD >= prev_totD) {
            // No improvement: restore the previous assignment and restart
            // from fresh random centroids.
            hipLaunchKernelGGL(copy_assignment_arrayCUDA, block1, thread1, 0, 0,
                               d_cluster_assignment_prev, d_cluster_assignment_cur, num);
            time(&t);
            hipLaunchKernelGGL(setup_kernel, dim3(1), dim3(k), 0, 0,
                               devStates, (unsigned long)t);
            hipLaunchKernelGGL(random_init_centroidCUDA, dim3(1), dim3(k), 0, 0,
                               d_cluster_centroid, d_X, k, n, dim, devStates);
        } else {
            // Improvement: keep it and reassign points to the new centroids.
            hipLaunchKernelGGL(copy_assignment_arrayCUDA, block1, thread1, 0, 0,
                               d_cluster_assignment_cur, d_cluster_assignment_prev, num);
            hipLaunchKernelGGL(calc_all_distancesCUDA2, block2, thread2, 0, 0,
                               k, n, dim, d_X, d_cluster_centroid, d_dist);
            hipLaunchKernelGGL(choose_all_clusters_from_distancesCUDA, block1, thread1, 0, 0,
                               n, k, d_dist, d_cluster_assignment_cur);
            prev_totD = totD;
        }
    }

    hipLaunchKernelGGL(copy_assignment_arrayCUDA, block1, thread1, 0, 0,
                       d_cluster_assignment_cur, d_cluster_assignment_final, num);
    gpuErrchk(hipMemcpy(H_cluster_assignment_final, d_cluster_assignment_final,
                        sizeof(int) * num, hipMemcpyDeviceToHost));

    hipFree(d_cluster_centroid);
    hipFree(d_X);
    hipFree(d_dist);
    hipFree(d_cluster_assignment_cur);
    hipFree(d_cluster_assignment_prev);
    hipFree(d_cluster_assignment_final);
    hipFree(d_short_dist);
    hipFree(devStates);
    hipDeviceReset();
    return 0;
}
5ebbc5232288f2bd081c7f03c8eefdd4e76cc1aa.cu
//************************** kmeansgpu.cu *************************** //*******************Developed by José M. Cecilia******************* //************************* October 2018************************ #include "cuda.h" #include "kmeansgpu.h" #include <curand.h> #include "device_functions.h" #include <curand_kernel.h> #include <math.h> #define TAMBLOCK 32 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { const char * error = cudaGetErrorString(code); fprintf(stderr, "GPUassert: %s %s %d\n", error, file, line); if (abort) exit(code); } } __global__ void setup_kernel(curandState *state, unsigned long seed) { int index = threadIdx.x; curand_init(seed, index, 0, &state[index]); __syncthreads(); } __global__ void random_init_centroidCUDA(float * cluster_centro_id, float * dataSetMatrix, int clusters, float rows, int columns, curandState *D_state) { int tx = threadIdx.x; int pos=tx*columns; int random = ceil(curand_uniform(&D_state[tx])*rows); for (int i=0; i<columns; i++){ cluster_centro_id[pos+i] = dataSetMatrix[random+i]; //printf ("El random es %f para el thread %d\n", cluster_centro_id[pos+i], tx); } } __device__ float calc_distances(int dim, float *p1, float *p2) { float distance_sq_sum = 0; for (int i = 0; i < dim; ++i) distance_sq_sum += sqr(p1[i] - p2[i]); return distance_sq_sum; } __global__ void calc_all_distancesCUDA(int dim, int k, float *d_X, float *centroid, float *dist) { int tx = threadIdx.x; int pos = tx * k; for(int i=0; i<k; ++i){ dist[pos+i] = calc_distances(dim, &d_X[pos*dim], &centroid[i*dim]); } } __global__ void calc_all_distancesCUDA2(int k, float n, int dim, float *d_X, float *centroid, float * dist) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y * k + threadIdx.y * k; int index = y + x; if(x < k && y < n){ dist[index] = calc_distances(dim, &d_X[y*dim], &centroid[x*dim]); } } 
__global__ void choose_all_clusters_from_distancesCUDA(float n, int k, float *dist, int *cluster_assignment_index){ int x = blockIdx.x * blockDim.x + threadIdx.x; if(x<n){ int best_index = -1; float closest_distance = INFINITY; for (int i = 0; i < k; i++){ float cur_distance = dist[x*k+i]; if(cur_distance < closest_distance){ best_index = i; closest_distance = cur_distance; } } cluster_assignment_index[x] = best_index; } } __global__ void copy_assignment_arrayCUDA(int *src, int *tgt) { int x = blockIdx.x * blockDim.x + threadIdx.x; tgt[x] = src[x]; } __global__ void calc_short_distanceCUDA(float n, int k, float *dist, int *cluster_assignment_index, float *d_short_dist){ int x = blockIdx.x * blockDim.x + threadIdx.x; if(x<n){ int active_cluster = cluster_assignment_index[x]; if (active_cluster != -1){ d_short_dist[x] = dist[x*k+active_cluster]; } } } __global__ void suma_arrayCUDA(float n,float * d_short_dist){ int dim; int num = (int) n; if(num%2 != 0){ dim = (num+1)/2; } else{ dim = num/2; } int index = blockIdx.x * blockDim.x + threadIdx.x; int index2 = dim + index; for(int i=0; i<ceil(log2f(num)); i++){ if(index<dim){ d_short_dist[index]+=d_short_dist[index2]; if(dim%2 != 0){ dim = (dim+1)/2; } else{ dim = dim/2; } index2 = dim + index; syncthreads(); } } } void calc_total_distance(float n, int k, float *dist, int *cluster_assignment_index, float *d_short_dist){ dim3 block(ceil(n/TAMBLOCK)); dim3 thread(TAMBLOCK); calc_short_distanceCUDA<<<block, thread>>> (n, k, dist, cluster_assignment_index, d_short_dist); block.x = ceil(n/2/TAMBLOCK); suma_arrayCUDA<<<block, thread>>> (n,d_short_dist); // return d_short_dist[0]; } __global__ void init_cluster_centroid(int dim, int * cluster_member_count, float * new_cluster_centroid){ int x = threadIdx.x; cluster_member_count[x] = 0; for (int i=0; i < dim; i++){ new_cluster_centroid[x*dim+i] = 0; } } __global__ void sum_all_points_of_cluster(float n, int dim, float *X, int *cluster_assignment_index, int 
*cluster_member_count, float *new_cluster_centroid){ int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < n){ int active_cluster = cluster_assignment_index[x]; atomicAdd(&cluster_member_count[active_cluster], 1); for(int i = 0; i < dim; i++){ atomicAdd(&new_cluster_centroid[active_cluster*dim + i], X[x*dim + i]); } } } __global__ void media_points_of_cluster(int dim, int *cluster_member_count, float *new_cluster_centroid){ int x = threadIdx.x; if(cluster_member_count[x] == 0){ cluster_member_count[x] = 0.00005; } for(int i = 0; i < dim; i++){ new_cluster_centroid[x*dim + i] /= cluster_member_count[x]; } } void calc_cluster_centroidsCUDA(int dim, float n, int k, float *X, int *cluster_assignment_index, float *new_cluster_centroid){ int * cluster_member_count; int memsize = sizeof(int) * k; cudaMalloc(&cluster_member_count, memsize); dim3 block (k/k); dim3 thread (k); init_cluster_centroid <<<block, thread>>> (dim, cluster_member_count, new_cluster_centroid); block.x = ceil(n/TAMBLOCK); thread.x = TAMBLOCK; sum_all_points_of_cluster <<<block, thread>>> (n, dim, X,cluster_assignment_index, cluster_member_count, new_cluster_centroid); block.x = (k/k); thread.x = k; media_points_of_cluster <<<block, thread>>> (dim, cluster_member_count, new_cluster_centroid); } void mostrar_puntos_clusters(int dim, float n, int k, float *X, int *cluster_assignment_index, float *new_cluster_centroid){ int * cluster_member_count; int memsize = sizeof(int) * k; cudaMalloc(&cluster_member_count, memsize); dim3 block (k/k); dim3 thread (k); init_cluster_centroid <<<block, thread>>> (dim, cluster_member_count, new_cluster_centroid); block.x = ceil(n/TAMBLOCK); thread.x = TAMBLOCK; sum_all_points_of_cluster <<<block, thread>>> (n, dim, X,cluster_assignment_index, cluster_member_count, new_cluster_centroid); int * member = (int *) malloc(memsize); cudaMemcpy (member, cluster_member_count, memsize, cudaMemcpyDeviceToHost); for(int i = 0; i < k; i++){ printf("%d - %d\n", i+1, member[i]); } 
cudaMemcpy (cluster_member_count, member, memsize, cudaMemcpyHostToDevice); } extern "C" int kmeansCUDA(int dim, float *H_X, float n, int k, float *H_cluster_centroid, int iterations, int *H_cluster_assignment_final) { float *d_cluster_centroid, *d_X, *d_dist, *d_short_dist; int *d_cluster_assignment_final, *d_cluster_assignment_cur, *d_cluster_assignment_prev; int memsize; memsize = sizeof(float) * k * dim; cudaMalloc (&d_cluster_centroid, memsize); memsize = sizeof(float) * n * dim; cudaMalloc (&d_X, memsize); cudaMemcpy (d_X, H_X, memsize, cudaMemcpyHostToDevice); memsize = sizeof(float) * n * k; cudaMalloc (&d_dist, memsize); memsize = sizeof(int) * n; cudaMalloc(&d_cluster_assignment_final, memsize); cudaMemcpy (d_cluster_assignment_final, H_cluster_assignment_final, memsize, cudaMemcpyHostToDevice); cudaMalloc(&d_cluster_assignment_cur, memsize); cudaMalloc(&d_cluster_assignment_prev, memsize); memsize = sizeof(float) * n; cudaMalloc(&d_short_dist, memsize); curandState *devStates; cudaMalloc (&devStates, k * sizeof(curandState)); time_t t; time(&t); dim3 block (k/k); dim3 thread (k); setup_kernel<<<block, thread>>> (devStates, (unsigned long) t ); random_init_centroidCUDA<<<block, thread>>> (d_cluster_centroid, d_X, k, n, dim, devStates); //dim3 thread (n); //calc_all_distancesCUDA<<<block, thread >>> (dim, k, d_X, d_cluster_centroid, d_dist); block.x = ceil(k/TAMBLOCK); block.y = ceil(n/TAMBLOCK); thread.x=TAMBLOCK; thread.y=TAMBLOCK; calc_all_distancesCUDA2<<<block, thread>>> (k, n, dim, d_X, d_cluster_centroid, d_dist); block.x = ceil(n/TAMBLOCK); block.y = 1; thread.x = TAMBLOCK; thread.y = 1; choose_all_clusters_from_distancesCUDA <<<block, thread>>> (n, k, d_dist, d_cluster_assignment_cur); copy_assignment_arrayCUDA<<<block, thread>>> (d_cluster_assignment_cur, d_cluster_assignment_prev); calc_total_distance(n, k, d_dist, d_cluster_assignment_cur, d_short_dist); memsize = sizeof(float) * n; float * H_short_dist = (float *) malloc(memsize); cudaMemcpy 
(H_short_dist, d_short_dist, memsize, cudaMemcpyDeviceToHost); float prev_totD = H_short_dist[0]; cudaMemcpy (d_short_dist, H_short_dist, memsize, cudaMemcpyHostToDevice); int numVariations = 0; for(int batch=0; (batch < iterations); ++batch){ calc_cluster_centroidsCUDA(dim, n, k, d_X, d_cluster_assignment_cur, d_cluster_centroid); calc_total_distance(n, k, d_dist, d_cluster_assignment_cur, d_short_dist); memsize = sizeof(float) * n; cudaMemcpy (H_short_dist, d_short_dist, memsize, cudaMemcpyDeviceToHost); float totD = H_short_dist[0]; cudaMemcpy (d_short_dist, H_short_dist, memsize, cudaMemcpyHostToDevice); if(totD >= prev_totD){ block.x = ceil(n/TAMBLOCK); block.y = 1; thread.x = TAMBLOCK; thread.y = 1; copy_assignment_arrayCUDA <<<block, thread>>>(d_cluster_assignment_prev, d_cluster_assignment_cur); time(&t); block.x = (k/k); thread.x = k; setup_kernel<<<block, thread>>> (devStates, (unsigned long) t ); random_init_centroidCUDA<<<block, thread>>> (d_cluster_centroid, d_X, k, n, dim, devStates); } else{ block.x = ceil(n/TAMBLOCK); block.y = 1; thread.x = TAMBLOCK; thread.y = 1; copy_assignment_arrayCUDA <<<block, thread>>>(d_cluster_assignment_cur, d_cluster_assignment_prev); block.x = ceil(k/TAMBLOCK); block.y = ceil(n/TAMBLOCK); thread.x=TAMBLOCK; thread.y=TAMBLOCK; calc_all_distancesCUDA2<<<block, thread>>> (k, n, dim, d_X, d_cluster_centroid, d_dist); block.x = ceil(n/TAMBLOCK); block.y = 1; thread.x = TAMBLOCK; thread.y = 1; choose_all_clusters_from_distancesCUDA <<<block, thread>>> (n, k, d_dist, d_cluster_assignment_cur); prev_totD = totD; } } block.x = ceil(n/TAMBLOCK); block.y = 1; thread.x = TAMBLOCK; thread.y = 1; copy_assignment_arrayCUDA <<<block, thread>>>(d_cluster_assignment_cur, d_cluster_assignment_final); //mostrar_puntos_clusters(dim, n, k, d_X, d_cluster_assignment_final, d_cluster_centroid); //printf("Numero de puntos %f.\n",n); memsize = sizeof(int) * n; cudaMemcpy (H_cluster_assignment_final, d_cluster_assignment_final, memsize, 
cudaMemcpyDeviceToHost); cudaFree(d_cluster_centroid); cudaFree(d_X); cudaFree(d_dist); cudaFree(d_cluster_assignment_cur); cudaFree(d_cluster_assignment_prev); cudaFree(d_cluster_assignment_final); cudaFree(d_short_dist); cudaFree(devStates); cudaDeviceReset(); return 0; }
7756d6a6227b937571c3df33c9dbc5435c06fad7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************//** * \file LHS1.cu * \author Christopher Minar (minarc@oregonstate.edu) * \brief kernels to generate the left hand side for the intermediate velocity solve */ #include "LHS1.h" namespace kernels { __global__ void LHS1_mid_iter_X(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny, int *hybridTagsUV, int *ghostTagsUV, double *ns_rhs, double *interp_rhs, int *count, int *index1, int *index2, int *index3, int *index4, double *xu, double *yu, double *alpha, double *uB, //xu, yu not used double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *q1, double *q2, double *q3, double *q4 ) { if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny) return; int iu = threadIdx.x + blockDim.x * blockIdx.x, I = iu % (nx-1), J = iu / (nx-1); if (I == 0 || I == nx-2 || J == 0 || J == ny-1) return; //int numE = i*5; // top row - corner mid sides current row int numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*5 - 1; double temp = 1; if (hybridTagsUV[iu]>0) { int interp_index[4] = {index1[iu], index2[iu], index3[iu], index4[iu]}; double q[4] = {q1[iu], q2[iu], q3[iu], q4[iu]}; double CInterp[4]; double Cns[5]; Cns[0] = -dt*nu/(dy[J+1]*(dy[J]+dy[J+1])); Cns[1] = -dt*nu/(dx[I] *(dx[I]+dx[I+1])); Cns[2] = -dt*nu/(dy[J] *(dy[J]+dy[J+1])); Cns[3] = -dt*nu/(dx[I] *(dx[I]+dx[I-1])); Cns[4] = 1-Cns[0] - Cns[1] - Cns[2] - Cns[3]; CInterp[0] = q1coef[iu]; CInterp[1] = q2coef[iu]; CInterp[2] = q3coef[iu]; CInterp[3] = q4coef[iu]; for (int l=0; l<4; l++) { Cns[l] = Cns[l]*(1-alpha[iu])/Cns[4]; CInterp[l] = CInterp[l]*alpha[iu]; } /* 0 1 2 NW N NE * 3 4 5 W P E * 6 7 8 SW S SE */ int stencil_index[9] = {iu + (nx-1) - 1, iu + (nx-1), iu + (nx-1) + 1, iu - 1 , iu , iu + 1, iu - (nx-1) - 1, iu - (nx-1), iu - (nx-1) + 1}; double stencil[9] = {0, Cns[0], 0, Cns[3], 1, Cns[1], 0, Cns[2], 
0}; //combine ns and interp stencils bool stencil_used[9] = {false, true, false, true, true, true, false, true, false}; for (int n=0;n<4;n++) { for (int m=0;m<9;m++) { if (stencil_index[m] == interp_index[n] && m != 4) { stencil[m] -= CInterp[n]; //flag should this be minus? } } } //add ns to sparse matrix for (int m = 0; m<9; m++) { if (stencil_used[m]) { row[numE] = iu; col[numE] = stencil_index[m]; val[numE] = stencil[m]; numE++; } } ns_rhs[iu] = (1-alpha[iu])/Cns[4]; interp_rhs[iu] = 0; //calc new numE numE = ny*(nx-1)*5 - ny*2 - (nx-1)*2 + nx*(ny-1)*5 - nx*2 - (ny-1)*2 + count[iu]-1; //add interp corner to sparse matrix for (int n=0;n<4;n++) { for (int m=0;m<9;m++) { if (stencil_index[m] == interp_index[n] && !stencil_used[m]) { row[numE] = iu; col[numE] = interp_index[n]; val[numE] = -CInterp[n]; //this should be minus? } //else if(stencil_index[m] == interp_index[n] && stencil_used[m]) else if(stencil_index[m] == interp_index[n] && interp_index[n] == iu) interp_rhs[iu] += CInterp[n]*q[n]; } } } else if (ghostTagsUV[iu]>0) { int interp_index[4] = {index1[iu], index2[iu], index3[iu], index4[iu]}; bool interp_in[4] = {false, false, false, false}; int ns_index[5] = {iu + (nx-1), iu + 1, iu - (nx-1), iu -1, iu}; //n e s w p bool ns_overlap[5] = {false, false, false, false, true}; double q[4] = {q1[iu], q2[iu], q3[iu], q4[iu]}; double CInterp[4]; CInterp[0] = q1coef[iu]; CInterp[1] = q2coef[iu]; CInterp[2] = q3coef[iu]; CInterp[3] = q4coef[iu]; //count the number of nodes the interp is using //find how which ns nodes are occupied int counter = 0; temp = 0; for (int l=0; l<4; l++) { if (ghostTagsUV[interp_index[l]]>0) { counter +=1; interp_in[l] = true; } for (int n=0; n<5; n++) { if (interp_index[l] == ns_index[n]) ns_overlap[n] = true; } } //add center to matrix row[numE] = iu; col[numE] = iu; val[numE] = 1; numE++; //add real interp values to matrix for (int i=0; i<4; i++) { if (!interp_in[i] && interp_index[i] != iu) { row[numE] = iu; col[numE] = 
interp_index[i]; val[numE] = CInterp[i]; numE++; } else { temp -= CInterp[i] * q[i]; } } //fill remainder of values int counter2 = 0; for (int i=0; i<5; i++) { if (counter2>=counter) break; if (ns_overlap[i]==false) { row[numE] = iu; col[numE] = ns_index[i]; val[numE] = 0; numE++; counter2++; } } ns_rhs[iu] = 0; interp_rhs[iu] = 2*uB[0] + temp;//flag this doesn't account for the interpolation part } else { temp = 1 + 0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5)) + 0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5)) + 0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5)) + 0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5)); //EAST row[numE] = iu; col[numE] = iu+1; val[numE] = -0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5))/temp; numE++; //WEST row[numE] = iu; col[numE] = iu-1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5))/temp; numE++; //NORTH row[numE] = iu; col[numE] = iu+(nx-1); val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5))/temp; numE++; //SOUTH row[numE] = iu; col[numE] = iu-(nx-1); val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5))/temp; numE++; //CENTER row[numE] = iu; col[numE] = iu; val[numE] = 1; numE++; ns_rhs[iu] = 1/temp; interp_rhs[iu] = 0; } } __global__ void LHS1_mid_iter_Y(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny, int *hybridTagsUV, int *ghostTagsUV, double *ns_rhs, double *interp_rhs, int *count, int *index1, int *index2, int *index3, int *index4, double *xv, double *yv, double *alpha, double *vB, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *q1, double *q2, double *q3, double *q4 ) { if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1)) return; int ip = threadIdx.x + blockDim.x * blockIdx.x, I = ip % nx, J = ip / nx, iv = ip + (nx-1)*ny; if (I == 0 || I == nx-1 || J == 0 || J == ny-2) return; int numE = (nx-1)*ny*5 - 2*ny-2*(nx-1) + nx*4-2 + (J-1)*(nx*5 - 2) + I*5 - 1; double temp = 1; if (hybridTagsUV[iv]>0) { int interp_index[4] = {index1[iv], index2[iv], index3[iv], index4[iv]}; 
double q[4] = {q1[iv], q2[iv], q3[iv], q4[iv]}; double CInterp[4]; double Cns[5]; Cns[0] = -dt*nu/(dy[J+1]*(dy[J]+dy[J+1])); Cns[1] = -dt*nu/(dx[I]*(dx[I]+dx[I+1])); Cns[2] = -dt*nu/(dy[J]*(dy[J]+dy[J+1])); Cns[3] = -dt*nu/(dx[I]*(dx[I]+dx[I-1])); Cns[4] = 1-Cns[0] - Cns[1] - Cns[2] - Cns[3]; CInterp[0] = q1coef[iv]; CInterp[1] = q2coef[iv]; CInterp[2] = q3coef[iv]; CInterp[3] = q4coef[iv]; for (int l=0; l<4; l++) { Cns[l] = Cns[l]*(1-alpha[iv])/Cns[4]; CInterp[l] = CInterp[l]*alpha[iv]; } /* 0 1 2 NW N NE * 3 4 5 W P E * 6 7 8 SW S SE */ int stencil_index[9] = {iv + nx - 1, iv + nx, iv + nx + 1, iv - 1 , iv , iv + 1, iv - nx - 1, iv - nx, iv - nx + 1}; double stencil[9] = {0, Cns[0], 0, Cns[3], 1, Cns[1], 0, Cns[2], 0}; //combine ns and interp stencils bool stencil_used[9] = {false, true, false, true, true, true, false, true, false}; for (int n=0;n<4;n++) { for (int m=0;m<9;m++) { if (stencil_index[m] == interp_index[n] && m != 4) { stencil[m] -= CInterp[n]; //flag should this be minus? 
} } } //add ns to sparse matrix for (int m = 0; m<9; m++) { if (stencil_used[m]) { row[numE] = iv; col[numE] = stencil_index[m]; val[numE] = stencil[m]; numE++; } } ns_rhs[iv] = (1-alpha[iv])/Cns[4]; interp_rhs[iv] = 0; //calc new numE numE = ny*(nx-1)*5 - ny*2 - (nx-1)*2 + nx*(ny-1)*5 - nx*2 - (ny-1)*2 + count[iv]-1; //add interp corner to sparse matrix for (int n=0;n<4;n++) { for (int m=0;m<9;m++) { if (stencil_index[m] == interp_index[n] && !stencil_used[m]) { row[numE] = iv; col[numE] = interp_index[n]; val[numE] = -CInterp[n]; } //else if(stencil_index[m] == interp_index[n] && stencil_used[m]) else if(stencil_index[m] == interp_index[n] && interp_index[n] == iv) interp_rhs[iv] += CInterp[n]*q[n]; } } } else if (ghostTagsUV[iv]>0) { int interp_index[4] = {index1[iv], index2[iv], index3[iv], index4[iv]}; bool interp_in[4] = {false, false, false, false}; int ns_index[5] = {iv+nx, iv+1, iv-nx, iv-1, iv}; //n e s w p bool ns_overlap[5] = {false, false, false, false, true}; double q[4] = {q1[iv], q2[iv], q3[iv], q4[iv]}; double CInterp[4]; CInterp[0] = q1coef[iv]; CInterp[1] = q2coef[iv]; CInterp[2] = q3coef[iv]; CInterp[3] = q4coef[iv]; //count the number of nodes the interp is using //find how which ns nodes are occupied int counter = 0; temp = 0; for (int l=0; l<4; l++) { if (ghostTagsUV[interp_index[l]]>0) { counter +=1; interp_in[l] = true; } for (int n=0; n<5; n++) { if (interp_index[l] == ns_index[n]) ns_overlap[n] = true; } } //add center to matrix row[numE] = iv; col[numE] = iv; val[numE] = 1; numE++; //add real interp values to matrix for (int i=0; i<4; i++) { if (!interp_in[i] && interp_index[i] != iv) { row[numE] = iv; col[numE] = interp_index[i]; val[numE] = CInterp[i]; numE++; } else { temp -= CInterp[i] * q[i]; } } //fill remainder of values int counter2 = 0; for (int i=0; i<5; i++) { if (counter2>=counter) break; if (ns_overlap[i]==false) { row[numE] = iv; col[numE] = ns_index[i]; val[numE] = 0; numE++; counter2++; } } ns_rhs[iv] = 0; interp_rhs[iv] 
= 2*vB[0] + temp;//flag this doesn't account for the interpolation part } else { temp = 1 + 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5)) + 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5)) + 0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5)) + 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5)); //EAST row[numE] = iv; col[numE] = iv+1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5))/temp; numE++; //WEST row[numE] = iv; col[numE] = iv-1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5))/temp; numE++; //NORTH row[numE] = iv; col[numE] = iv + nx; val[numE] = -0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5))/temp; numE++; //SOUTH row[numE] = iv; col[numE] = iv-nx; val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5))/temp; numE++; //CENTER row[numE] = iv; col[numE] = iv; val[numE] = 1; numE++; ns_rhs[iv] = 1/temp; interp_rhs[iv] = 0; } } }//end kernel
7756d6a6227b937571c3df33c9dbc5435c06fad7.cu
/***************************************************************************//** * \file LHS1.cu * \author Christopher Minar (minarc@oregonstate.edu) * \brief kernels to generate the left hand side for the intermediate velocity solve */ #include "LHS1.h" namespace kernels { __global__ void LHS1_mid_iter_X(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny, int *hybridTagsUV, int *ghostTagsUV, double *ns_rhs, double *interp_rhs, int *count, int *index1, int *index2, int *index3, int *index4, double *xu, double *yu, double *alpha, double *uB, //xu, yu not used double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *q1, double *q2, double *q3, double *q4 ) { if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny) return; int iu = threadIdx.x + blockDim.x * blockIdx.x, I = iu % (nx-1), J = iu / (nx-1); if (I == 0 || I == nx-2 || J == 0 || J == ny-1) return; //int numE = i*5; // top row - corner mid sides current row int numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*5 - 1; double temp = 1; if (hybridTagsUV[iu]>0) { int interp_index[4] = {index1[iu], index2[iu], index3[iu], index4[iu]}; double q[4] = {q1[iu], q2[iu], q3[iu], q4[iu]}; double CInterp[4]; double Cns[5]; Cns[0] = -dt*nu/(dy[J+1]*(dy[J]+dy[J+1])); Cns[1] = -dt*nu/(dx[I] *(dx[I]+dx[I+1])); Cns[2] = -dt*nu/(dy[J] *(dy[J]+dy[J+1])); Cns[3] = -dt*nu/(dx[I] *(dx[I]+dx[I-1])); Cns[4] = 1-Cns[0] - Cns[1] - Cns[2] - Cns[3]; CInterp[0] = q1coef[iu]; CInterp[1] = q2coef[iu]; CInterp[2] = q3coef[iu]; CInterp[3] = q4coef[iu]; for (int l=0; l<4; l++) { Cns[l] = Cns[l]*(1-alpha[iu])/Cns[4]; CInterp[l] = CInterp[l]*alpha[iu]; } /* 0 1 2 NW N NE * 3 4 5 W P E * 6 7 8 SW S SE */ int stencil_index[9] = {iu + (nx-1) - 1, iu + (nx-1), iu + (nx-1) + 1, iu - 1 , iu , iu + 1, iu - (nx-1) - 1, iu - (nx-1), iu - (nx-1) + 1}; double stencil[9] = {0, Cns[0], 0, Cns[3], 1, Cns[1], 0, Cns[2], 0}; //combine ns and interp stencils bool stencil_used[9] = {false, true, false, true, 
true, true, false, true, false}; for (int n=0;n<4;n++) { for (int m=0;m<9;m++) { if (stencil_index[m] == interp_index[n] && m != 4) { stencil[m] -= CInterp[n]; //flag should this be minus? } } } //add ns to sparse matrix for (int m = 0; m<9; m++) { if (stencil_used[m]) { row[numE] = iu; col[numE] = stencil_index[m]; val[numE] = stencil[m]; numE++; } } ns_rhs[iu] = (1-alpha[iu])/Cns[4]; interp_rhs[iu] = 0; //calc new numE numE = ny*(nx-1)*5 - ny*2 - (nx-1)*2 + nx*(ny-1)*5 - nx*2 - (ny-1)*2 + count[iu]-1; //add interp corner to sparse matrix for (int n=0;n<4;n++) { for (int m=0;m<9;m++) { if (stencil_index[m] == interp_index[n] && !stencil_used[m]) { row[numE] = iu; col[numE] = interp_index[n]; val[numE] = -CInterp[n]; //this should be minus? } //else if(stencil_index[m] == interp_index[n] && stencil_used[m]) else if(stencil_index[m] == interp_index[n] && interp_index[n] == iu) interp_rhs[iu] += CInterp[n]*q[n]; } } } else if (ghostTagsUV[iu]>0) { int interp_index[4] = {index1[iu], index2[iu], index3[iu], index4[iu]}; bool interp_in[4] = {false, false, false, false}; int ns_index[5] = {iu + (nx-1), iu + 1, iu - (nx-1), iu -1, iu}; //n e s w p bool ns_overlap[5] = {false, false, false, false, true}; double q[4] = {q1[iu], q2[iu], q3[iu], q4[iu]}; double CInterp[4]; CInterp[0] = q1coef[iu]; CInterp[1] = q2coef[iu]; CInterp[2] = q3coef[iu]; CInterp[3] = q4coef[iu]; //count the number of nodes the interp is using //find how which ns nodes are occupied int counter = 0; temp = 0; for (int l=0; l<4; l++) { if (ghostTagsUV[interp_index[l]]>0) { counter +=1; interp_in[l] = true; } for (int n=0; n<5; n++) { if (interp_index[l] == ns_index[n]) ns_overlap[n] = true; } } //add center to matrix row[numE] = iu; col[numE] = iu; val[numE] = 1; numE++; //add real interp values to matrix for (int i=0; i<4; i++) { if (!interp_in[i] && interp_index[i] != iu) { row[numE] = iu; col[numE] = interp_index[i]; val[numE] = CInterp[i]; numE++; } else { temp -= CInterp[i] * q[i]; } } //fill 
remainder of values int counter2 = 0; for (int i=0; i<5; i++) { if (counter2>=counter) break; if (ns_overlap[i]==false) { row[numE] = iu; col[numE] = ns_index[i]; val[numE] = 0; numE++; counter2++; } } ns_rhs[iu] = 0; interp_rhs[iu] = 2*uB[0] + temp;//flag this doesn't account for the interpolation part } else { temp = 1 + 0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5)) + 0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5)) + 0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5)) + 0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5)); //EAST row[numE] = iu; col[numE] = iu+1; val[numE] = -0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5))/temp; numE++; //WEST row[numE] = iu; col[numE] = iu-1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5))/temp; numE++; //NORTH row[numE] = iu; col[numE] = iu+(nx-1); val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5))/temp; numE++; //SOUTH row[numE] = iu; col[numE] = iu-(nx-1); val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5))/temp; numE++; //CENTER row[numE] = iu; col[numE] = iu; val[numE] = 1; numE++; ns_rhs[iu] = 1/temp; interp_rhs[iu] = 0; } } __global__ void LHS1_mid_iter_Y(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny, int *hybridTagsUV, int *ghostTagsUV, double *ns_rhs, double *interp_rhs, int *count, int *index1, int *index2, int *index3, int *index4, double *xv, double *yv, double *alpha, double *vB, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *q1, double *q2, double *q3, double *q4 ) { if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1)) return; int ip = threadIdx.x + blockDim.x * blockIdx.x, I = ip % nx, J = ip / nx, iv = ip + (nx-1)*ny; if (I == 0 || I == nx-1 || J == 0 || J == ny-2) return; int numE = (nx-1)*ny*5 - 2*ny-2*(nx-1) + nx*4-2 + (J-1)*(nx*5 - 2) + I*5 - 1; double temp = 1; if (hybridTagsUV[iv]>0) { int interp_index[4] = {index1[iv], index2[iv], index3[iv], index4[iv]}; double q[4] = {q1[iv], q2[iv], q3[iv], q4[iv]}; double CInterp[4]; double Cns[5]; Cns[0] = 
-dt*nu/(dy[J+1]*(dy[J]+dy[J+1])); Cns[1] = -dt*nu/(dx[I]*(dx[I]+dx[I+1])); Cns[2] = -dt*nu/(dy[J]*(dy[J]+dy[J+1])); Cns[3] = -dt*nu/(dx[I]*(dx[I]+dx[I-1])); Cns[4] = 1-Cns[0] - Cns[1] - Cns[2] - Cns[3]; CInterp[0] = q1coef[iv]; CInterp[1] = q2coef[iv]; CInterp[2] = q3coef[iv]; CInterp[3] = q4coef[iv]; for (int l=0; l<4; l++) { Cns[l] = Cns[l]*(1-alpha[iv])/Cns[4]; CInterp[l] = CInterp[l]*alpha[iv]; } /* 0 1 2 NW N NE * 3 4 5 W P E * 6 7 8 SW S SE */ int stencil_index[9] = {iv + nx - 1, iv + nx, iv + nx + 1, iv - 1 , iv , iv + 1, iv - nx - 1, iv - nx, iv - nx + 1}; double stencil[9] = {0, Cns[0], 0, Cns[3], 1, Cns[1], 0, Cns[2], 0}; //combine ns and interp stencils bool stencil_used[9] = {false, true, false, true, true, true, false, true, false}; for (int n=0;n<4;n++) { for (int m=0;m<9;m++) { if (stencil_index[m] == interp_index[n] && m != 4) { stencil[m] -= CInterp[n]; //flag should this be minus? } } } //add ns to sparse matrix for (int m = 0; m<9; m++) { if (stencil_used[m]) { row[numE] = iv; col[numE] = stencil_index[m]; val[numE] = stencil[m]; numE++; } } ns_rhs[iv] = (1-alpha[iv])/Cns[4]; interp_rhs[iv] = 0; //calc new numE numE = ny*(nx-1)*5 - ny*2 - (nx-1)*2 + nx*(ny-1)*5 - nx*2 - (ny-1)*2 + count[iv]-1; //add interp corner to sparse matrix for (int n=0;n<4;n++) { for (int m=0;m<9;m++) { if (stencil_index[m] == interp_index[n] && !stencil_used[m]) { row[numE] = iv; col[numE] = interp_index[n]; val[numE] = -CInterp[n]; } //else if(stencil_index[m] == interp_index[n] && stencil_used[m]) else if(stencil_index[m] == interp_index[n] && interp_index[n] == iv) interp_rhs[iv] += CInterp[n]*q[n]; } } } else if (ghostTagsUV[iv]>0) { int interp_index[4] = {index1[iv], index2[iv], index3[iv], index4[iv]}; bool interp_in[4] = {false, false, false, false}; int ns_index[5] = {iv+nx, iv+1, iv-nx, iv-1, iv}; //n e s w p bool ns_overlap[5] = {false, false, false, false, true}; double q[4] = {q1[iv], q2[iv], q3[iv], q4[iv]}; double CInterp[4]; CInterp[0] = q1coef[iv]; 
CInterp[1] = q2coef[iv]; CInterp[2] = q3coef[iv]; CInterp[3] = q4coef[iv]; //count the number of nodes the interp is using //find how which ns nodes are occupied int counter = 0; temp = 0; for (int l=0; l<4; l++) { if (ghostTagsUV[interp_index[l]]>0) { counter +=1; interp_in[l] = true; } for (int n=0; n<5; n++) { if (interp_index[l] == ns_index[n]) ns_overlap[n] = true; } } //add center to matrix row[numE] = iv; col[numE] = iv; val[numE] = 1; numE++; //add real interp values to matrix for (int i=0; i<4; i++) { if (!interp_in[i] && interp_index[i] != iv) { row[numE] = iv; col[numE] = interp_index[i]; val[numE] = CInterp[i]; numE++; } else { temp -= CInterp[i] * q[i]; } } //fill remainder of values int counter2 = 0; for (int i=0; i<5; i++) { if (counter2>=counter) break; if (ns_overlap[i]==false) { row[numE] = iv; col[numE] = ns_index[i]; val[numE] = 0; numE++; counter2++; } } ns_rhs[iv] = 0; interp_rhs[iv] = 2*vB[0] + temp;//flag this doesn't account for the interpolation part } else { temp = 1 + 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5)) + 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5)) + 0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5)) + 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5)); //EAST row[numE] = iv; col[numE] = iv+1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5))/temp; numE++; //WEST row[numE] = iv; col[numE] = iv-1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5))/temp; numE++; //NORTH row[numE] = iv; col[numE] = iv + nx; val[numE] = -0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5))/temp; numE++; //SOUTH row[numE] = iv; col[numE] = iv-nx; val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5))/temp; numE++; //CENTER row[numE] = iv; col[numE] = iv; val[numE] = 1; numE++; ns_rhs[iv] = 1/temp; interp_rhs[iv] = 0; } } }//end kernel
fb5afd201e7c1d8ec89861016be6e3a5d3cf113a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #define SIZE 32 /* Autores: * * Antonio J. Cabrera * Paul Gazel-Anthoine */ // STRUCTS typedef struct bmpFileHeader { /* 2 bytes de identificacin */ uint32_t size; /* Tamao del archivo */ uint16_t resv1; /* Reservado */ uint16_t resv2; /* Reservado */ uint32_t offset; /* Offset hasta hasta los datos de imagen */ } bmpFileHeader; typedef struct bmpInfoHeader { uint32_t headersize; /* Tamao de la cabecera */ uint32_t width; /* Ancho */ uint32_t height; /* Alto */ uint16_t planes; /* Planos de color (Siempre 1) */ uint16_t bpp; /* bits por pixel */ uint32_t compress; /* compresion */ uint32_t imgsize; /* tamao de los datos de imagen */ uint32_t bpmx; /* Resolucion X en bits por metro */ uint32_t bpmy; /* Resolucion Y en bits por metro */ uint32_t colors; /* colors used en la paleta */ uint32_t imxtcolors; /* Colores importantes. 
0 si son todos */ } bmpInfoHeader; // Rutinas BMP unsigned char *LoadBMP(char *filename, bmpInfoHeader *bInfoHeader) { FILE *f; bmpFileHeader header; /* cabecera */ unsigned char *imgdata; /* datos de imagen */ uint16_t type; /* 2 bytes identificativos */ f=fopen (filename, "r"); if (!f) { /* Si no podemos leer, no hay imagen */ printf("NO se puede abrir el fichero %s\n", filename); return NULL; } /* Leemos los dos primeros bytes y comprobamos el formato */ fread(&type, sizeof(uint16_t), 1, f); if (type !=0x4D42) { fclose(f); printf("%s NO es una imagen BMP\n", filename); return NULL; } /* Leemos la cabecera del fichero */ fread(&header, sizeof(bmpFileHeader), 1, f); printf("File size: %u\n", header.size); printf("Reservado: %u\n", header.resv1); printf("Reservado: %u\n", header.resv2); printf("Offset: %u\n", header.offset); /* Leemos la cabecera de informacin del BMP */ fread(bInfoHeader, sizeof(bmpInfoHeader), 1, f); /* Reservamos memoria para la imagen, lo que indique imgsize */ if (bInfoHeader->imgsize == 0) bInfoHeader->imgsize = ((bInfoHeader->width*3 +3) / 4) * 4 * bInfoHeader->height; imgdata = (unsigned char*) malloc(bInfoHeader->imgsize); if (imgdata == NULL) { printf("Fallo en el malloc, del fichero %s\n", filename); exit(0); } /* Nos situamos en donde empiezan los datos de imagen, lo indica el offset de la cabecera de fichero */ fseek(f, header.offset, SEEK_SET); /* Leemos los datos de la imagen, tantos bytes como imgsize */ fread(imgdata, bInfoHeader->imgsize,1, f); /* Cerramos el fichero */ fclose(f); /* Devolvemos la imagen */ return imgdata; } bmpInfoHeader *createInfoHeader(uint32_t width, uint32_t height, uint32_t ppp) { bmpInfoHeader *InfoHeader; bool IH; IH = malloc(sizeof(bmpInfoHeader)); if (!IH) return NULL; InfoHeader->headersize = sizeof(bmpInfoHeader); InfoHeader->width = width; InfoHeader->height = height; InfoHeader->planes = 1; InfoHeader->bpp = 24; InfoHeader->compress = 0; /* 3 bytes por pixel, width*height pixels, el tamao de las 
filas ha de ser multiplo de 4 */ InfoHeader->imgsize = ((width*3 + 3) / 4) * 4 * height; InfoHeader->bpmx = (unsigned) ((double)ppp*100/2.54); InfoHeader->bpmy= InfoHeader->bpmx; /* Misma resolucion vertical y horiontal */ InfoHeader->colors = 0; InfoHeader->imxtcolors = 0; return InfoHeader; } void SaveBMP(char *filename, bmpInfoHeader *InfoHeader, unsigned char *imgdata) { bmpFileHeader header; FILE *f; uint16_t type; f=fopen(filename, "w+"); header.size = InfoHeader->imgsize + sizeof(bmpFileHeader) + sizeof(bmpInfoHeader) +2;//2 header.resv1 = 0; header.resv2 = 0; /* El offset ser el tamao de las dos cabeceras + 2 (informacin de fichero)*/ header.offset=sizeof(bmpFileHeader)+sizeof(bmpInfoHeader) +2;//2 /* Escribimos la identificacin del archivo */ type=0x4D42; fwrite(&type, sizeof(type),1,f); /* Escribimos la cabecera de fichero */ fwrite(&header, sizeof(bmpFileHeader),1,f); /* Escribimos la informacin bsica de la imagen */ fwrite(InfoHeader, sizeof(bmpInfoHeader),1,f); /* Escribimos la imagen */ fwrite(imgdata, InfoHeader->imgsize, 1, f); fclose(f); } void DisplayInfo(char *FileName, bmpInfoHeader *InfoHeader) { printf("\n"); printf("Informacion de %s\n", FileName); printf("Tamao de la cabecera: %u bytes\n", InfoHeader->headersize); printf("Anchura: %d pixels\n", InfoHeader->width); printf("Altura: %d pixels\n", InfoHeader->height); printf("Planos (1): %d\n", InfoHeader->planes); printf("Bits por pixel: %d\n", InfoHeader->bpp); printf("Compresion: %d\n", InfoHeader->compress); printf("Tamao de la imagen: %u bytes\n", InfoHeader->imgsize); printf("Resolucion horizontal: %u px/m\n", InfoHeader->bpmx); printf("Resolucion vertical: %u px/m\n", InfoHeader->bpmy); if (InfoHeader->bpmx == 0) InfoHeader->bpmx = (unsigned) ((double)24*100/2.54); if (InfoHeader->bpmy == 0) InfoHeader->bpmy = (unsigned) ((double)24*100/2.54); printf("Colores en paleta: %d\n", InfoHeader->colors); printf("Colores importantes: %d\n", InfoHeader->imxtcolors); } /* 
------------------------------------------------ Nuestro Cdigo ------------------------------------------------ */ __global__ void KernelByN (int N, int M, unsigned char *A, int *S, int NS) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) A[row*N+col*3] = ((A[row*N+col*3] + A[row*N+col*3+1] + A[row*N+col*3+2])/3); } __global__ void KernelSobel1(int N,int M, unsigned char *A, int *S, int NS) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) { double magnitudX, magnitudY; if(col != 0 && row != 0 && col != NS-1 && row != M-1) { magnitudX = (double)(A[(row-1)*N+(col-1)*3]*(-1) + A[(row)*N+(col-1)*3]*(-2) + A[(row+1)*N+(col+1)*3]*(-1) + A[(row-1)*N+(col+1)*3] + A[row*N+(col+1)*3]*2 + A[(row+1)*N+(col+1)*3]); magnitudY = (double)(A[(row-1)*N+(col-1)*3]*(-1) + A[(row+1)*N+(col-1)*3] + A[(row-1)*N+col*3]*(-2) + A[(row+1)*N+col*3]*2 + A[(row-1)*N+(col+1)*3]*(-1) + A[(row+1)*N+(col+1)*3]); S[row*NS+col] = (int)sqrt(magnitudX*magnitudX + magnitudY*magnitudY); } else S[row*NS+col] = 0; } } __global__ void KernelReduction1(int NT,int *S, int *oMin, int *oMax) { //Reduction __shared__ int sdataMax[SIZE*SIZE]; __shared__ int sdataMin[SIZE*SIZE]; unsigned int s; int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; sdataMax[tid] = 0; sdataMin[tid] = 0x7FFFFFFF; while(i< NT) { if (S[i] > -1) { if (sdataMax[tid] < S[i]) sdataMax[tid] = S[i]; if (sdataMin[tid] > S[i]) sdataMin[tid] = S[i]; } if (i+blockDim.x < NT && S[i+blockDim.x] > -1) { if (sdataMax[tid] < S[i+blockDim.x]) sdataMax[tid] = S[i+blockDim.x]; if (sdataMin[tid] > S[i+blockDim.x]) sdataMin[tid] = S[i+blockDim.x]; } i += gridSize; } __syncthreads(); for (s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { if (sdataMax[tid] < sdataMax[tid+s]) sdataMax[tid] = sdataMax[tid+s]; if (sdataMin[tid] > 
sdataMin[tid+s]) sdataMin[tid] = sdataMin[tid+s]; } __syncthreads(); } // desenrrollamos el ultimo warp activo if (tid < 32) { volatile int *smemMax = sdataMax; volatile int *smemMin = sdataMin; if (smemMax[tid] < smemMax[tid+32]) smemMax[tid] = smemMax[tid+32]; if (smemMax[tid] < smemMax[tid+16]) smemMax[tid] = smemMax[tid+16]; if (smemMax[tid] < smemMax[tid+8]) smemMax[tid] = smemMax[tid+8]; if (smemMax[tid] < smemMax[tid+4]) smemMax[tid] = smemMax[tid+4]; if (smemMax[tid] < smemMax[tid+2]) smemMax[tid] = smemMax[tid+2]; if (smemMax[tid] < smemMax[tid+1]) smemMax[tid] = smemMax[tid+1]; if (smemMin[tid] < smemMin[tid+32]) smemMin[tid] = smemMin[tid+32]; if (smemMin[tid] < smemMin[tid+16]) smemMin[tid] = smemMin[tid+16]; if (smemMin[tid] < smemMin[tid+8]) smemMin[tid] = smemMin[tid+8]; if (smemMin[tid] < smemMin[tid+4]) smemMin[tid] = smemMin[tid+4]; if (smemMin[tid] < smemMin[tid+2]) smemMin[tid] = smemMin[tid+2]; if (smemMin[tid] < smemMin[tid+1]) smemMin[tid] = smemMin[tid+1]; } // El thread 0 escribe el resultado de este bloque en la memoria global if (tid == 0) { oMax[blockIdx.x] = sdataMax[0]; oMin[blockIdx.x] = sdataMin[0]; } } __global__ void KernelReduction2(int NT, int *Min, int *Max, float *factor) { //Last step of reduction __shared__ int sdataMax[SIZE*SIZE]; __shared__ int sdataMin[SIZE*SIZE]; unsigned int s; int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; sdataMax[tid] = 0; sdataMin[tid] = 0x7FFFFFFF; while(i< NT) { if (sdataMax[tid] < Max[i]) sdataMax[tid] = Max[i]; if (sdataMin[tid] > Min[i]) sdataMin[tid] = Min[i]; if(i+blockDim.x < NT){ if (sdataMax[tid] < Max[i+blockDim.x]) sdataMax[tid] = Max[i+blockDim.x]; if (sdataMin[tid] > Min[i+blockDim.x]) sdataMin[tid] = Min[i+blockDim.x]; } i += gridSize; } __syncthreads(); for (s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { if (sdataMax[tid] < sdataMax[tid+s]) sdataMax[tid] = sdataMax[tid+s]; if (sdataMin[tid] > 
sdataMin[tid+s]) sdataMin[tid] = sdataMin[tid+s]; } __syncthreads(); } // desenrrollamos el ultimo warp activo if (tid < 32) { volatile int *smemMax = sdataMax; volatile int *smemMin = sdataMin; if (smemMax[tid] < smemMax[tid+32]) smemMax[tid] = smemMax[tid+32]; if (smemMax[tid] < smemMax[tid+16]) smemMax[tid] = smemMax[tid+16]; if (smemMax[tid] < smemMax[tid+8]) smemMax[tid] = smemMax[tid+8]; if (smemMax[tid] < smemMax[tid+4]) smemMax[tid] = smemMax[tid+4]; if (smemMax[tid] < smemMax[tid+2]) smemMax[tid] = smemMax[tid+2]; if (smemMax[tid] < smemMax[tid+1]) smemMax[tid] = smemMax[tid+1]; if (smemMin[tid] < smemMin[tid+32]) smemMin[tid] = smemMin[tid+32]; if (smemMin[tid] < smemMin[tid+16]) smemMin[tid] = smemMin[tid+16]; if (smemMin[tid] < smemMin[tid+8]) smemMin[tid] = smemMin[tid+8]; if (smemMin[tid] < smemMin[tid+4]) smemMin[tid] = smemMin[tid+4]; if (smemMin[tid] < smemMin[tid+2]) smemMin[tid] = smemMin[tid+2]; if (smemMin[tid] < smemMin[tid+1]) smemMin[tid] = smemMin[tid+1]; } // El thread 0 escribe el resultado de este bloque en la memoria global if (tid == 0) { *factor = (float)(255.0/(float)(sdataMax[0]-sdataMin[0])); } } __global__ void KernelSobel2 (int N,int M,unsigned char *A, int *S, int NS, float *factor) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) { if(col != 0 && row != 0 && col != NS-1 && row != M-1) A[row*N+col*3] = A[row*N+col*3+1] = A[row*N+col*3+2] = (unsigned char)(S[row*NS+col] * factor[0]); else A[row*N+col*3] = A[row*N+col*3+1] = A[row*N+col*3+2] = 0; } } int main(int argc, char** argv) { unsigned int N, M, NS; unsigned int numBytesA, numBytesS, numBytesR; unsigned int nBlocksX, nBlocksY, nBlocksR, nThreads; float TiempoTotal, TiempoKernel, *d_factor, *h_factor, factor; hipEvent_t E0, E1, E2, E3; unsigned char *d_A; int *d_S, *d_OutMax, *d_OutMin; if (argc != 3 && argc != 4) { printf("Usage: ./exe img.bmp prefix\n"); exit(0); } if (argc == 4){ factor = 
atof(argv[3]); h_factor = &factor; } printf("INICIO\n"); bmpInfoHeader header; unsigned char *image; image = LoadBMP(argv[1], &header); unsigned int N3 = header.width * 3; N = (N3+3) & 0xFFFFFFFC; //fila multiplo de 4 (BMP) M = header.height; NS = header.width; nThreads = SIZE; // numero de Blocks en cada dimension nBlocksX = (NS+nThreads-1)/nThreads; nBlocksY = (M+nThreads-1)/nThreads; numBytesA = N * M * sizeof(unsigned char); numBytesS = NS * M * sizeof(int); nBlocksR = ((NS * M)+(nThreads*nThreads-1)) / (nThreads*nThreads); numBytesR = nBlocksR *sizeof(int); dim3 dimGrid(nBlocksX, nBlocksY, 1); dim3 dimBlock(nThreads, nThreads, 1); dim3 dimGridR(nBlocksR, 1, 1); dim3 dimBlockR(nThreads * nThreads, 1, 1); hipEventCreate(&E0); hipEventCreate(&E1); hipEventCreate(&E2); hipEventCreate(&E3); hipEventRecord(E0, 0); hipEventSynchronize(E0); // Obtener Memoria en el device hipMalloc((unsigned char**)&d_A, numBytesA); hipMalloc((int**)&d_S, numBytesS); hipMalloc((int**)&d_OutMax, numBytesR); hipMalloc((int**)&d_OutMin, numBytesR); hipMalloc((float**)&d_factor, sizeof(float)); // Copiar datos del host al device hipMemcpy(d_A, image, numBytesA, hipMemcpyHostToDevice); hipMemcpy(d_factor, h_factor, sizeof(float), hipMemcpyHostToDevice); hipEventRecord(E1, 0); hipEventSynchronize(E1); // Ejecutar el kernel hipLaunchKernelGGL(( KernelByN), dim3(dimGrid), dim3(dimBlock), 0, 0, N, M, d_A, d_S, NS); hipLaunchKernelGGL(( KernelSobel1), dim3(dimGrid), dim3(dimBlock), 0, 0, N, M, d_A, d_S, NS); hipLaunchKernelGGL(( KernelReduction1), dim3(dimGridR),dim3(dimBlockR), 0, 0, NS*M, d_S, d_OutMin, d_OutMax); if(argc==3)hipLaunchKernelGGL(( KernelReduction2), dim3(1),dim3(dimBlockR), 0, 0, nBlocksR, d_OutMin, d_OutMax, d_factor); hipLaunchKernelGGL(( KernelSobel2), dim3(dimGrid), dim3(dimBlock), 0, 0, N, M, d_A, d_S, NS, d_factor); hipEventRecord(E2, 0); hipEventSynchronize(E2); // Obtener el resultado desde el host hipMemcpy(image, d_A, numBytesA, hipMemcpyDeviceToHost); // Liberar 
Memoria del device hipFree(d_A); hipFree(d_S); hipFree(d_OutMin); hipFree(d_OutMax); hipFree(d_factor); hipEventRecord(E3, 0); hipEventSynchronize(E3); hipEventElapsedTime(&TiempoTotal, E0, E3); hipEventElapsedTime(&TiempoKernel, E1, E2); printf("\nKERNEL ByN & Reductions & Sobel\n"); printf("Dimensiones: %dx%d\n", NS, M); printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads * nThreads); printf("nBlocks: %dx%d (%d)\n", nBlocksX, nBlocksY, nBlocksX*nBlocksY); printf("nThreadsR1: %dx%d (%d)\n", nThreads*nThreads, 1, nThreads * nThreads); printf("nBlocksR1: %dx%d (%d)\n", nBlocksR, 1, nBlocksR); printf("nThreadsR2: %dx%d (%d)\n", nThreads*nThreads, 1, nThreads * nThreads); printf("nBlocksR2: %dx%d (%d)\n", 1, 1, 1); printf("Tiempo Global: %4.6f milseg\n", TiempoTotal); printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel); hipEventDestroy(E0); hipEventDestroy(E1); hipEventDestroy(E2); hipEventDestroy(E3); char nom[32]; strcpy(nom, argv[2]); strcat(nom, "_"); strcat(nom,argv[1]); SaveBMP(nom, &header, image); }
fb5afd201e7c1d8ec89861016be6e3a5d3cf113a.cu
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #define SIZE 32 /* Autores: * * Antonio J. Cabrera * Paul Gazel-Anthoine */ // STRUCTS typedef struct bmpFileHeader { /* 2 bytes de identificación */ uint32_t size; /* Tamaño del archivo */ uint16_t resv1; /* Reservado */ uint16_t resv2; /* Reservado */ uint32_t offset; /* Offset hasta hasta los datos de imagen */ } bmpFileHeader; typedef struct bmpInfoHeader { uint32_t headersize; /* Tamaño de la cabecera */ uint32_t width; /* Ancho */ uint32_t height; /* Alto */ uint16_t planes; /* Planos de color (Siempre 1) */ uint16_t bpp; /* bits por pixel */ uint32_t compress; /* compresion */ uint32_t imgsize; /* tamaño de los datos de imagen */ uint32_t bpmx; /* Resolucion X en bits por metro */ uint32_t bpmy; /* Resolucion Y en bits por metro */ uint32_t colors; /* colors used en la paleta */ uint32_t imxtcolors; /* Colores importantes. 0 si son todos */ } bmpInfoHeader; // Rutinas BMP unsigned char *LoadBMP(char *filename, bmpInfoHeader *bInfoHeader) { FILE *f; bmpFileHeader header; /* cabecera */ unsigned char *imgdata; /* datos de imagen */ uint16_t type; /* 2 bytes identificativos */ f=fopen (filename, "r"); if (!f) { /* Si no podemos leer, no hay imagen */ printf("NO se puede abrir el fichero %s\n", filename); return NULL; } /* Leemos los dos primeros bytes y comprobamos el formato */ fread(&type, sizeof(uint16_t), 1, f); if (type !=0x4D42) { fclose(f); printf("%s NO es una imagen BMP\n", filename); return NULL; } /* Leemos la cabecera del fichero */ fread(&header, sizeof(bmpFileHeader), 1, f); printf("File size: %u\n", header.size); printf("Reservado: %u\n", header.resv1); printf("Reservado: %u\n", header.resv2); printf("Offset: %u\n", header.offset); /* Leemos la cabecera de información del BMP */ fread(bInfoHeader, sizeof(bmpInfoHeader), 1, f); /* Reservamos memoria para la imagen, lo que indique imgsize */ if (bInfoHeader->imgsize == 0) bInfoHeader->imgsize = 
((bInfoHeader->width*3 +3) / 4) * 4 * bInfoHeader->height; imgdata = (unsigned char*) malloc(bInfoHeader->imgsize); if (imgdata == NULL) { printf("Fallo en el malloc, del fichero %s\n", filename); exit(0); } /* Nos situamos en donde empiezan los datos de imagen, lo indica el offset de la cabecera de fichero */ fseek(f, header.offset, SEEK_SET); /* Leemos los datos de la imagen, tantos bytes como imgsize */ fread(imgdata, bInfoHeader->imgsize,1, f); /* Cerramos el fichero */ fclose(f); /* Devolvemos la imagen */ return imgdata; } bmpInfoHeader *createInfoHeader(uint32_t width, uint32_t height, uint32_t ppp) { bmpInfoHeader *InfoHeader; bool IH; IH = malloc(sizeof(bmpInfoHeader)); if (!IH) return NULL; InfoHeader->headersize = sizeof(bmpInfoHeader); InfoHeader->width = width; InfoHeader->height = height; InfoHeader->planes = 1; InfoHeader->bpp = 24; InfoHeader->compress = 0; /* 3 bytes por pixel, width*height pixels, el tamaño de las filas ha de ser multiplo de 4 */ InfoHeader->imgsize = ((width*3 + 3) / 4) * 4 * height; InfoHeader->bpmx = (unsigned) ((double)ppp*100/2.54); InfoHeader->bpmy= InfoHeader->bpmx; /* Misma resolucion vertical y horiontal */ InfoHeader->colors = 0; InfoHeader->imxtcolors = 0; return InfoHeader; } void SaveBMP(char *filename, bmpInfoHeader *InfoHeader, unsigned char *imgdata) { bmpFileHeader header; FILE *f; uint16_t type; f=fopen(filename, "w+"); header.size = InfoHeader->imgsize + sizeof(bmpFileHeader) + sizeof(bmpInfoHeader) +2;//2 header.resv1 = 0; header.resv2 = 0; /* El offset será el tamaño de las dos cabeceras + 2 (información de fichero)*/ header.offset=sizeof(bmpFileHeader)+sizeof(bmpInfoHeader) +2;//2 /* Escribimos la identificación del archivo */ type=0x4D42; fwrite(&type, sizeof(type),1,f); /* Escribimos la cabecera de fichero */ fwrite(&header, sizeof(bmpFileHeader),1,f); /* Escribimos la información básica de la imagen */ fwrite(InfoHeader, sizeof(bmpInfoHeader),1,f); /* Escribimos la imagen */ fwrite(imgdata, 
InfoHeader->imgsize, 1, f); fclose(f); } void DisplayInfo(char *FileName, bmpInfoHeader *InfoHeader) { printf("\n"); printf("Informacion de %s\n", FileName); printf("Tamaño de la cabecera: %u bytes\n", InfoHeader->headersize); printf("Anchura: %d pixels\n", InfoHeader->width); printf("Altura: %d pixels\n", InfoHeader->height); printf("Planos (1): %d\n", InfoHeader->planes); printf("Bits por pixel: %d\n", InfoHeader->bpp); printf("Compresion: %d\n", InfoHeader->compress); printf("Tamaño de la imagen: %u bytes\n", InfoHeader->imgsize); printf("Resolucion horizontal: %u px/m\n", InfoHeader->bpmx); printf("Resolucion vertical: %u px/m\n", InfoHeader->bpmy); if (InfoHeader->bpmx == 0) InfoHeader->bpmx = (unsigned) ((double)24*100/2.54); if (InfoHeader->bpmy == 0) InfoHeader->bpmy = (unsigned) ((double)24*100/2.54); printf("Colores en paleta: %d\n", InfoHeader->colors); printf("Colores importantes: %d\n", InfoHeader->imxtcolors); } /* ------------------------------------------------ Nuestro Código ------------------------------------------------ */ __global__ void KernelByN (int N, int M, unsigned char *A, int *S, int NS) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) A[row*N+col*3] = ((A[row*N+col*3] + A[row*N+col*3+1] + A[row*N+col*3+2])/3); } __global__ void KernelSobel1(int N,int M, unsigned char *A, int *S, int NS) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) { double magnitudX, magnitudY; if(col != 0 && row != 0 && col != NS-1 && row != M-1) { magnitudX = (double)(A[(row-1)*N+(col-1)*3]*(-1) + A[(row)*N+(col-1)*3]*(-2) + A[(row+1)*N+(col+1)*3]*(-1) + A[(row-1)*N+(col+1)*3] + A[row*N+(col+1)*3]*2 + A[(row+1)*N+(col+1)*3]); magnitudY = (double)(A[(row-1)*N+(col-1)*3]*(-1) + A[(row+1)*N+(col-1)*3] + A[(row-1)*N+col*3]*(-2) + A[(row+1)*N+col*3]*2 + A[(row-1)*N+(col+1)*3]*(-1) + A[(row+1)*N+(col+1)*3]); S[row*NS+col] 
= (int)sqrt(magnitudX*magnitudX + magnitudY*magnitudY); } else S[row*NS+col] = 0; } } __global__ void KernelReduction1(int NT,int *S, int *oMin, int *oMax) { //Reduction __shared__ int sdataMax[SIZE*SIZE]; __shared__ int sdataMin[SIZE*SIZE]; unsigned int s; int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; sdataMax[tid] = 0; sdataMin[tid] = 0x7FFFFFFF; while(i< NT) { if (S[i] > -1) { if (sdataMax[tid] < S[i]) sdataMax[tid] = S[i]; if (sdataMin[tid] > S[i]) sdataMin[tid] = S[i]; } if (i+blockDim.x < NT && S[i+blockDim.x] > -1) { if (sdataMax[tid] < S[i+blockDim.x]) sdataMax[tid] = S[i+blockDim.x]; if (sdataMin[tid] > S[i+blockDim.x]) sdataMin[tid] = S[i+blockDim.x]; } i += gridSize; } __syncthreads(); for (s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { if (sdataMax[tid] < sdataMax[tid+s]) sdataMax[tid] = sdataMax[tid+s]; if (sdataMin[tid] > sdataMin[tid+s]) sdataMin[tid] = sdataMin[tid+s]; } __syncthreads(); } // desenrrollamos el ultimo warp activo if (tid < 32) { volatile int *smemMax = sdataMax; volatile int *smemMin = sdataMin; if (smemMax[tid] < smemMax[tid+32]) smemMax[tid] = smemMax[tid+32]; if (smemMax[tid] < smemMax[tid+16]) smemMax[tid] = smemMax[tid+16]; if (smemMax[tid] < smemMax[tid+8]) smemMax[tid] = smemMax[tid+8]; if (smemMax[tid] < smemMax[tid+4]) smemMax[tid] = smemMax[tid+4]; if (smemMax[tid] < smemMax[tid+2]) smemMax[tid] = smemMax[tid+2]; if (smemMax[tid] < smemMax[tid+1]) smemMax[tid] = smemMax[tid+1]; if (smemMin[tid] < smemMin[tid+32]) smemMin[tid] = smemMin[tid+32]; if (smemMin[tid] < smemMin[tid+16]) smemMin[tid] = smemMin[tid+16]; if (smemMin[tid] < smemMin[tid+8]) smemMin[tid] = smemMin[tid+8]; if (smemMin[tid] < smemMin[tid+4]) smemMin[tid] = smemMin[tid+4]; if (smemMin[tid] < smemMin[tid+2]) smemMin[tid] = smemMin[tid+2]; if (smemMin[tid] < smemMin[tid+1]) smemMin[tid] = smemMin[tid+1]; } // El thread 0 escribe el resultado de este bloque en la memoria global 
if (tid == 0) { oMax[blockIdx.x] = sdataMax[0]; oMin[blockIdx.x] = sdataMin[0]; } } __global__ void KernelReduction2(int NT, int *Min, int *Max, float *factor) { //Last step of reduction __shared__ int sdataMax[SIZE*SIZE]; __shared__ int sdataMin[SIZE*SIZE]; unsigned int s; int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; sdataMax[tid] = 0; sdataMin[tid] = 0x7FFFFFFF; while(i< NT) { if (sdataMax[tid] < Max[i]) sdataMax[tid] = Max[i]; if (sdataMin[tid] > Min[i]) sdataMin[tid] = Min[i]; if(i+blockDim.x < NT){ if (sdataMax[tid] < Max[i+blockDim.x]) sdataMax[tid] = Max[i+blockDim.x]; if (sdataMin[tid] > Min[i+blockDim.x]) sdataMin[tid] = Min[i+blockDim.x]; } i += gridSize; } __syncthreads(); for (s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { if (sdataMax[tid] < sdataMax[tid+s]) sdataMax[tid] = sdataMax[tid+s]; if (sdataMin[tid] > sdataMin[tid+s]) sdataMin[tid] = sdataMin[tid+s]; } __syncthreads(); } // desenrrollamos el ultimo warp activo if (tid < 32) { volatile int *smemMax = sdataMax; volatile int *smemMin = sdataMin; if (smemMax[tid] < smemMax[tid+32]) smemMax[tid] = smemMax[tid+32]; if (smemMax[tid] < smemMax[tid+16]) smemMax[tid] = smemMax[tid+16]; if (smemMax[tid] < smemMax[tid+8]) smemMax[tid] = smemMax[tid+8]; if (smemMax[tid] < smemMax[tid+4]) smemMax[tid] = smemMax[tid+4]; if (smemMax[tid] < smemMax[tid+2]) smemMax[tid] = smemMax[tid+2]; if (smemMax[tid] < smemMax[tid+1]) smemMax[tid] = smemMax[tid+1]; if (smemMin[tid] < smemMin[tid+32]) smemMin[tid] = smemMin[tid+32]; if (smemMin[tid] < smemMin[tid+16]) smemMin[tid] = smemMin[tid+16]; if (smemMin[tid] < smemMin[tid+8]) smemMin[tid] = smemMin[tid+8]; if (smemMin[tid] < smemMin[tid+4]) smemMin[tid] = smemMin[tid+4]; if (smemMin[tid] < smemMin[tid+2]) smemMin[tid] = smemMin[tid+2]; if (smemMin[tid] < smemMin[tid+1]) smemMin[tid] = smemMin[tid+1]; } // El thread 0 escribe el resultado de este bloque en la memoria global if (tid == 
0) { *factor = (float)(255.0/(float)(sdataMax[0]-sdataMin[0])); } } __global__ void KernelSobel2 (int N,int M,unsigned char *A, int *S, int NS, float *factor) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) { if(col != 0 && row != 0 && col != NS-1 && row != M-1) A[row*N+col*3] = A[row*N+col*3+1] = A[row*N+col*3+2] = (unsigned char)(S[row*NS+col] * factor[0]); else A[row*N+col*3] = A[row*N+col*3+1] = A[row*N+col*3+2] = 0; } } int main(int argc, char** argv) { unsigned int N, M, NS; unsigned int numBytesA, numBytesS, numBytesR; unsigned int nBlocksX, nBlocksY, nBlocksR, nThreads; float TiempoTotal, TiempoKernel, *d_factor, *h_factor, factor; cudaEvent_t E0, E1, E2, E3; unsigned char *d_A; int *d_S, *d_OutMax, *d_OutMin; if (argc != 3 && argc != 4) { printf("Usage: ./exe img.bmp prefix\n"); exit(0); } if (argc == 4){ factor = atof(argv[3]); h_factor = &factor; } printf("INICIO\n"); bmpInfoHeader header; unsigned char *image; image = LoadBMP(argv[1], &header); unsigned int N3 = header.width * 3; N = (N3+3) & 0xFFFFFFFC; //fila multiplo de 4 (BMP) M = header.height; NS = header.width; nThreads = SIZE; // numero de Blocks en cada dimension nBlocksX = (NS+nThreads-1)/nThreads; nBlocksY = (M+nThreads-1)/nThreads; numBytesA = N * M * sizeof(unsigned char); numBytesS = NS * M * sizeof(int); nBlocksR = ((NS * M)+(nThreads*nThreads-1)) / (nThreads*nThreads); numBytesR = nBlocksR *sizeof(int); dim3 dimGrid(nBlocksX, nBlocksY, 1); dim3 dimBlock(nThreads, nThreads, 1); dim3 dimGridR(nBlocksR, 1, 1); dim3 dimBlockR(nThreads * nThreads, 1, 1); cudaEventCreate(&E0); cudaEventCreate(&E1); cudaEventCreate(&E2); cudaEventCreate(&E3); cudaEventRecord(E0, 0); cudaEventSynchronize(E0); // Obtener Memoria en el device cudaMalloc((unsigned char**)&d_A, numBytesA); cudaMalloc((int**)&d_S, numBytesS); cudaMalloc((int**)&d_OutMax, numBytesR); cudaMalloc((int**)&d_OutMin, numBytesR); cudaMalloc((float**)&d_factor, 
sizeof(float)); // Copiar datos del host al device cudaMemcpy(d_A, image, numBytesA, cudaMemcpyHostToDevice); cudaMemcpy(d_factor, h_factor, sizeof(float), cudaMemcpyHostToDevice); cudaEventRecord(E1, 0); cudaEventSynchronize(E1); // Ejecutar el kernel KernelByN<<<dimGrid, dimBlock>>>(N, M, d_A, d_S, NS); KernelSobel1<<<dimGrid, dimBlock>>>(N, M, d_A, d_S, NS); KernelReduction1<<<dimGridR,dimBlockR>>>(NS*M, d_S, d_OutMin, d_OutMax); if(argc==3) KernelReduction2<<<1,dimBlockR>>>(nBlocksR, d_OutMin, d_OutMax, d_factor); KernelSobel2<<<dimGrid, dimBlock>>>(N, M, d_A, d_S, NS, d_factor); cudaEventRecord(E2, 0); cudaEventSynchronize(E2); // Obtener el resultado desde el host cudaMemcpy(image, d_A, numBytesA, cudaMemcpyDeviceToHost); // Liberar Memoria del device cudaFree(d_A); cudaFree(d_S); cudaFree(d_OutMin); cudaFree(d_OutMax); cudaFree(d_factor); cudaEventRecord(E3, 0); cudaEventSynchronize(E3); cudaEventElapsedTime(&TiempoTotal, E0, E3); cudaEventElapsedTime(&TiempoKernel, E1, E2); printf("\nKERNEL ByN & Reductions & Sobel\n"); printf("Dimensiones: %dx%d\n", NS, M); printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads * nThreads); printf("nBlocks: %dx%d (%d)\n", nBlocksX, nBlocksY, nBlocksX*nBlocksY); printf("nThreadsR1: %dx%d (%d)\n", nThreads*nThreads, 1, nThreads * nThreads); printf("nBlocksR1: %dx%d (%d)\n", nBlocksR, 1, nBlocksR); printf("nThreadsR2: %dx%d (%d)\n", nThreads*nThreads, 1, nThreads * nThreads); printf("nBlocksR2: %dx%d (%d)\n", 1, 1, 1); printf("Tiempo Global: %4.6f milseg\n", TiempoTotal); printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel); cudaEventDestroy(E0); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3); char nom[32]; strcpy(nom, argv[2]); strcat(nom, "_"); strcat(nom,argv[1]); SaveBMP(nom, &header, image); }
1a36d4f68713178ac9f3d765ef209efe37b62235.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <iostream> #include <vector> #include <map> #include <type_traits> #include <memory> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/gather.h> #include "gtest/gtest.h" #include "gmock/gmock.h" #include <cudf.h> #include <cudf/functions.h> #include <dataframe/cudf_table.cuh> #include <hash/hash_functions.cuh> #include <utilities/int_fastdiv.h> #include <rmm/thrust_rmm_allocator.h> #include "tests/utilities/cudf_test_utils.cuh" #include "tests/utilities/cudf_test_fixtures.h" template <template <typename> class hash_function, typename size_type> struct row_partition_mapper { __device__ row_partition_mapper(gdf_table<size_type> const & table_to_hash, const size_type _num_partitions) : the_table{table_to_hash}, num_partitions{_num_partitions} {} __device__ hash_value_type operator()(size_type row_index) const { return the_table.template hash_row<hash_function>(row_index) % num_partitions; } gdf_table<size_type> const & the_table; // Using int_fastdiv can return results different from using the normal modulus // operation, therefore we need to use it in result verfication as well size_type num_partitions; }; // Put all repeated setup and validation stuff here template <class test_parameters> struct HashPartitionTest : public GdfTest { constexpr static gdf_hash_func 
gdf_hash_function = test_parameters::gdf_hash_function; const int num_cols_to_hash = test_parameters::num_cols_to_hash; std::array<int, test_parameters::num_cols_to_hash> cols_to_hash = test_parameters::cols_to_hash; // multi_column_t is a tuple of vectors. The number of vectors in the tuple // determines the number of columns, and the value_type of each // vector determines the data type of the column using multi_column_t = typename test_parameters::multi_column_t; multi_column_t input_columns; multi_column_t output_columns; // Containers for unique_ptrs to gdf_columns // unique_ptrs are used to automate freeing device memory std::vector<gdf_col_pointer> gdf_input_columns; std::vector<gdf_col_pointer> gdf_output_columns; // Containers for the raw pointers to the gdf_columns std::vector<gdf_column*> raw_gdf_input_columns; std::vector<gdf_column*> raw_gdf_output_columns; HashPartitionTest() { // Use constant seed so the psuedo-random order is the same each time // Each time the class is constructed a new constant seed is used static size_t number_of_instantiations{0}; std::srand(number_of_instantiations++); } ~HashPartitionTest() { } void create_input( size_t num_rows, size_t max_value, bool print = false) { initialize_tuple(input_columns, num_rows, max_value); initialize_tuple(output_columns, num_rows, max_value); gdf_input_columns = initialize_gdf_columns(input_columns); gdf_output_columns = initialize_gdf_columns(output_columns); // Fill vector of raw pointers to gdf_columns for(auto const& c : gdf_input_columns){ this->raw_gdf_input_columns.push_back(c.get()); } for(auto const& c : gdf_output_columns){ this->raw_gdf_output_columns.push_back(c.get()); } if(print) { std::cout << "Input column(s) created. 
Size: " << std::get<0>(input_columns).size() << std::endl; print_tuple(input_columns); } } std::vector<int> compute_gdf_result(const int num_partitions, bool print = false) { const int num_columns = std::tuple_size<multi_column_t>::value; gdf_error result_error{GDF_SUCCESS}; gdf_column ** gdf_input_columns = raw_gdf_input_columns.data(); gdf_column ** gdf_output_columns = raw_gdf_output_columns.data(); std::vector<int> partition_offsets(num_partitions,0); result_error = gdf_hash_partition(num_columns, gdf_input_columns, this->cols_to_hash.data(), this->num_cols_to_hash, num_partitions, gdf_output_columns, partition_offsets.data(), gdf_hash_function); EXPECT_EQ(GDF_SUCCESS, result_error); if(print) { std::cout << "Partition offsets: "; for(int i = 0; i < num_partitions; ++i) { std::cout << partition_offsets[i] << " "; } std::cout << std::endl; } return partition_offsets; } void verify_gdf_result(int num_partitions, std::vector<int> partition_offsets, bool print = false) { std::vector<gdf_column*> gdf_cols_to_hash; for(int i = 0; i < num_cols_to_hash; ++i) { gdf_cols_to_hash.push_back(raw_gdf_output_columns[cols_to_hash[i]]); } // Create a table from the gdf output of only the columns that were hashed std::unique_ptr< gdf_table<int> > table_to_hash{new gdf_table<int>(num_cols_to_hash, gdf_cols_to_hash.data())}; rmm::device_vector<int> row_partition_numbers(table_to_hash->get_column_length()); // Compute the partition number for every row in the result switch(gdf_hash_function) { case GDF_HASH_MURMUR3: { thrust::tabulate(thrust::device, row_partition_numbers.begin(), row_partition_numbers.end(), row_partition_mapper<MurmurHash3_32,int>(*table_to_hash,num_partitions)); break; } case GDF_HASH_IDENTITY: { thrust::tabulate(thrust::device, row_partition_numbers.begin(), row_partition_numbers.end(), row_partition_mapper<IdentityHash,int>(*table_to_hash,num_partitions)); break; } default: std::cerr << "Invalid GDF hash function.\n"; } std::vector<int> 
host_row_partition_numbers(table_to_hash->get_column_length()); hipMemcpy(host_row_partition_numbers.data(), row_partition_numbers.data().get(), table_to_hash->get_column_length() * sizeof(int), hipMemcpyDeviceToHost); if(print) { std::cout << "Row partition numbers:\n"; std::copy(host_row_partition_numbers.begin(), host_row_partition_numbers.end(), std::ostream_iterator<int>(std::cout, ", ")); std::cout << std::endl; } // Check that the partition number for every row is correct for(int partition_number = 0; partition_number < num_partitions; ++partition_number) { const int partition_start = partition_offsets[partition_number]; int partition_stop{0}; if(partition_number < (num_partitions - 1)) { partition_stop = partition_offsets[partition_number + 1]; } // The end of the last partition is the end of the table else { partition_stop = table_to_hash->get_column_length(); } // Everything in the current partition should have the same partition // number for(int i = partition_start; i < partition_stop; ++i) { EXPECT_EQ(partition_number, host_row_partition_numbers[i]) << "Partition number for row: " << i << " doesn't match!"; } } } }; template< typename tuple_of_vectors, gdf_hash_func hash, int... 
cols> struct TestParameters { static_assert((std::tuple_size<tuple_of_vectors>::value >= sizeof...(cols)), "The number of columns to hash must be less than or equal to the total number of columns."); // The tuple of vectors that determines the number and types of the columns using multi_column_t = tuple_of_vectors; // The hash function to use constexpr static const gdf_hash_func gdf_hash_function = hash; // The number of columns to hash constexpr static const int num_cols_to_hash{sizeof...(cols)}; // The indices of the columns that will be hashed to determine the partitions constexpr static const std::array<int, sizeof...(cols)> cols_to_hash{{cols...}}; }; // Using Google Tests "Type Parameterized Tests" // Every test defined as TYPED_TEST(HashPartitionTest, *) will be run once for every instance of // TestParameters defined below // The number and types of columns determined by the number and types of vectors // in the VTuple<...> // The hash function to be used is determined by the gdf_hash_func enum // The columns to be hashed to determine the partition assignment are the last N integer template // arguments, where N <= the number of columns specified in the VTuple typedef ::testing::Types< TestParameters< VTuple<int32_t>, GDF_HASH_IDENTITY, 0 >, TestParameters< VTuple<int32_t, int32_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<float, double>, GDF_HASH_MURMUR3, 1>, TestParameters< VTuple<int64_t, int32_t>, GDF_HASH_MURMUR3, 1>, TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_IDENTITY, 2, 3>, TestParameters< VTuple<uint32_t, double, int32_t, double>, GDF_HASH_MURMUR3, 0, 2, 3>, TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_MURMUR3, 1, 3>, TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<float, int32_t>, GDF_HASH_MURMUR3, 0> >Implementations; TYPED_TEST_CASE(HashPartitionTest, Implementations); 
TYPED_TEST(HashPartitionTest, ExampleTest) { const int num_partitions = 5; this->create_input(100, 100); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, OnePartition) { const int num_partitions = 1; this->create_input(100000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, TenPartitions) { const int num_partitions = 10; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, EightPartitions) { const int num_partitions = 8; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, 257Partitions) { const int num_partitions = 257; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); }
1a36d4f68713178ac9f3d765ef209efe37b62235.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <iostream> #include <vector> #include <map> #include <type_traits> #include <memory> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/gather.h> #include "gtest/gtest.h" #include "gmock/gmock.h" #include <cudf.h> #include <cudf/functions.h> #include <dataframe/cudf_table.cuh> #include <hash/hash_functions.cuh> #include <utilities/int_fastdiv.h> #include <rmm/thrust_rmm_allocator.h> #include "tests/utilities/cudf_test_utils.cuh" #include "tests/utilities/cudf_test_fixtures.h" template <template <typename> class hash_function, typename size_type> struct row_partition_mapper { __device__ row_partition_mapper(gdf_table<size_type> const & table_to_hash, const size_type _num_partitions) : the_table{table_to_hash}, num_partitions{_num_partitions} {} __device__ hash_value_type operator()(size_type row_index) const { return the_table.template hash_row<hash_function>(row_index) % num_partitions; } gdf_table<size_type> const & the_table; // Using int_fastdiv can return results different from using the normal modulus // operation, therefore we need to use it in result verfication as well size_type num_partitions; }; // Put all repeated setup and validation stuff here template <class test_parameters> struct HashPartitionTest : public GdfTest { constexpr static gdf_hash_func gdf_hash_function = test_parameters::gdf_hash_function; const int 
num_cols_to_hash = test_parameters::num_cols_to_hash; std::array<int, test_parameters::num_cols_to_hash> cols_to_hash = test_parameters::cols_to_hash; // multi_column_t is a tuple of vectors. The number of vectors in the tuple // determines the number of columns, and the value_type of each // vector determines the data type of the column using multi_column_t = typename test_parameters::multi_column_t; multi_column_t input_columns; multi_column_t output_columns; // Containers for unique_ptrs to gdf_columns // unique_ptrs are used to automate freeing device memory std::vector<gdf_col_pointer> gdf_input_columns; std::vector<gdf_col_pointer> gdf_output_columns; // Containers for the raw pointers to the gdf_columns std::vector<gdf_column*> raw_gdf_input_columns; std::vector<gdf_column*> raw_gdf_output_columns; HashPartitionTest() { // Use constant seed so the psuedo-random order is the same each time // Each time the class is constructed a new constant seed is used static size_t number_of_instantiations{0}; std::srand(number_of_instantiations++); } ~HashPartitionTest() { } void create_input( size_t num_rows, size_t max_value, bool print = false) { initialize_tuple(input_columns, num_rows, max_value); initialize_tuple(output_columns, num_rows, max_value); gdf_input_columns = initialize_gdf_columns(input_columns); gdf_output_columns = initialize_gdf_columns(output_columns); // Fill vector of raw pointers to gdf_columns for(auto const& c : gdf_input_columns){ this->raw_gdf_input_columns.push_back(c.get()); } for(auto const& c : gdf_output_columns){ this->raw_gdf_output_columns.push_back(c.get()); } if(print) { std::cout << "Input column(s) created. 
Size: " << std::get<0>(input_columns).size() << std::endl; print_tuple(input_columns); } } std::vector<int> compute_gdf_result(const int num_partitions, bool print = false) { const int num_columns = std::tuple_size<multi_column_t>::value; gdf_error result_error{GDF_SUCCESS}; gdf_column ** gdf_input_columns = raw_gdf_input_columns.data(); gdf_column ** gdf_output_columns = raw_gdf_output_columns.data(); std::vector<int> partition_offsets(num_partitions,0); result_error = gdf_hash_partition(num_columns, gdf_input_columns, this->cols_to_hash.data(), this->num_cols_to_hash, num_partitions, gdf_output_columns, partition_offsets.data(), gdf_hash_function); EXPECT_EQ(GDF_SUCCESS, result_error); if(print) { std::cout << "Partition offsets: "; for(int i = 0; i < num_partitions; ++i) { std::cout << partition_offsets[i] << " "; } std::cout << std::endl; } return partition_offsets; } void verify_gdf_result(int num_partitions, std::vector<int> partition_offsets, bool print = false) { std::vector<gdf_column*> gdf_cols_to_hash; for(int i = 0; i < num_cols_to_hash; ++i) { gdf_cols_to_hash.push_back(raw_gdf_output_columns[cols_to_hash[i]]); } // Create a table from the gdf output of only the columns that were hashed std::unique_ptr< gdf_table<int> > table_to_hash{new gdf_table<int>(num_cols_to_hash, gdf_cols_to_hash.data())}; rmm::device_vector<int> row_partition_numbers(table_to_hash->get_column_length()); // Compute the partition number for every row in the result switch(gdf_hash_function) { case GDF_HASH_MURMUR3: { thrust::tabulate(thrust::device, row_partition_numbers.begin(), row_partition_numbers.end(), row_partition_mapper<MurmurHash3_32,int>(*table_to_hash,num_partitions)); break; } case GDF_HASH_IDENTITY: { thrust::tabulate(thrust::device, row_partition_numbers.begin(), row_partition_numbers.end(), row_partition_mapper<IdentityHash,int>(*table_to_hash,num_partitions)); break; } default: std::cerr << "Invalid GDF hash function.\n"; } std::vector<int> 
host_row_partition_numbers(table_to_hash->get_column_length()); cudaMemcpy(host_row_partition_numbers.data(), row_partition_numbers.data().get(), table_to_hash->get_column_length() * sizeof(int), cudaMemcpyDeviceToHost); if(print) { std::cout << "Row partition numbers:\n"; std::copy(host_row_partition_numbers.begin(), host_row_partition_numbers.end(), std::ostream_iterator<int>(std::cout, ", ")); std::cout << std::endl; } // Check that the partition number for every row is correct for(int partition_number = 0; partition_number < num_partitions; ++partition_number) { const int partition_start = partition_offsets[partition_number]; int partition_stop{0}; if(partition_number < (num_partitions - 1)) { partition_stop = partition_offsets[partition_number + 1]; } // The end of the last partition is the end of the table else { partition_stop = table_to_hash->get_column_length(); } // Everything in the current partition should have the same partition // number for(int i = partition_start; i < partition_stop; ++i) { EXPECT_EQ(partition_number, host_row_partition_numbers[i]) << "Partition number for row: " << i << " doesn't match!"; } } } }; template< typename tuple_of_vectors, gdf_hash_func hash, int... 
cols> struct TestParameters { static_assert((std::tuple_size<tuple_of_vectors>::value >= sizeof...(cols)), "The number of columns to hash must be less than or equal to the total number of columns."); // The tuple of vectors that determines the number and types of the columns using multi_column_t = tuple_of_vectors; // The hash function to use constexpr static const gdf_hash_func gdf_hash_function = hash; // The number of columns to hash constexpr static const int num_cols_to_hash{sizeof...(cols)}; // The indices of the columns that will be hashed to determine the partitions constexpr static const std::array<int, sizeof...(cols)> cols_to_hash{{cols...}}; }; // Using Google Tests "Type Parameterized Tests" // Every test defined as TYPED_TEST(HashPartitionTest, *) will be run once for every instance of // TestParameters defined below // The number and types of columns determined by the number and types of vectors // in the VTuple<...> // The hash function to be used is determined by the gdf_hash_func enum // The columns to be hashed to determine the partition assignment are the last N integer template // arguments, where N <= the number of columns specified in the VTuple typedef ::testing::Types< TestParameters< VTuple<int32_t>, GDF_HASH_IDENTITY, 0 >, TestParameters< VTuple<int32_t, int32_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<float, double>, GDF_HASH_MURMUR3, 1>, TestParameters< VTuple<int64_t, int32_t>, GDF_HASH_MURMUR3, 1>, TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_IDENTITY, 2, 3>, TestParameters< VTuple<uint32_t, double, int32_t, double>, GDF_HASH_MURMUR3, 0, 2, 3>, TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_MURMUR3, 1, 3>, TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<float, int32_t>, GDF_HASH_MURMUR3, 0> >Implementations; TYPED_TEST_CASE(HashPartitionTest, Implementations); 
TYPED_TEST(HashPartitionTest, ExampleTest) { const int num_partitions = 5; this->create_input(100, 100); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, OnePartition) { const int num_partitions = 1; this->create_input(100000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, TenPartitions) { const int num_partitions = 10; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, EightPartitions) { const int num_partitions = 8; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, 257Partitions) { const int num_partitions = 257; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); }
b0b82ea3342870ae3029d14eefb14d80fdfa414f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <iostream> #include "kernel_hip.cuh" #include "string.h" using namespace std; const int THREADS_IN_BLOCK = 512; const int POINTS_PER_THREAD = 128; //using kernels to locate and free 2 dimensional arrays template<typename T> __global__ void freeArray(T** d_array, unsigned int first_size) { unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid < first_size) free(d_array[tid]); } //result is d_array[first_size][second_size] template<typename T> __global__ void allocArray(T** d_array, unsigned int first_size, unsigned int second_size) { unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid < first_size) d_array[tid] = (T*)malloc(second_size * sizeof(T)); } template<typename T> static T** init_d_array(unsigned int first_size, unsigned int second_size) { T** result; CHECK_ERRORS(hipMalloc(&result, first_size * sizeof(T*))); //count how many blocks do we need, add one to it if something left allocArray << <first_size / THREADS_IN_BLOCK + !!(first_size % THREADS_IN_BLOCK), THREADS_IN_BLOCK >> > (result, first_size, second_size); hipDeviceSynchronize(); return result; } template<typename T> static void free_d_array(T** d_array, unsigned int first_size) { freeArray << <first_size / THREADS_IN_BLOCK + !!(first_size%THREADS_IN_BLOCK), THREADS_IN_BLOCK >> > (d_array, first_size); hipDeviceSynchronize(); CHECK_ERRORS(hipFree(d_array)); } // Kernel used to run threads FULL of points , every thread checks full POINTS_PER_THREAD points. 
__global__ void KMeansFullThreads(double *points, unsigned int number_of_centroids, unsigned int nT, unsigned int *membership, double * d_centres, unsigned int **centroid_size, double **new_centres, unsigned int *d, unsigned int offset = 0) { extern __shared__ double centres[]; unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x + offset; d[tid] = 0; for (int i = 0; i < number_of_centroids; ++i) { centroid_size[i][tid] = 0; } unsigned int tid_second = threadIdx.x; //in case there are less threads than fields in the shared centres array while (tid_second < number_of_centroids * DIMENSION) { centres[tid_second] = d_centres[tid_second]; tid_second += blockDim.x; } for (int i = 0; i < number_of_centroids*DIMENSION; ++i) { new_centres[i][tid] = 0; } for (int i = 0; i < nT; ++i) { double dist1 = distance(&points[i * DIMENSION + nT * tid * DIMENSION], centres); unsigned int current_centroid_number = 0; for (int j = 1; j < number_of_centroids; ++j) { double dist2 = distance(&points[i * DIMENSION + nT * tid * DIMENSION], &centres[j * DIMENSION]); if (dist2 < dist1) { dist1 = dist2; current_centroid_number = j; } } if (membership[i + nT * tid] != current_centroid_number) { ++(d[tid]); membership[i + nT * tid] = current_centroid_number; } centroid_size[current_centroid_number][tid]++; for (int j = 0; j < DIMENSION; ++j) { new_centres[current_centroid_number * DIMENSION + j][tid] += points[i * DIMENSION + nT * tid * DIMENSION + j]; } } } //kernel used with only one thread that checks how_many_points , not full POINTS_PER_THREAD __global__ void KMeansSingleNotFullThread(double *points, unsigned int number_of_centroids, unsigned int nT, unsigned int *membership, double * d_centres, unsigned int **centroid_size, double **new_centres, unsigned int *number_of_changes, unsigned int offset, unsigned int how_many_points) { extern __shared__ double centres[]; unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x + offset; number_of_changes[tid] = 0; for (int i = 0; i < 
number_of_centroids; ++i) { centroid_size[i][tid] = 0; } unsigned int tid_second = threadIdx.x; //in case there are less threads than fields in the shared centres array while (tid_second < number_of_centroids * DIMENSION) { centres[tid_second] = d_centres[tid_second]; tid_second += blockDim.x; } for (int i = 0; i < number_of_centroids * DIMENSION; ++i) { new_centres[i][tid] = 0; } for (int i = 0; i < how_many_points; ++i) { double dist = distance(&points[i * DIMENSION + nT * tid * DIMENSION], centres); unsigned int current_centroid_number = 0; for (int j = 1; j < number_of_centroids; ++j) { double dist2 = distance(&points[i * DIMENSION + nT * tid * DIMENSION], &centres[j * DIMENSION]); if (dist2 < dist) { dist = dist2; current_centroid_number = j; } } if (membership[i + nT * tid] != current_centroid_number) { number_of_changes[tid] += 1; membership[i + nT * tid] = current_centroid_number; } centroid_size[current_centroid_number][tid]++; for (int j = 0; j < DIMENSION; ++j) { new_centres[current_centroid_number * DIMENSION + j][tid] += points[i * DIMENSION + nT * tid * DIMENSION + j]; } } } void kMeansGPU(double points[][DIMENSION], unsigned int number_of_centroids, unsigned int number_of_points, unsigned int membership[], double centres[][DIMENSION], double threshold) { // we take first number_of_centroids points as the starting centroids unsigned int THREADS = number_of_points / POINTS_PER_THREAD; unsigned int BLOCKS = THREADS / THREADS_IN_BLOCK; unsigned int THREADS_IN_NOT_FULL_BLOCK = THREADS - BLOCKS * THREADS_IN_BLOCK; unsigned int POINTS_IN_NOT_FULL_THREAD = number_of_points % POINTS_PER_THREAD; //printf("GPU: %d %d %d %d %d\n", number_of_points,THREADS, BLOCKS, THREADS_IN_NOT_FULL_BLOCK, POINTS_IN_NOT_FULL_THREAD); //if there are points in the not full thread, add 1 THREADS += !!POINTS_IN_NOT_FULL_THREAD; for (int i = 0; i < number_of_centroids; ++i) { for (int j = 0; j < DIMENSION; ++j) { centres[i][j] = points[i][j]; } } double **d_new_centres, *d_centres, 
**d_new_centres_h; unsigned int *d_membership, **d_centroid_size, *d_number_of_changes, **d_centroid_size_h; CHECK_ERRORS(hipMalloc(&d_membership, sizeof(unsigned int) * number_of_points)); CHECK_ERRORS(hipMemset(d_membership, ~0, sizeof(unsigned int) * number_of_points)); CHECK_ERRORS(hipMalloc(&d_centres, number_of_centroids * DIMENSION * sizeof(double))); CHECK_ERRORS(hipMalloc(&d_number_of_changes, sizeof(unsigned int) * THREADS)); d_new_centres = init_d_array<double>(number_of_centroids * DIMENSION, THREADS); d_new_centres_h = new double*[number_of_centroids*DIMENSION]; CHECK_ERRORS(hipMemcpy(d_new_centres_h, d_new_centres, sizeof(double*)*number_of_centroids*DIMENSION, hipMemcpyDeviceToHost)); d_centroid_size = init_d_array<unsigned int>(number_of_centroids, THREADS); d_centroid_size_h = new unsigned int*[number_of_centroids]; CHECK_ERRORS(hipMemcpy(d_centroid_size_h, d_centroid_size, sizeof(unsigned int*)*number_of_centroids, hipMemcpyDeviceToHost)); unsigned int number_of_changes; double *d_points; CHECK_ERRORS(hipMalloc(&d_points, DIMENSION * number_of_points * sizeof(double))); CHECK_ERRORS(hipMemcpy(d_points, points, DIMENSION * number_of_points * sizeof(double), hipMemcpyHostToDevice)); unsigned int iteration = 0; do { number_of_changes = 0; CHECK_ERRORS(hipMemcpy(d_centres, centres, number_of_centroids * DIMENSION * sizeof(double), hipMemcpyHostToDevice)); if (BLOCKS) { KMeansFullThreads <<< BLOCKS, THREADS_IN_BLOCK, number_of_centroids * DIMENSION * sizeof(double) >> > (d_points, number_of_centroids, POINTS_PER_THREAD, d_membership, d_centres, d_centroid_size, d_new_centres, d_number_of_changes, 0); CHECK_ERRORS(hipDeviceSynchronize()); } if (THREADS_IN_NOT_FULL_BLOCK) { KMeansFullThreads << < 1, THREADS_IN_NOT_FULL_BLOCK, number_of_centroids * DIMENSION * sizeof(double) >> > (d_points, number_of_centroids, POINTS_PER_THREAD, d_membership, d_centres, d_centroid_size, d_new_centres, d_number_of_changes, THREADS_IN_BLOCK * BLOCKS); 
CHECK_ERRORS(hipDeviceSynchronize()); } if (POINTS_IN_NOT_FULL_THREAD) { KMeansSingleNotFullThread <<< 1, 1, number_of_centroids*DIMENSION * sizeof(double) >> > (d_points, number_of_centroids, POINTS_PER_THREAD, d_membership, d_centres, d_centroid_size, d_new_centres, d_number_of_changes, THREADS_IN_BLOCK *BLOCKS + THREADS_IN_NOT_FULL_BLOCK, POINTS_IN_NOT_FULL_THREAD); CHECK_ERRORS(hipDeviceSynchronize()); } for (int i = 0; i < number_of_centroids; ++i) { thrust::device_ptr<unsigned int> d_1 = thrust::device_pointer_cast(d_centroid_size_h[i]); unsigned int quotient = thrust::reduce(d_1, d_1 + THREADS); //if there is no point in a centroid , we would divide by 0, so we set it to 1 //quotient = quotient == 0 ? 1 : quotient; //printf("centroid number is %d, quotient is %u\n", i, quotient); for (int j = 0; j < DIMENSION; ++j) { thrust::device_ptr<double> d_2 = thrust::device_pointer_cast(d_new_centres_h[i * DIMENSION + j]); centres[i][j] = thrust::reduce(d_2, d_2 + THREADS) / quotient; } //printf("center of %d is : %f, %f, %f\n", i, centres[i][0], centres[i][1], centres[i][2]); } thrust::device_ptr<unsigned int> d_3 = thrust::device_pointer_cast(d_number_of_changes); number_of_changes = thrust::reduce(d_3, d_3 + THREADS); //printf("End of iteration number %d, d is equal to %u)\n", iteration , d); ++iteration; } while (number_of_changes / (float)number_of_points > threshold); printf("Number of iterations: %u", number_of_changes); CHECK_ERRORS(hipMemcpy(membership, d_membership, sizeof(unsigned int)*number_of_points, hipMemcpyDeviceToHost)); CHECK_ERRORS(hipFree(d_number_of_changes)); CHECK_ERRORS(hipFree(d_membership)); CHECK_ERRORS(hipFree(d_points)); free_d_array(d_new_centres, number_of_centroids * DIMENSION); free_d_array(d_centroid_size, number_of_centroids); }
b0b82ea3342870ae3029d14eefb14d80fdfa414f.cu
#include <thrust/device_vector.h> #include <thrust/reduce.h> #include <iostream> #include "kernel.cuh" #include "string.h" using namespace std; const int THREADS_IN_BLOCK = 512; const int POINTS_PER_THREAD = 128; //using kernels to locate and free 2 dimensional arrays template<typename T> __global__ void freeArray(T** d_array, unsigned int first_size) { unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid < first_size) free(d_array[tid]); } //result is d_array[first_size][second_size] template<typename T> __global__ void allocArray(T** d_array, unsigned int first_size, unsigned int second_size) { unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid < first_size) d_array[tid] = (T*)malloc(second_size * sizeof(T)); } template<typename T> static T** init_d_array(unsigned int first_size, unsigned int second_size) { T** result; CHECK_ERRORS(cudaMalloc(&result, first_size * sizeof(T*))); //count how many blocks do we need, add one to it if something left allocArray << <first_size / THREADS_IN_BLOCK + !!(first_size % THREADS_IN_BLOCK), THREADS_IN_BLOCK >> > (result, first_size, second_size); cudaDeviceSynchronize(); return result; } template<typename T> static void free_d_array(T** d_array, unsigned int first_size) { freeArray << <first_size / THREADS_IN_BLOCK + !!(first_size%THREADS_IN_BLOCK), THREADS_IN_BLOCK >> > (d_array, first_size); cudaDeviceSynchronize(); CHECK_ERRORS(cudaFree(d_array)); } // Kernel used to run threads FULL of points , every thread checks full POINTS_PER_THREAD points. 
__global__ void KMeansFullThreads(double *points, unsigned int number_of_centroids, unsigned int nT, unsigned int *membership, double * d_centres, unsigned int **centroid_size, double **new_centres, unsigned int *d, unsigned int offset = 0) { extern __shared__ double centres[]; unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x + offset; d[tid] = 0; for (int i = 0; i < number_of_centroids; ++i) { centroid_size[i][tid] = 0; } unsigned int tid_second = threadIdx.x; //in case there are less threads than fields in the shared centres array while (tid_second < number_of_centroids * DIMENSION) { centres[tid_second] = d_centres[tid_second]; tid_second += blockDim.x; } for (int i = 0; i < number_of_centroids*DIMENSION; ++i) { new_centres[i][tid] = 0; } for (int i = 0; i < nT; ++i) { double dist1 = distance(&points[i * DIMENSION + nT * tid * DIMENSION], centres); unsigned int current_centroid_number = 0; for (int j = 1; j < number_of_centroids; ++j) { double dist2 = distance(&points[i * DIMENSION + nT * tid * DIMENSION], &centres[j * DIMENSION]); if (dist2 < dist1) { dist1 = dist2; current_centroid_number = j; } } if (membership[i + nT * tid] != current_centroid_number) { ++(d[tid]); membership[i + nT * tid] = current_centroid_number; } centroid_size[current_centroid_number][tid]++; for (int j = 0; j < DIMENSION; ++j) { new_centres[current_centroid_number * DIMENSION + j][tid] += points[i * DIMENSION + nT * tid * DIMENSION + j]; } } } //kernel used with only one thread that checks how_many_points , not full POINTS_PER_THREAD __global__ void KMeansSingleNotFullThread(double *points, unsigned int number_of_centroids, unsigned int nT, unsigned int *membership, double * d_centres, unsigned int **centroid_size, double **new_centres, unsigned int *number_of_changes, unsigned int offset, unsigned int how_many_points) { extern __shared__ double centres[]; unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x + offset; number_of_changes[tid] = 0; for (int i = 0; i < 
number_of_centroids; ++i) { centroid_size[i][tid] = 0; } unsigned int tid_second = threadIdx.x; //in case there are less threads than fields in the shared centres array while (tid_second < number_of_centroids * DIMENSION) { centres[tid_second] = d_centres[tid_second]; tid_second += blockDim.x; } for (int i = 0; i < number_of_centroids * DIMENSION; ++i) { new_centres[i][tid] = 0; } for (int i = 0; i < how_many_points; ++i) { double dist = distance(&points[i * DIMENSION + nT * tid * DIMENSION], centres); unsigned int current_centroid_number = 0; for (int j = 1; j < number_of_centroids; ++j) { double dist2 = distance(&points[i * DIMENSION + nT * tid * DIMENSION], &centres[j * DIMENSION]); if (dist2 < dist) { dist = dist2; current_centroid_number = j; } } if (membership[i + nT * tid] != current_centroid_number) { number_of_changes[tid] += 1; membership[i + nT * tid] = current_centroid_number; } centroid_size[current_centroid_number][tid]++; for (int j = 0; j < DIMENSION; ++j) { new_centres[current_centroid_number * DIMENSION + j][tid] += points[i * DIMENSION + nT * tid * DIMENSION + j]; } } } void kMeansGPU(double points[][DIMENSION], unsigned int number_of_centroids, unsigned int number_of_points, unsigned int membership[], double centres[][DIMENSION], double threshold) { // we take first number_of_centroids points as the starting centroids unsigned int THREADS = number_of_points / POINTS_PER_THREAD; unsigned int BLOCKS = THREADS / THREADS_IN_BLOCK; unsigned int THREADS_IN_NOT_FULL_BLOCK = THREADS - BLOCKS * THREADS_IN_BLOCK; unsigned int POINTS_IN_NOT_FULL_THREAD = number_of_points % POINTS_PER_THREAD; //printf("GPU: %d %d %d %d %d\n", number_of_points,THREADS, BLOCKS, THREADS_IN_NOT_FULL_BLOCK, POINTS_IN_NOT_FULL_THREAD); //if there are points in the not full thread, add 1 THREADS += !!POINTS_IN_NOT_FULL_THREAD; for (int i = 0; i < number_of_centroids; ++i) { for (int j = 0; j < DIMENSION; ++j) { centres[i][j] = points[i][j]; } } double **d_new_centres, *d_centres, 
**d_new_centres_h; unsigned int *d_membership, **d_centroid_size, *d_number_of_changes, **d_centroid_size_h; CHECK_ERRORS(cudaMalloc(&d_membership, sizeof(unsigned int) * number_of_points)); CHECK_ERRORS(cudaMemset(d_membership, ~0, sizeof(unsigned int) * number_of_points)); CHECK_ERRORS(cudaMalloc(&d_centres, number_of_centroids * DIMENSION * sizeof(double))); CHECK_ERRORS(cudaMalloc(&d_number_of_changes, sizeof(unsigned int) * THREADS)); d_new_centres = init_d_array<double>(number_of_centroids * DIMENSION, THREADS); d_new_centres_h = new double*[number_of_centroids*DIMENSION]; CHECK_ERRORS(cudaMemcpy(d_new_centres_h, d_new_centres, sizeof(double*)*number_of_centroids*DIMENSION, cudaMemcpyDeviceToHost)); d_centroid_size = init_d_array<unsigned int>(number_of_centroids, THREADS); d_centroid_size_h = new unsigned int*[number_of_centroids]; CHECK_ERRORS(cudaMemcpy(d_centroid_size_h, d_centroid_size, sizeof(unsigned int*)*number_of_centroids, cudaMemcpyDeviceToHost)); unsigned int number_of_changes; double *d_points; CHECK_ERRORS(cudaMalloc(&d_points, DIMENSION * number_of_points * sizeof(double))); CHECK_ERRORS(cudaMemcpy(d_points, points, DIMENSION * number_of_points * sizeof(double), cudaMemcpyHostToDevice)); unsigned int iteration = 0; do { number_of_changes = 0; CHECK_ERRORS(cudaMemcpy(d_centres, centres, number_of_centroids * DIMENSION * sizeof(double), cudaMemcpyHostToDevice)); if (BLOCKS) { KMeansFullThreads <<< BLOCKS, THREADS_IN_BLOCK, number_of_centroids * DIMENSION * sizeof(double) >> > (d_points, number_of_centroids, POINTS_PER_THREAD, d_membership, d_centres, d_centroid_size, d_new_centres, d_number_of_changes, 0); CHECK_ERRORS(cudaDeviceSynchronize()); } if (THREADS_IN_NOT_FULL_BLOCK) { KMeansFullThreads << < 1, THREADS_IN_NOT_FULL_BLOCK, number_of_centroids * DIMENSION * sizeof(double) >> > (d_points, number_of_centroids, POINTS_PER_THREAD, d_membership, d_centres, d_centroid_size, d_new_centres, d_number_of_changes, THREADS_IN_BLOCK * BLOCKS); 
CHECK_ERRORS(cudaDeviceSynchronize()); } if (POINTS_IN_NOT_FULL_THREAD) { KMeansSingleNotFullThread <<< 1, 1, number_of_centroids*DIMENSION * sizeof(double) >> > (d_points, number_of_centroids, POINTS_PER_THREAD, d_membership, d_centres, d_centroid_size, d_new_centres, d_number_of_changes, THREADS_IN_BLOCK *BLOCKS + THREADS_IN_NOT_FULL_BLOCK, POINTS_IN_NOT_FULL_THREAD); CHECK_ERRORS(cudaDeviceSynchronize()); } for (int i = 0; i < number_of_centroids; ++i) { thrust::device_ptr<unsigned int> d_1 = thrust::device_pointer_cast(d_centroid_size_h[i]); unsigned int quotient = thrust::reduce(d_1, d_1 + THREADS); //if there is no point in a centroid , we would divide by 0, so we set it to 1 //quotient = quotient == 0 ? 1 : quotient; //printf("centroid number is %d, quotient is %u\n", i, quotient); for (int j = 0; j < DIMENSION; ++j) { thrust::device_ptr<double> d_2 = thrust::device_pointer_cast(d_new_centres_h[i * DIMENSION + j]); centres[i][j] = thrust::reduce(d_2, d_2 + THREADS) / quotient; } //printf("center of %d is : %f, %f, %f\n", i, centres[i][0], centres[i][1], centres[i][2]); } thrust::device_ptr<unsigned int> d_3 = thrust::device_pointer_cast(d_number_of_changes); number_of_changes = thrust::reduce(d_3, d_3 + THREADS); //printf("End of iteration number %d, d is equal to %u)\n", iteration , d); ++iteration; } while (number_of_changes / (float)number_of_points > threshold); printf("Number of iterations: %u", number_of_changes); CHECK_ERRORS(cudaMemcpy(membership, d_membership, sizeof(unsigned int)*number_of_points, cudaMemcpyDeviceToHost)); CHECK_ERRORS(cudaFree(d_number_of_changes)); CHECK_ERRORS(cudaFree(d_membership)); CHECK_ERRORS(cudaFree(d_points)); free_d_array(d_new_centres, number_of_centroids * DIMENSION); free_d_array(d_centroid_size, number_of_centroids); }
6b425088f2f4bb89f2ffa7c13ff0d428fce5e591.hip
// !!! This is a file automatically generated by hipify!!! #include "EnemyBulletUpdater.h" #include <hip/hip_runtime.h> #include "../EnemyBullet.h" #include "EnemyBulletUpdaterKernel.cuh" void EnemyBulletUpdater::Run() { int length = EnemyBulletPtr::length; // CPUGPU hipMemcpyAsync( EnemyBulletPtr::device, EnemyBulletPtr::host, sizeof(EnemyBullet) * length, hipMemcpyHostToDevice, EnemyBulletPtr::stream); dim3 block(256, 1, 1); dim3 grid((length + 256 - 1) / 256, 1, 1); // EnemyBulletUpdaterKernel::Process << <grid, block, 0, EnemyBulletPtr::stream >> > (EnemyBulletPtr::device, length); // GPUCPU hipMemcpyAsync( EnemyBulletPtr::host, EnemyBulletPtr::device, sizeof(EnemyBullet) * length, hipMemcpyDeviceToHost, EnemyBulletPtr::stream); }
6b425088f2f4bb89f2ffa7c13ff0d428fce5e591.cu
#include "EnemyBulletUpdater.h" #include <cuda_runtime.h> #include "../EnemyBullet.h" #include "EnemyBulletUpdaterKernel.cuh" void EnemyBulletUpdater::Run() { int length = EnemyBulletPtr::length; // CPUからGPUにデータを転送 cudaMemcpyAsync( EnemyBulletPtr::device, EnemyBulletPtr::host, sizeof(EnemyBullet) * length, cudaMemcpyHostToDevice, EnemyBulletPtr::stream); dim3 block(256, 1, 1); dim3 grid((length + 256 - 1) / 256, 1, 1); // 敵弾更新カーネル実行 EnemyBulletUpdaterKernel::Process << <grid, block, 0, EnemyBulletPtr::stream >> > (EnemyBulletPtr::device, length); // GPUからCPUにデータを転送 cudaMemcpyAsync( EnemyBulletPtr::host, EnemyBulletPtr::device, sizeof(EnemyBullet) * length, cudaMemcpyDeviceToHost, EnemyBulletPtr::stream); }
72c7af634d97dcd5c0b6a117a40ec5c7eb319a6f.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************/ /* */ /* MOD_CUDA.CU - Core CUDA routines for model training */ /* */ /******************************************************************************/ #define STRICT #include <windows.h> #include <commctrl.h> #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include <ctype.h> #include <malloc.h> #include <new.h> #include <float.h> #include <hip/driver_types.h> #include <hip/hip_runtime_api.h> #include "convnet.rh" #include "const.h" #include "classes.h" #include "extern.h" #include "funcdefs.h" #define MAX_EXP 300.0 // NOTE... To set up a new project for CUDA, right-click the project name in // Solution Explorer, click Build Customizations, select CUDA. // Linker needs additional library directory $(CudaToolkitLibDir) // Also needs Input/ Additional dependencies cuda.lib;cudart.lib // This is used as intermediary between device's float and hosts double static float *fdata = NULL ; static int n_weights ; // Total number of weights across all layers static int n_weights_on_device ; // Ditto, but extended for 128-byte rows static int max_convgrad_work ; // Work area size (# of floats) for CONV gradient, = max_batch * max_convgrad_each static int max_batch ; // Max number of cases in a launched batch // This is strictly for printing memory allocation info for the user static double total_memory = 0.0 ; // These are for the reductions used in device_ll // The number of threads MUST be a power of two! // The number of blocks given here is a maximum. The actual number may be less. 
#define REDUC_THREADS 256 #define REDUC_BLOCKS 64 // This is for shared memory staging of convolution #define BLOCK_SIZE 32 static float *reduc_fdata = NULL ; // These are set in ?_cuda_init and used by the host routine that launches the kernel // They are basic app parameters, constant for all launches // Names that begin with d_ are in the device namespace. // Names that begin with h_ are in the host namespace and equal the device value. // This lets us save a little time by avoiding the need to pass a bunch of parameters in the launch. // We could, of course, just pass data pointers as parameters. But that's overhead. // So instead we use hipMemcpyToSymbol() to copy the values in the host namespace // to values on the device. This lets __global routines address the values that are // already set on the device rather than having to use passed parameters. // The savings is probably small, but worthwhile. __constant__ int d_ncases ; // Number of cases in complete training set __constant__ int d_img_rows ; // Number of rows in input image __constant__ int d_img_cols ; // Number of cols in input image __constant__ int d_img_bands ; // Number of bands in input image __constant__ int d_n_pred ; // Number of predictors __constant__ int d_n_classes ; // Number of classes __constant__ int d_n_classes_cols ; // Ditto, extended to multiple of 128 bytes (32 floats) (actual) __constant__ int d_n_layers ; // Number of hidden layers; does not include output layer __constant__ int d_n_weights ; // Total number of weights across all layers __constant__ int d_convgrad_cols[MAX_LAYERS] ; // n_prior_weights[ilayer] bumped up to multiple of 32 __constant__ int d_max_convgrad_each ; // Max hid * convwts_cols in a CONV hid grad launch (work area per case) // This holds a single case // See the convgrad_work allocation section for details // max_convgrad_work = this times max_batch __constant__ int d_layer_type[MAX_LAYERS] ;// TYPE_? 
in CONST.H __constant__ int d_nhid[MAX_LAYERS] ; // Number of neurons in each of the hidden layers = height*width*depth __constant__ int d_nhid_cols[MAX_LAYERS] ; // Ditto, extended to multiple of 128 bytes (32 floats) (actual) __constant__ int d_height[MAX_LAYERS] ; // Height (rows) of each layer __constant__ int d_width[MAX_LAYERS] ; // And width __constant__ int d_depth[MAX_LAYERS] ; // And number of slices __constant__ int d_depth_cols[MAX_LAYERS] ; // Ditto, extended to multiple of 128 bytes (32 floats) (actual); for CONV only __constant__ int d_n_prior_weights[MAX_LAYERS] ; // N of inputs per neuron (including bias) to prior layer = prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1 // A CONV layer has this many weights per layer (slice); a LOCAL layer has this times its nhid __constant__ int d_HalfWidH[MAX_LAYERS] ; // Horizontal half width looking back to prior layer __constant__ int d_HalfWidV[MAX_LAYERS] ; // And vertical __constant__ int d_padH[MAX_LAYERS] ; // Horizontal padding, should not exceed half width __constant__ int d_padV[MAX_LAYERS] ; // And vertical __constant__ int d_strideH[MAX_LAYERS] ; // Horizontal stride __constant__ int d_strideV[MAX_LAYERS] ; // And vertical __constant__ int d_PoolWidH[MAX_LAYERS] ; // Horizontal pooling width looking back to prior layer __constant__ int d_PoolWidV[MAX_LAYERS] ; // And vertical static float *h_predictors = NULL ; // Raw training data; n_cases by n_pred __constant__ float *d_predictors ; static int *h_class = NULL ; // Class id is here __constant__ int *d_class ; static double *activations = NULL ; // Activations of this layer, which we compute __constant__ double *d_act[MAX_LAYERS] ; // Pointers to activation vector of each layer static double *h_output = NULL ; // Output activations __constant__ double *d_output ; static int *h_poolmax_id[MAX_LAYERS] ; // Used only for POOLMAX layer; saves from forward pass ID of max input for backprop pass __constant__ int *d_poolmax_id[MAX_LAYERS] ; // Pointers to 
id vector for each layer; NULL for other than POOLMAX layer static float *weights = NULL ; // All weights, including output layer __constant__ float *d_weights[MAX_LAYERS+1] ; // Pointers to weight vector of each layer, including output // WARNING... If gradient is ever double instead of float, see MLFN_CUDA.CPP for integer overflow check! static float *grad = NULL ; // Gradient for all weights, including output layer __constant__ float *d_grad[MAX_LAYERS+1] ; // Pointers to grad vector of each layer, including output // These are for the first case, and max_batch gradient sets are allocated static float *h_convgrad_work = NULL ; // Scratch for unflattened convolution layer gradient __constant__ float *d_convgrad_work ; static double *h_this_delta = NULL ; // Delta for current layer __constant__ double *d_this_delta ; static double *h_prior_delta = NULL ;// Delta for next layer back __constant__ double *d_prior_delta ; static float *h_ll_out = NULL ; __constant__ float *d_ll_out ; static hipDeviceProp_t deviceProp ; __global__ void device_hidden_activation_FC ( int istart , int istop , int ilayer ) ; __global__ void device_hidden_activation_LOCAL_CONV ( int local_vs_conv , int case_start , int case_offset , int slice_start , int n_slices , int ilayer ) ; __global__ void device_hidden_activation_LOCAL_CONV_shared ( int local_vs_conv , int istart , int ilayer ) ; __global__ void device_hidden_activation_POOLED ( int avg_vs_max , int istart , int ilayer ) ; __global__ void device_output_activation_no_hidden ( int istart ) ; __global__ void device_output_activation ( int istart ) ; __global__ void device_softmax ( int istart , int istop ) ; __global__ void device_ll () ; __global__ void device_output_delta ( int istart ) ; __global__ void device_output_gradient_no_hidden ( int istart , int nc ) ; __global__ void device_output_gradient ( int nc , int ilayer ) ; __global__ void device_backprop_delta_FC ( int ilayer ) ; __global__ void device_backprop_delta_nonpooled ( 
int ilayer ) ; __global__ void device_backprop_delta_pooled ( int ilayer ) ; __global__ void device_move_delta ( int nhid ) ; __global__ void device_hidden_gradient_FC ( int istart , int nc , int ilayer ) ; __global__ void device_hidden_gradient_LOCAL_CONV ( int local_vs_conv , int nfilt , int istart , int depth_offset , int n_depths , int ilayer ) ; __global__ void device_flatten_gradient ( int islice_start , int max_depth , int ilayer ) ; __global__ void device_zero_gradient ( int nc ) ; __global__ void device_fetch_gradient ( int nc ) ; /* ----------------------------------------------------- cuda_init() - Initialize for a model configuration ----------------------------------------------------- */ int cuda_init ( int n_cases , // Total number of cases int n_img_rows , // Number of rows in input image int n_img_cols , // Number of cols in input image int n_img_bands , // Number of bands in input image int n_pred , // Number of predictors int n_classes , // Number of classes double *data , // Ncases by (n_pred+n_classes) data array int max_batch_size , // Max number of cases that caller wants in a single launch int max_hid_grad , // Max hid in a CONV hid grad launch; multiple of height*width; <= 65536 int max_mem_grad , // Max memory (bytes) used for CONV scratch storage, which has the potential to be huge int n_all_wts , // Total number of weights (all layers, including output, and all bias terms) int n_layers , // Number of layers, not including final int layer_type[MAX_LAYERS] , // Each entry (input to final) is TYPE_? 
in CONST.H int nhid[MAX_LAYERS] , // Total number of neurons in this layer = height times width times depth int n_prior_weights[MAX_LAYERS] , // N of inputs per neuron (including bias) to prior layer = prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1 // A CONV layer has this many weights per layer (slice); a LOCAL layer has this times its nhid int height[MAX_LAYERS] , // Number of neurons vertically in a slice of this layer, 1 if fully connected int width[MAX_LAYERS] , // Ditto horizontal int depth[MAX_LAYERS] , // Number of hidden neurons if fully connected, else number of slices in this layer int HalfWidH[MAX_LAYERS] , // Horizontal half width looking back to prior layer int HalfWidV[MAX_LAYERS] , // And vertical int padH[MAX_LAYERS] , // Horizontal padding, should not exceed half width int padV[MAX_LAYERS] , // And vertical int strideH[MAX_LAYERS] , // Horizontal stride int strideV[MAX_LAYERS] , // And vertical int PoolWidH[MAX_LAYERS] , // Horizontal pooling width looking back to prior layer int PoolWidV[MAX_LAYERS] , // And vertical char *error_msg // Returns text of error if problem ) { int i, j, ilayer, irow, icol, iband, ncols, memsize, n_total, n_max, n_classes_cols ; int nhid_cols[MAX_LAYERS], depth_cols[MAX_LAYERS], convgrad_cols[MAX_LAYERS] ; int *iclass, ibest, divisor, threads_per_block, batch_size_limit ; double best, *xptr, *dptr[MAX_LAYERS+1] ; float *fptr[MAX_LAYERS+1] ; char msg[256] ; hipError_t error_id ; MEMTEXT ( "MOD_CUDA.cu: cuda_init starting" ) ; cudalog ( "" ) ; max_batch = max_batch_size ; /* Initialize CUDA timers */ for (ilayer=0 ; ilayer<=MAX_LAYERS ; ilayer++) { CudaTimers.ncalls_act[ilayer] = 0 ; CudaTimers.act[ilayer] = 0 ; CudaTimers.ncalls_delta[ilayer] = 0 ; CudaTimers.delta[ilayer] = 0 ; CudaTimers.ncalls_grad[ilayer] = 0 ; CudaTimers.grad[ilayer] = 0 ; } CudaTimers.ncalls_weights = 0 ; CudaTimers.weights = 0 ; CudaTimers.ncalls_softmax = 0 ; CudaTimers.softmax = 0 ; CudaTimers.ncalls_ll = 0 ; CudaTimers.ll = 0 ; 
CudaTimers.ncalls_movedelta = 0 ; CudaTimers.movedelta = 0 ; CudaTimers.ncalls_fetchgrad = 0 ; CudaTimers.fetchgrad = 0 ; error_id = hipSetDevice ( cuda_present - 1 ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init SetDevice failed %d: %s", error_id, hipGetErrorString(error_id) ) ; MEMTEXT ( error_msg ) ; audit ( error_msg ) ; cuda_enable = 0 ; return ERROR_CUDA_ERROR ; } hipGetDeviceProperties ( &deviceProp , 0 ) ; /* Constants We also keep nhid_cols, which is the neurons counts bumped up to multiples of 32 (actual) so as to keep rows of weight matrices starting on 128-byte boundaries. Ditto for output weights. For CONV layers, we bump up depth because every neuron in visual field (height*width) has the same weights in a given slice. */ n_weights = n_all_wts ; ncols = n_pred + n_classes ; n_classes_cols = (n_classes + 31) / 32 * 32 ; // For memory alignment of weights to 128 bytes // This applies to only output weights for (i=0 ; i<n_layers ; i++) { nhid_cols[i] = (nhid[i] + 31) / 32 * 32 ; depth_cols[i] = (depth[i] + 31) / 32 * 32 ; h_poolmax_id[i] = NULL ; } hipMemcpyToSymbol ( d_ncases , &n_cases , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_img_rows , &n_img_rows , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_img_cols , &n_img_cols , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_img_bands , &n_img_bands , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_n_pred , &n_pred , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_n_classes , &n_classes , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_n_classes_cols , &n_classes_cols , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_n_layers , &n_layers , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_n_weights , &n_weights , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_nhid , nhid , n_layers * sizeof(int) , 0 , 
hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_nhid_cols , nhid_cols , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_layer_type , layer_type , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_height , height , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_width , width , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_depth , depth , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_depth_cols , depth_cols , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_n_prior_weights , n_prior_weights , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_HalfWidH , HalfWidH , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_HalfWidV , HalfWidV , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_padH , padH , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_padV , padV , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_strideH , strideH , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_strideV , strideV , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_PoolWidH , PoolWidH , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_PoolWidV , PoolWidV , n_layers * sizeof(int) , 0 , hipMemcpyHostToDevice ) ; /* Set shared memory / cache preferences */ hipFuncSetCacheConfig ( device_hidden_activation_FC , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_hidden_activation_LOCAL_CONV , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_hidden_activation_LOCAL_CONV_shared , hipFuncCachePreferNone ) ; hipFuncSetCacheConfig ( device_hidden_activation_POOLED , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_output_activation_no_hidden , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( 
device_output_activation , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_softmax , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_ll , hipFuncCachePreferNone ) ; hipFuncSetCacheConfig ( device_output_delta , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_output_gradient_no_hidden , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_output_gradient , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_backprop_delta_FC , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_backprop_delta_nonpooled , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_backprop_delta_pooled , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_move_delta , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_hidden_gradient_FC , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_hidden_gradient_LOCAL_CONV , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_flatten_gradient , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_zero_gradient , hipFuncCachePreferL1 ) ; hipFuncSetCacheConfig ( device_fetch_gradient , hipFuncCachePreferL1 ) ; /* Predictors - We extract only the first n_pred columns from the n_pred+n_classes columns in data Reorder them so band changes fastest */ fdata = (float *) MALLOC ( n_cases * n_pred * sizeof(float) ) ; if (fdata == NULL) return ERROR_INSUFFICIENT_MEMORY ; memsize = n_cases * n_pred * sizeof(float) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &h_predictors , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC predictors = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) h_predictors, memsize, total_memory / (1024 * 1024) ) ; MEMTEXT ( msg ) ; cudalog ( msg ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc predictors (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } j = 0 ; for (i=0 ; i<n_cases ; i++) { xptr = data + i * ncols ; for (irow=0 ; irow<n_img_rows ; irow++) { for (icol=0 ; 
icol<n_img_cols ; icol++) { for (iband=0 ; iband<n_img_bands ; iband++) fdata[j++] = (float) xptr[(iband*n_img_rows+irow)*n_img_cols+icol] ; } } } assert ( j == n_cases * n_pred ) ; error_id = hipMemcpy ( h_predictors , fdata , n_cases * n_pred * sizeof(float) , hipMemcpyHostToDevice ) ; FREE ( fdata ) ; fdata = NULL ; if (error_id == hipSuccess) error_id = hipMemcpyToSymbol ( d_predictors , &h_predictors , sizeof(float *) , 0 , hipMemcpyHostToDevice ) ; else { sprintf_s ( error_msg , 255 , "CUDA init bad predictors copy %d: %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_ERROR ; } /* Classes; we convert the 1/0 binary output target vector to integer classes */ iclass = (int *) MALLOC ( n_cases * sizeof(int) ) ; if (iclass == NULL) return ERROR_INSUFFICIENT_MEMORY ; memsize = n_cases * sizeof(int) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &h_class , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC class = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) h_class, memsize, total_memory / (1024 * 1024) ) ; MEMTEXT ( msg ) ; cudalog ( msg ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc class (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } for (i=0 ; i<n_cases ; i++) { best = -1.e60 ; xptr = data + i * ncols + n_pred ; for (j=0 ; j<n_classes ; j++) { if (xptr[j] > best) { best = xptr[j] ; ibest = j ; } } iclass[i] = ibest ; } error_id = hipMemcpy ( h_class , iclass , n_cases * sizeof(int) , hipMemcpyHostToDevice ) ; if (error_id == hipSuccess) error_id = hipMemcpyToSymbol ( d_class , &h_class , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ; else { sprintf_s ( error_msg , 255 , "CUDA init bad class copy %d: %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_ERROR ; } FREE ( iclass ) ; /* Activations (hidden layers only) ordered (row, col, slice) */ if (n_layers) { n_total = 0 ; for (i=0 ; i<n_layers ; i++) // All hidden layers, but 
not output n_total += nhid[i] ; memsize = n_total * max_batch * sizeof(double) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &activations , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC activations = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) activations, memsize, total_memory / (1024 * 1024) ) ; MEMTEXT ( msg ) ; cudalog ( msg ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc activations (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } n_total = 0 ; for (i=0 ; i<n_layers ; i++) { dptr[i] = activations + n_total * max_batch ; n_total += nhid[i] ; } error_id = hipMemcpyToSymbol ( d_act , &dptr[0] , n_layers * sizeof(double *) , 0 , hipMemcpyHostToDevice ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad act ptr copy %d: %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_ERROR ; } } else activations = NULL ; /* poolmax_id (POOLMAX layers only) ordered (row, col, slice) */ for (ilayer=0 ; ilayer<n_layers ; ilayer++) { if (layer_type[ilayer] == TYPE_POOLMAX) { memsize = nhid[ilayer] * max_batch * sizeof(int) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &h_poolmax_id[ilayer] , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC Layer %d poolmax_id = %llx (%d bytes, total=%.2lf MB)", ilayer, (unsigned long long) h_poolmax_id[ilayer], memsize, total_memory / (1024 * 1024) ) ; MEMTEXT ( msg ) ; cudalog ( msg ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc poolmax_id (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } } else h_poolmax_id[ilayer] = NULL ; } error_id = hipMemcpyToSymbol ( d_poolmax_id , &h_poolmax_id[0] , n_layers * sizeof(int *) , 0 , hipMemcpyHostToDevice ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad poolmax_id ptr copy %d: %s", error_id, hipGetErrorString(error_id) ) ; return 
ERROR_CUDA_ERROR ; } /* Outputs */ memsize = n_cases * n_classes * sizeof(double) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &h_output , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC output = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) h_output, memsize, total_memory / (1024 * 1024) ) ; cudalog ( msg ) ; if (error_id == hipSuccess) error_id = hipMemcpyToSymbol ( d_output , &h_output , sizeof(float *) , 0 , hipMemcpyHostToDevice ) ; else { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc output (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } /* Weights These are stored as the transpose of those in Host, with the neurons in the 'current' layer changing fastest. Within each layer's weight matrix, rows (sets of current layer weights) are stored starting on 128-byte boundaries. Thus, n_weights_on_device is generally larger than n_weights, because it takes into account row padding. Neuron layout in each layer is (height, width, depth). 
*/ n_weights_on_device = 0 ; for (ilayer=0 ; ilayer<= n_layers ; ilayer++) { // For each of the hidden layers, plus the final if (ilayer == n_layers) n_weights_on_device += n_classes_cols * n_prior_weights[ilayer] ; else if (layer_type[ilayer] == TYPE_FC || layer_type[ilayer] == TYPE_LOCAL) n_weights_on_device += nhid_cols[ilayer] * n_prior_weights[ilayer] ; // Add in weights for this layer else if (layer_type[ilayer] == TYPE_CONV) n_weights_on_device += depth_cols[ilayer] * n_prior_weights[ilayer] ; // A convolution layer uses the same weights for every hidden neuron in visible field else if (layer_type[i] == TYPE_POOLAVG || layer_type[i] == TYPE_POOLMAX) n_weights_on_device += 0 ; // Just for clarity; pooling has no trainable weights } // For ilayer memsize = n_weights_on_device * sizeof(float) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &weights , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC weights = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) weights, memsize, total_memory / (1024 * 1024) ) ; MEMTEXT ( msg ) ; cudalog ( msg ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc weights (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } n_total = 0 ; for (ilayer=0 ; ; ilayer++) { // For each of the hidden layers, plus the final fptr[ilayer] = weights + n_total ; if (ilayer >= n_layers) break ; if (layer_type[ilayer] == TYPE_FC || layer_type[ilayer] == TYPE_LOCAL) n_total += nhid_cols[ilayer] * n_prior_weights[ilayer] ; // Add in weights for this layer else if (layer_type[ilayer] == TYPE_CONV) n_total += depth_cols[ilayer] * n_prior_weights[ilayer] ; // A convolution layer uses the same weights for every hidden neuron in visible field in a slice else if (layer_type[i] == TYPE_POOLAVG || layer_type[i] == TYPE_POOLMAX) n_total += 0 ; // Just for clarity; pooling has no trainable weights } // For ilayer error_id = hipMemcpyToSymbol ( d_weights , &fptr[0] , 
(n_layers+1) * sizeof(float *) , 0 , hipMemcpyHostToDevice ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad weight ptr copy %d: %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_ERROR ; } /* Gradient We allocate for max_batch complete gradient vectors, and d_grad will be pointers to the first set. Subsequent sets are addressed by adding k * n_weights to the first set. */ memsize = n_weights * max_batch * sizeof(float) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &grad , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC grad = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) grad, memsize, total_memory / (1024 * 1024) ) ; MEMTEXT ( msg ) ; cudalog ( msg ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc grad (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } n_total = 0 ; for (ilayer=0 ; ; ilayer++) { // For each of the hidden layers, plus the final fptr[ilayer] = grad + n_total ; if (ilayer >= n_layers) break ; if (layer_type[ilayer] == TYPE_FC || layer_type[ilayer] == TYPE_LOCAL) n_total += nhid[ilayer] * n_prior_weights[ilayer] ; // Add in grad for this layer else if (layer_type[ilayer] == TYPE_CONV) n_total += depth[ilayer] * n_prior_weights[ilayer] ; // A convolution layer uses the same grad for every hidden neuron in visible field else if (layer_type[i] == TYPE_POOLAVG || layer_type[i] == TYPE_POOLMAX) n_total += 0 ; // Just for clarity; pooling has no trainable grad } // For ilayer (each hidden layer) error_id = hipMemcpyToSymbol ( d_grad , &fptr[0] , (n_layers+1) * sizeof(float *) , 0 , hipMemcpyHostToDevice ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad weight ptr copy %d: %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_ERROR ; } /* convgrad_work - Scratch vector for unflattened convolution layer gradient */ max_convgrad_work = 0 ; // Will find the max work area needed 
for (ilayer=0 ; ilayer<n_layers ; ilayer++) { if (layer_type[ilayer] == TYPE_CONV) { convgrad_cols[ilayer] = (n_prior_weights[ilayer] + 31) / 32 * 32 ; // CONV scratch is zero padded for full coalescing n_max = 1024 * 1024 * max_mem_grad / (max_batch * convgrad_cols[ilayer] * sizeof(float)) ; // Launch limit satisfying memory divisor = 1 ; // Figure out how much we have to divide slices to meet max_hid_grad and max_mem_grad limits for ( ;; ) { j = depth[ilayer] / divisor * height[ilayer] * width[ilayer] ; // We will launch this many hid at a time if (j <= max_hid_grad && j <= n_max) break ; ++divisor ; } j = depth[ilayer] / divisor * height[ilayer] * width[ilayer] ; // We will launch this many hid at a time if (j < height[ilayer] * width[ilayer]) // Careless user specified it too small, so ignore request j = height[ilayer] * width[ilayer] ; // At this time, j is the number of hidden neurons per launch if (j * convgrad_cols[ilayer] > max_convgrad_work) max_convgrad_work = j * convgrad_cols[ilayer] ; // This many weights will be computed in a launch (per case) // Print info for user cudalog ( "" ) ; sprintf_s ( msg, "Gradient computation for layer %d will use %d launches, each max %d hidden neurons", ilayer+1, (depth[ilayer] * height[ilayer] * width[ilayer] + j - 1) / j, j ) ; cudalog ( msg ) ; threads_per_block = (n_prior_weights[ilayer] + 31) / 32 * 32 ; if (threads_per_block > 4 * 32) threads_per_block = 4 * 32 ; sprintf_s ( msg, "Launch parameters: Threads per block=%d with %d thread (x) blocks", threads_per_block, (n_prior_weights[ilayer] + threads_per_block - 1) / threads_per_block) ; cudalog ( msg ) ; sprintf_s ( msg, " Max Y dimension (n hidden) = %d; max Z dimension (cases) = %d", j, max_batch_size ) ; cudalog ( msg ) ; } else convgrad_cols[ilayer] = 0 ; // Not needed } hipMemcpyToSymbol ( d_max_convgrad_each , &max_convgrad_work , sizeof(int) , 0 , hipMemcpyHostToDevice ) ; hipMemcpyToSymbol ( d_convgrad_cols , convgrad_cols , n_layers * sizeof(int) , 0 , 
hipMemcpyHostToDevice ) ; // For storing gradient, we need prior weights and cases in batch if (max_convgrad_work) { // Must not have integer overflow in memory size // At this moment, max_convgrad_work is the max number of weights (neurons times prior) in a launch batch_size_limit = MAXPOSNUM / (max_convgrad_work * sizeof(float)) ; // Memory allocation size if (max_batch > batch_size_limit) { audit ( "ERROR... User specified number of training cases per subset too large. Please reduce." ) ; cudalog ( "Device initialization error: training cases per subset too large." ) ; sprintf_s ( error_msg , 255 , "User ERROR: Architecture and CUDA params limit subset to %d cases", batch_size_limit ) ; return ERROR_CUDA_MEMORY ; } max_convgrad_work *= max_batch ; memsize = max_convgrad_work * sizeof(float) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &h_convgrad_work , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC convgrad_work = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) h_convgrad_work, memsize, total_memory / (1024 * 1024) ) ; cudalog ( msg ) ; if (error_id == hipSuccess) error_id = hipMemcpyToSymbol ( d_convgrad_work , &h_convgrad_work , sizeof(float *) , 0 , hipMemcpyHostToDevice ) ; else { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc convgrad_work (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } } else h_convgrad_work = NULL ; /* This delta, next delta */ n_max = n_classes ; for (i=0 ; i<n_layers ; i++) { if (nhid[i] > n_max) n_max = nhid[i] ; } memsize = n_max * max_batch * sizeof(double) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &h_this_delta , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC this_delta = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) h_this_delta, memsize, total_memory / (1024 * 1024) ) ; cudalog ( msg ) ; if (error_id == hipSuccess) error_id = hipMemcpyToSymbol ( d_this_delta , &h_this_delta , sizeof(double *) , 0 , 
hipMemcpyHostToDevice ) ; else { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc this_delta (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } memsize = n_max * max_batch * sizeof(double) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &h_prior_delta , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC prior_delta = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) h_prior_delta, memsize, total_memory / (1024 * 1024) ) ; cudalog ( msg ) ; if (error_id == hipSuccess) error_id = hipMemcpyToSymbol ( d_prior_delta , &h_prior_delta , sizeof(double *) , 0 , hipMemcpyHostToDevice ) ; else { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc prior_delta (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } /* Log likelihood reduction stuff */ memsize = REDUC_BLOCKS * sizeof(float) ; total_memory += memsize ; error_id = hipMalloc ( (void **) &h_ll_out , (size_t) memsize ) ; sprintf_s ( msg, 255 , "CUDA MALLOC ll_out = %llx (%d bytes, total=%.2lf MB)", (unsigned long long) h_ll_out, memsize, total_memory / (1024 * 1024) ) ; MEMTEXT ( msg ) ; cudalog ( msg ) ; if (error_id != hipSuccess) { sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc ll_out (%d): %s", error_id, hipGetErrorString(error_id) ) ; return ERROR_CUDA_MEMORY ; } hipMemcpyToSymbol ( d_ll_out , &h_ll_out , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ; MEMTEXT ( "CUDA init reduc_fdata" ) ; reduc_fdata = (float *) MALLOC ( REDUC_BLOCKS * sizeof(float) ) ; if (reduc_fdata == NULL) { sprintf_s ( error_msg , 255 , "CUDA init bad MALLOC reduc_fdata" ) ; return ERROR_INSUFFICIENT_MEMORY ; // New error return } /* Allocate fdata large enough to handle all subsequent double <-> float transactions This remains allocated until cuda_cleanup() is called, because it is used often in launches. 
*/ n_max = max_convgrad_work ; if (n_weights_on_device > n_max) n_max = n_weights_on_device ; fdata = (float *) MALLOC ( n_max * sizeof(float) ) ; if (fdata == NULL) return ERROR_INSUFFICIENT_MEMORY ; MEMTEXT ( "MOD_CUDA.cu: cuda_init ending" ) ; return 0 ; } /* -------------------------------------------------------------------------------- cuda_weights_to_device - Called from MOD_CUDA.CPP to copy weights HOST weights: In a CONV layer, weight order is: Layer depth Input slice Input height Input width Bias In a LOCAL layer, weight order is: Layer depth Layer height Layer width Input slice Input height Input width Bias CUDA weights: In a CONV layer, weight order is: Input height Input width Input slice Bias Layer depth Pad so layer depth is a multiple of 128 In a LOCAL layer, weight order is: Input height Input width Input slice Bias Layer height Layer width Layer depth Pad so nhid = layer height*width*depth is a multiple of 128 A fully connected layer has height=width=1; all neurons are depth. -------------------------------------------------------------------------------- */ int cuda_weights_to_device ( int n_classes , // Number of outputs int n_layers , // Hidden layers; does not include output int *layer_type , // Each entry (input to final) is TYPE_? 
in CONST.H int img_rows , // Size of input image int img_cols , int img_bands , int *height , // Height of visible field in each layer int *width , // Width of visible field int *depth , // Number of slices in each layer int *nhid , // Number of hidden neurons in each layer int *hwH , // Half-width of filters int *hwV , double **host_weights ) // Vector of pointers to weights for each layer { int n, n_prior, ilayer, ineuron, isub, n_cols_each ; int idepth, iheight, iwidth, ndepth, nheight, nwidth ; int in_row, in_col, in_slice, in_n_height, in_n_width, in_n_depth ; double *wptr ; float *fptr ; char msg[256] ; hipError_t error_id ; fptr = fdata ; for (ilayer=0 ; ilayer<=n_layers ; ilayer++) { wptr = host_weights[ilayer] ; /* Fully connected */ if (ilayer == n_layers || layer_type[ilayer] == TYPE_FC) { if (ilayer == 0) { in_n_height = img_rows ; in_n_width = img_cols ; in_n_depth = img_bands ; } else { in_n_height = height[ilayer-1] ; in_n_width = width[ilayer-1] ; in_n_depth = depth[ilayer-1] ; } n_prior = in_n_height * in_n_width * in_n_depth + 1 ; // Number of weights per neuron, including bias if (ilayer == n_layers) n = n_classes ; // Equals depth else n = nhid[ilayer] ; // Equals depth n_cols_each = (n + 31) / 32 * 32 ; // For memory alignment to 128 bytes for (in_row=0 ; in_row<in_n_height ; in_row++) { for (in_col=0 ; in_col<in_n_width ; in_col++) { for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) { for (idepth=0 ; idepth<n ; idepth++) { // Compute location of this neuron's weight vector in host isub = idepth * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ; *fptr++ = (float) wptr[isub] ; } // For idepth while (idepth++ < n_cols_each) // Pad to multiple of 128 bytes *fptr++ = 0.0f ; } // For in_slice } // For in_col } // For in_row // Bias for (idepth=0 ; idepth<n ; idepth++) { // Compute location of this neuron's bias in host isub = idepth * n_prior + n_prior - 1 ; *fptr++ = (float) wptr[isub] ; } // For idepth while (idepth++ < 
n_cols_each) // Pad to multiple of 128 bytes *fptr++ = 0.0f ; } /* LOCAL */ else if (layer_type[ilayer] == TYPE_LOCAL) { // For LOCAL layers, neuron layout in current layer is (height, width, depth). n = nhid[ilayer] ; n_cols_each = (n + 31) / 32 * 32 ; // For memory alignment to 128 bytes ndepth = depth[ilayer] ; nheight = height[ilayer] ; nwidth = width[ilayer] ; in_n_height = 2 * hwV[ilayer] + 1 ; in_n_width = 2 * hwH[ilayer] + 1 ; if (ilayer == 0) in_n_depth = img_bands ; else in_n_depth = depth[ilayer-1] ; n_prior = in_n_height * in_n_width * in_n_depth + 1 ; // Number of weights per neuron, including bias for (in_row=0 ; in_row<in_n_height ; in_row++) { for (in_col=0 ; in_col<in_n_width ; in_col++) { for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) { for (iheight=0 ; iheight<nheight ; iheight++) { // nhid = ndepth * nheight * nwidth for (iwidth=0 ; iwidth<nwidth ; iwidth++) { // We must reorder so depth changes fastest for (idepth=0 ; idepth<ndepth ; idepth++) { // Compute location of this neuron's weight in host isub = (idepth * nheight + iheight) * nwidth + iwidth ; // Neuron in this layer isub = isub * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ; *fptr++ = (float) wptr[isub] ; } // For idepth } // For iwidth } // For iheight ineuron = nhid[ilayer] ; while (ineuron++ < n_cols_each) // Pad to multiple of 128 bytes *fptr++ = 0.0f ; } // For in_slice } // For in_col } // For in_row // Bias for (iheight=0 ; iheight<nheight ; iheight++) { // nhid = ndepth * nheight * nwidth for (iwidth=0 ; iwidth<nwidth ; iwidth++) { // We must reorder so depth changes fastest for (idepth=0 ; idepth<ndepth ; idepth++) { // Compute location of this neuron's weight vector in host isub = (idepth * nheight + iheight) * nwidth + iwidth ; // Neuron in this layer isub = isub * n_prior + n_prior - 1 ; *fptr++ = (float) wptr[isub] ; } // For idepth } // For iwidth } // For iheight ineuron = nhid[ilayer] ; while (ineuron++ < n_cols_each) // Pad to multiple of 128 
bytes *fptr++ = 0.0f ; } /* CONV */ else if (layer_type[ilayer] == TYPE_CONV) { nheight = height[ilayer] ; nwidth = width[ilayer] ; ndepth = depth[ilayer] ; n_cols_each = (ndepth + 31) / 32 * 32 ; // For memory alignment to 128 bytes in_n_height = 2 * hwV[ilayer] + 1 ; in_n_width = 2 * hwH[ilayer] + 1 ; if (ilayer == 0) in_n_depth = img_bands ; else in_n_depth = depth[ilayer-1] ; n_prior = in_n_height * in_n_width * in_n_depth + 1 ; // Number of weights per neuron, including bias for (in_row=0 ; in_row<in_n_height ; in_row++) { for (in_col=0 ; in_col<in_n_width ; in_col++) { for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) { for (idepth=0 ; idepth<ndepth ; idepth++) { // Compute location of this neuron's weight vector in host isub = idepth * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ; *fptr++ = (float) wptr[isub] ; } // For idepth while (idepth++ < n_cols_each) // Pad to multiple of 128 bytes *fptr++ = 0.0f ; } // For in_slice } // For in_col } // For in_row //Bias for (idepth=0 ; idepth<ndepth ; idepth++) { // Compute location of this neuron's bias in host isub = idepth * n_prior + n_prior - 1 ; *fptr++ = (float) wptr[isub] ; } // For idepth while (idepth++ < n_cols_each) // Pad to multiple of 128 bytes *fptr++ = 0.0f ; } else if (layer_type[ilayer] == TYPE_POOLAVG || layer_type[ilayer] == TYPE_POOLMAX) { n = 0 ; // Not needed. Just for clarity. 
      }
   } // For ilayer

   // Sanity check: flattening must have produced exactly the padded device weight count
   assert ( fptr == fdata + n_weights_on_device ) ;

   error_id = hipMemcpy ( weights , fdata , n_weights_on_device * sizeof(float) , hipMemcpyHostToDevice ) ;

   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "CUDA ERROR: bad weights_to_device hid %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( "" ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return ERROR_CUDA_ERROR ;
      }

   return 0 ;
}


/*
--------------------------------------------------------------------------------

   hidden_activation_FC - Compute activations for an FC hidden layer

   Launch layout (set by cuda_hidden_activation_FC):
      grid/block x : hidden neuron within the layer
      grid y       : case within the current batch
   Each thread computes one neuron's weighted sum for one case and writes the
   tanh activation into d_act[ilayer].

--------------------------------------------------------------------------------
*/

__global__ void device_hidden_activation_FC (
   int istart ,   // First case in this batch
   int istop ,    // One past last case (unused here; the batch size is gridDim.y)
   int ilayer     // Layer to process
   )
{
   int icase, ihid, i_input, n_inputs, nhid_cols ;
   float *f_inptr, *wptr ;
   double sum, *actptr, *d_inptr ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   if (ihid >= d_nhid[ilayer])    // Guard: the x grid may overshoot the neuron count
      return ;

   nhid_cols = d_nhid_cols[ilayer] ;   // Padded row length of this layer's device weight matrix
   icase = blockIdx.y ;
   wptr = d_weights[ilayer] + ihid ;   // Device weights are transpose of host weights, with this neuron changing fastest

   sum = 0.0 ;

   // The first hidden layer reads the float predictors; deeper layers read the
   // double activations of the prior layer.  The code is duplicated because the
   // two sources have different pointer types and branching per term would be slow.
   if (ilayer == 0) {
      n_inputs = d_n_pred ;
      f_inptr = d_predictors + (icase+istart)*n_inputs ;
      for (i_input=0 ; i_input<n_inputs ; i_input++) {
         sum += *wptr * f_inptr[i_input] ;
         wptr += nhid_cols ;   // Step to the next weight row; adjacent threads touch adjacent columns
         }
      sum += *wptr ;   // Bias
      }
   else {
      n_inputs = d_nhid[ilayer-1] ;
      d_inptr = d_act[ilayer-1] + icase*n_inputs ;
      for (i_input=0 ; i_input<n_inputs ; i_input++) {
         sum += *wptr * d_inptr[i_input] ;
         wptr += nhid_cols ;
         }
      sum += *wptr ;   // Bias
      }

   // tanh activation computed via the exp identity (e^2s-1)/(e^2s+1), clipped to avoid overflow
   if (sum > MAX_EXP)
      sum = 1.0 ;
   else {
      sum = exp ( 2.0 * sum ) ;
      sum = (sum - 1.0) / (sum + 1.0) ;
      }

   actptr = d_act[ilayer] ;
   actptr[icase*d_nhid[ilayer]+ihid] = sum ;
}


int cuda_hidden_activation_FC (
   int istart ,   // First case in this batch
   int istop ,    // One past last case
   int nhid ,     // Number of hidden neurons in this layer
   int ilayer     // Layer to process
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   // Round the block size up to a whole warp, but cap it at 4 warps
   threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   // Grid x covers the hidden neurons; grid y covers the cases in this batch
   block_launch.x = (nhid + threads_per_block - 1) / threads_per_block ;
   block_launch.y = istop - istart ;
   block_launch.z = 1 ;

   hipLaunchKernelGGL(( device_hidden_activation_FC) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, istart , istop , ilayer ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   // Block until the kernel finishes, then pick up any launch or execution error
   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;

   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_hidden_activation_FC launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
---------------------------------------------------------------------------------

   hidden_activation_LOCAL_CONV - Activations for a LOCAL or CONV hidden layer

---------------------------------------------------------------------------------
*/

__global__ void device_hidden_activation_LOCAL_CONV (
   int local_vs_conv ,   // Is this a LOCAL (vs CONV) layer?
int case_start , // First case in this batch (relative to dataset) int case_offset , // Offset relative to this batch (used in shared version) int slice_start , // First slice in this batch int n_slices , // Number of slices to be done in this launch int ilayer // Layer to process ) { int kwt, kin, wtsub, insub, iheight, iwidth, idepth, n_height, n_width, n_depth, wt_cols, ihid ; int rstart, rstop, cstart, cstop, rbase, cbase, in_slice, in_row, in_col, nH ; float *f_inptr, *wptr ; double sum, *actptr ; idepth = blockIdx.x * blockDim.x + threadIdx.x ; if (idepth >= n_slices) return ; idepth += slice_start ; iheight = blockIdx.y / d_width[ilayer] ; iwidth = blockIdx.y % d_width[ilayer] ; nH = 2 * d_HalfWidH[ilayer] + 1 ; // We are about to compute the activation of neuron (iheight, iwidth, idepth) in this layer. // Note that it is critical that idepth be associated with the thread. // This ensures that adjacent threads reference the same input, which allows efficient memory use. // Also, the weights are ordered so that depth-fastest changes produce perfect or very good coalescing. // Thus, neuron layout in current layer is (height, width, depth). // This gives strong motivation for LOCAL layers to have depth a multiple of 32. // To see why, note the ihid= below. That multiplication ensures perfect coalescing of the weight fetches. 
// icase = blockIdx.z ; // Avoid having to declare this (and use a register) by directly referencing it later if (local_vs_conv) { wt_cols = d_nhid_cols[ilayer] ; // Padded size of weight matrix rows; each has nhid data values, then zero padding // There are n_prior_weights rows (prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1) ihid = (iheight * d_width[ilayer] + iwidth) * d_depth[ilayer] + idepth ; wptr = d_weights[ilayer] + ihid ; // Device weights are transpose of host weights, with this neuron changing fastest } // Order is (height, width, depth) else { wt_cols = d_depth_cols[ilayer] ; // Padded size of weight matrix rows; each has depth[ilayer] data values, then zero padding // There are n_prior_weights rows (prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1) // A convolutional layer has a different weight set for each slice, // but the same weight set for all neurons (visual field placement) in a slice. wptr = d_weights[ilayer] + idepth ; // First filter weight for this slice is here; subsequent weights spaced by wt_cols } sum = 0.0 ; // Center of first filter is at HalfWidth-Pad; filter begins at -Pad. rbase = rstart = d_strideV[ilayer] * iheight - d_padV[ilayer] ; rstop = rstart + 2 * d_HalfWidV[ilayer] ; cbase = cstart = d_strideH[ilayer] * iwidth - d_padH[ilayer] ; cstop = cstart + 2 * d_HalfWidH[ilayer] ; if (rstart < 0) rstart = 0 ; if (cstart < 0) cstart = 0 ; // It's annoying and messy, but we must duplicate the same code for the case of this being the // first hidden layer (fed by the input) versus a subsequent hidden layer (fed by prior activations). // This is because the input uses a float pointer, and activations a double pointer. // Deciding in the inner loop would be too slow! 
if (ilayer == 0) { f_inptr = d_predictors + (blockIdx.z + case_offset + case_start) * d_n_pred ; if (rstop >= d_img_rows) rstop = d_img_rows - 1 ; if (cstop >= d_img_cols) cstop = d_img_cols - 1 ; for (in_row=rstart ; in_row<=rstop ; in_row++) { kwt = (in_row - rbase) * nH ; kin = in_row*d_img_cols ; for (in_col=cstart ; in_col<=cstop ; in_col++) { wtsub = (kwt + in_col - cbase) * d_img_bands ; insub = (kin+in_col) * d_img_bands ; for (in_slice=0 ; in_slice<d_img_bands ; in_slice++) { // wtsub = ((in_row - rbase) * nH + in_col - cbase) * d_img_bands + in_slice ; // insub = (in_row*d_img_cols+in_col)*d_img_bands+in_slice ; sum += f_inptr[insub] * wptr[wtsub*wt_cols] ; ++wtsub ; ++insub ; } // For in_slice } // For in_col } // For in_row sum += wptr[(d_n_prior_weights[ilayer]-1) * wt_cols] ; // Bias } else { actptr = d_act[ilayer-1] + (blockIdx.z + case_offset) * d_nhid[ilayer-1] ; n_height = d_height[ilayer-1] ; n_width = d_width[ilayer-1] ; n_depth = d_depth[ilayer-1] ; if (rstop >= n_height) rstop = n_height - 1 ; if (cstop >= n_width) cstop = n_width - 1 ; for (in_row=rstart ; in_row<=rstop ; in_row++) { kwt = (in_row - rbase) * nH ; kin = in_row*n_width ; for (in_col=cstart ; in_col<=cstop ; in_col++) { wtsub = (kwt + in_col - cbase) * n_depth ; insub = (kin+in_col) * n_depth ; for (in_slice=0 ; in_slice<d_depth[ilayer-1] ; in_slice++) { // wtsub = ((in_row - rbase) * nH + in_col - cbase) * n_depth + in_slice ; // insub = (in_row*n_width+in_col)*n_depth+in_slice ; sum += actptr[insub] * wptr[wtsub*wt_cols] ; ++wtsub ; ++insub ; } // For in_slice } // For in_col } // For in_row sum += wptr[(d_n_prior_weights[ilayer]-1) * wt_cols] ; // Bias } if (sum > MAX_EXP) sum = 1.0 ; else { sum = exp ( 2.0 * sum ) ; sum = (sum - 1.0) / (sum + 1.0) ; } n_height = d_height[ilayer] ; n_width = d_width[ilayer] ; n_depth = d_depth[ilayer] ; actptr = d_act[ilayer] ; ihid = (iheight * n_width + iwidth) * n_depth + idepth ; // Activity for any layer type is (height, width, depth) 
actptr[(blockIdx.z+case_offset)*d_nhid[ilayer]+ihid] = sum ; } int cuda_hidden_activation_LOCAL_CONV ( int local_vs_conv , // Is this a LOCAL (vs CONV) layer? int istart , // First case in this batch int istop , // One past last case int nhid , // Number of hidden neurons in this layer int n_slices , // Depth of this layer int ilayer // Layer to process ) { int warpsize, threads_per_block ; char msg[256] ; dim3 block_launch ; hipError_t error_id ; // nhid = height * width * depth assert ( nhid % n_slices == 0 ) ; assert ( nhid / n_slices <= 65535 ) ; warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future threads_per_block = (n_slices + warpsize - 1) / warpsize * warpsize ; if (threads_per_block > 4 * warpsize) threads_per_block = 4 * warpsize ; block_launch.x = (n_slices + threads_per_block - 1) / threads_per_block ; block_launch.y = nhid / n_slices ; // Height times width; visual field size block_launch.z = istop - istart ; hipLaunchKernelGGL(( device_hidden_activation_LOCAL_CONV) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, local_vs_conv , istart , 0 , 0 , n_slices , ilayer ) ; // This does not trigger an escape, but it keeps the message queue running user_pressed_escape () ; hipDeviceSynchronize() ; error_id = hipGetLastError () ; if (error_id != hipSuccess) { sprintf_s ( msg , 255 , "cuda_hidden_activation_LOCAL_CONV launch error %d: %s", error_id, hipGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } return 0 ; } __global__ void device_hidden_activation_LOCAL_CONV_shared ( int local_vs_conv , // Is this a LOCAL (vs CONV) layer? 
int istart , // First case in this batch int ilayer // Layer to process ) { int k, iheight, iwidth, idepth, icase, n_height, n_width, n_depth, wt_cols ; int ihid, inner, n_inner, inner_blocks, prod ; int rstart, rstop, cstart, cstop, rbase, cbase, in_slice, in_row, in_col, isub, nH ; float *f_inptr, *wptr ; double value, sum, *actptr ; // In a block, threadIdx.x and threadIdx.y are the location within the BLOCK_SIZE square block. // The entire matrix of cases (rows) by slices (column) is divided into these blocks, // each of which is a launched block whose location in the entire matrix is given by blockIdx.x and blockIdx.y. // The sharing logic ignores blockIdx.z, which is just the location in the visual field. // The next four quantities identify the location within the entire matrix. idepth = blockIdx.x * BLOCK_SIZE + threadIdx.x ; icase = blockIdx.y * BLOCK_SIZE + threadIdx.y ; iheight = blockIdx.z / d_width[ilayer] ; iwidth = blockIdx.z % d_width[ilayer] ; nH = 2 * d_HalfWidH[ilayer] + 1 ; // Horizontal width of the filter // This thread will compute the activation of neuron (iheight, iwidth, idepth) for case icase. // However, the first step is for the threads in this block to cooperatively do the global // loads into shared memory of the weights and inputs relevant to this block. // We do this in a loop which covers the 'inner' (n_inner) dimension of the matrix multiplication. // Note that it is critical that idepth be associated with the thread. // This ensures that adjacent threads reference the same input, which allows efficient memory use. // Also, the weights are ordered so that depth-fastest changes produce perfect or very good coalescing. // Thus, neuron layout in current layer is (row, column, slice). // This gives strong motivation for LOCAL layers to have depth a multiple of 32. // To see why, note the ihid= below. That multiplication ensures perfect coalescing of the weight fetches. 
if (local_vs_conv) { wt_cols = d_nhid_cols[ilayer] ; // Padded size of weight matrix rows; each has nhid data values, then zero padding // There are n_prior_weights rows (prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1) ihid = (iheight * d_width[ilayer] + iwidth) * d_depth[ilayer] + idepth ; wptr = d_weights[ilayer] + ihid ; // Device weights are transpose of host weights, with this neuron changing fastest } // Order is (height, width, depth) else { wt_cols = d_depth_cols[ilayer] ; // Padded size of weight matrix rows; each has depth[ilayer] data values, then zero padding // There are n_prior_weights rows (prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1) // A convolutional layer has a different weight set for each slice, // but the same weight set for all neurons (visual field placement) in a slice. wptr = d_weights[ilayer] + idepth ; // First filter weight for this slice is here; subsequent weights spaced by wt_cols } // Get a pointer to and the size of the prior-layer feeding this layer if (ilayer == 0) { f_inptr = d_predictors + (icase + istart) * d_n_pred ; n_height = d_img_rows ; n_width = d_img_cols ; n_depth = d_img_bands ; } else { actptr = d_act[ilayer-1] + icase * d_nhid[ilayer-1] ; n_height = d_height[ilayer-1] ; n_width = d_width[ilayer-1] ; n_depth = d_depth[ilayer-1] ; } // Center of first filter is at HalfWidth-Pad; filter begins at -Pad. // These quantities are independent of the depth (column here) and case (row here). rbase = rstart = d_strideV[ilayer] * iheight - d_padV[ilayer] ; rstop = rstart + 2 * d_HalfWidV[ilayer] ; cbase = cstart = d_strideH[ilayer] * iwidth - d_padH[ilayer] ; cstop = cstart + 2 * d_HalfWidH[ilayer] ; if (rstart < 0) rstart = 0 ; if (cstart < 0) cstart = 0 ; if (rstop >= n_height) rstop = n_height - 1 ; if (cstop >= n_width) cstop = n_width - 1 ; // The prep work is done. We now cooperatively do the global load. 
// This thread will handle Row threadIdx.y, Column threadIdx.x of the BLOCK_SIZE square block // in a loop over all inner blocks. // In each pass, we start by computing the ordinal position in the filter dot product loop. prod = (cstop-cstart+1) * n_depth ; // Each prior-layer row has this many elements n_inner = (rstop-rstart+1) * prod + 1 ; // This many terms in inner sum (+1 is for bias) inner_blocks = (n_inner + BLOCK_SIZE - 1) / BLOCK_SIZE ; // We will process this many 'inner' blocks sum = 0.0 ; for (inner=0 ; inner<inner_blocks ; inner++) { __shared__ double s_cases[BLOCK_SIZE][BLOCK_SIZE] ; __shared__ float s_slices[BLOCK_SIZE][BLOCK_SIZE] ; // Slice; We will sum over FIRST index (y) of s_slices isub = inner * BLOCK_SIZE + threadIdx.y ; // Ordinal position in filter dot product loop if (isub >= n_inner) // Outside inner block value = 0.0 ; else if (isub == n_inner-1) // Bias value = wptr[(d_n_prior_weights[ilayer]-1) * wt_cols] ; else { in_row = isub / prod ; k = isub - in_row * prod ; in_col = k / n_depth ; in_slice = k % n_depth ; in_row += rstart ; in_col += cstart ; isub = ((in_row - rbase) * nH + in_col - cbase) * n_depth + in_slice ; value = wptr[isub*wt_cols] ; } s_slices[threadIdx.y][threadIdx.x] = value ; // Case; We will sum over SECOND index (x) of s_cases isub = inner * BLOCK_SIZE + threadIdx.x ; // Ordinal position in filter dot product loop if (isub >= n_inner) // Outside inner block value = 0.0 ; else if (isub == n_inner-1) // Bias value = 1.0 ; else { in_row = isub / prod ; k = isub - in_row * prod ; in_col = k / n_depth ; in_slice = k % n_depth ; in_row += rstart ; in_col += cstart ; isub = (in_row*n_width+in_col)*n_depth+in_slice ; if (ilayer == 0) value = f_inptr[isub] ; else value = actptr[isub] ; } s_cases[threadIdx.y][threadIdx.x] = value ; __syncthreads () ; for (k=0 ; k<BLOCK_SIZE ; k++) sum += s_cases[threadIdx.y][k] * s_slices[k][threadIdx.x] ; __syncthreads () ; } // For inner if (sum > MAX_EXP) sum = 1.0 ; else { sum = exp ( 2.0 
* sum ) ;                              // Finishes tanh(sum) via exp(2*sum): tanh(x) = (e^(2x) - 1) / (e^(2x) + 1)
      sum = (sum - 1.0) / (sum + 1.0) ;
      }

// Store the finished activation
   n_width = d_width[ilayer] ;
   n_depth = d_depth[ilayer] ;
   actptr = d_act[ilayer] ;
   ihid = (iheight * n_width + iwidth) * n_depth + idepth ; // Activity for any layer type is (height, width, depth)
   actptr[icase*d_nhid[ilayer]+ihid] = sum ;                // Perfectly coalesced if depth and nhid multiples of 32
}


/*
   Host wrapper for the shared-memory LOCAL/CONV activation kernel.
   The BLOCK_SIZE-square shared-memory kernel handles the largest submatrix whose
   slice and case counts are multiples of BLOCK_SIZE; the leftover fringe of
   slices and the leftover fringe of cases are finished with the plain
   (non-shared) kernel.  Returns 0 on success, 1 on any kernel launch error.
*/

int cuda_hidden_activation_LOCAL_CONV_shared (
   int local_vs_conv ,  // Is this a LOCAL (vs CONV) layer?
   int istart ,         // First case in this batch
   int istop ,          // One past last case
   int nhid ,           // Number of hidden neurons in this layer
   int n_slices ,       // Depth of this layer
   int ilayer           // Layer to process
   )
{
   int nc, warpsize, threads_per_block ;
   char msg[256] ;
   dim3 thread_launch, block_launch ;
   hipError_t error_id ;

   // nhid = height * width * depth
   assert ( nhid % n_slices == 0 ) ;
   assert ( nhid / n_slices <= 65535 ) ;   // Height*width goes into a grid dimension, which is limited

/*
   If possible (it normally would be), handle as much as possible with the more efficient
   shared-memory method.  But if not, just use the non-shared method.
*/

   nc = istop - istart ;

   if (n_slices < BLOCK_SIZE  ||  nc < BLOCK_SIZE)   // Too small for even one shared-memory tile?
      return cuda_hidden_activation_LOCAL_CONV ( local_vs_conv , istart , istop , nhid , n_slices , ilayer ) ;

// Each launched block is a BLOCK_SIZE-square tile: x covers slices, y covers cases,
// z is the position in the visual field
   thread_launch.x = BLOCK_SIZE ;
   thread_launch.y = BLOCK_SIZE ;
   thread_launch.z = 1 ;
   block_launch.x = n_slices / BLOCK_SIZE ;
   block_launch.y = nc / BLOCK_SIZE ;
   block_launch.z = nhid / n_slices ;  // Height times width; visual field size

   hipLaunchKernelGGL(( device_hidden_activation_LOCAL_CONV_shared) , dim3(block_launch) , dim3(thread_launch) , 0, 0, local_vs_conv , istart , ilayer ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_hidden_activation_LOCAL_CONV_shared launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

/*
   Clean up any extra slices (slice count not a multiple of BLOCK_SIZE)
*/

   if (n_slices % BLOCK_SIZE) {
      threads_per_block = n_slices % BLOCK_SIZE ;   // Just the leftover slices
      block_launch.x = 1 ;
      block_launch.y = nhid / n_slices ; // Height times width; visual field size
      block_launch.z = nc ;
      hipLaunchKernelGGL(( device_hidden_activation_LOCAL_CONV) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, local_vs_conv , istart , 0 , n_slices / BLOCK_SIZE * BLOCK_SIZE , n_slices % BLOCK_SIZE , ilayer ) ;
      // This does not trigger an escape, but it keeps the message queue running
      user_pressed_escape () ;
      hipDeviceSynchronize() ;
      error_id = hipGetLastError () ;
      if (error_id != hipSuccess) {
         sprintf_s ( msg , 255 , "cuda_hidden_activation_LOCAL_CONV launch (shared 1) error %d: %s", error_id, hipGetErrorString(error_id) ) ;
         audit ( msg ) ;
         MEMTEXT ( msg ) ;
         return 1 ;
         }
      }

/*
   Clean up any extra cases (case count not a multiple of BLOCK_SIZE)
*/

   if (nc % BLOCK_SIZE) {
      warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future
      threads_per_block = (n_slices / BLOCK_SIZE * BLOCK_SIZE + warpsize - 1) / warpsize * warpsize ;
      if (threads_per_block > 4 * warpsize)
         threads_per_block = 4 * warpsize ;
      block_launch.x = (n_slices / BLOCK_SIZE * BLOCK_SIZE + threads_per_block - 1) / threads_per_block ;
      block_launch.y = nhid / n_slices ; // Height times width; visual field size
      block_launch.z = nc % BLOCK_SIZE ;
      hipLaunchKernelGGL(( device_hidden_activation_LOCAL_CONV) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, local_vs_conv , istart, nc / BLOCK_SIZE * BLOCK_SIZE , 0 , n_slices / BLOCK_SIZE * BLOCK_SIZE , ilayer ) ;
      // This does not trigger an escape, but it keeps the message queue running
      user_pressed_escape () ;
      hipDeviceSynchronize() ;
      error_id = hipGetLastError () ;
      if (error_id != hipSuccess) {
         sprintf_s ( msg , 255 , "cuda_hidden_activation_LOCAL_CONV launch (shared 2) error %d: %s", error_id, hipGetErrorString(error_id) ) ;
         audit ( msg ) ;
         MEMTEXT ( msg ) ;
         return 1 ;
         }
      }

   return 0 ;
}


/*
---------------------------------------------------------------------------------

   hidden_activation_POOLED - Activations for a POOLAVG or POOLMAX hidden layer

---------------------------------------------------------------------------------
*/

__global__ void device_hidden_activation_POOLED (
   int avg_vs_max ,  // Is this a POOLAVG (vs POOLMAX) layer?
   int istart ,      // First case in this batch
   int ilayer        // Layer to process
   )
{
   int icase, iheight, iwidth, idepth, n_width, n_depth, ihid ;
   int rstart, rstop, cstart, cstop, in_row, in_col, *poolmax_id_ptr ;
   float *f_inptr ;
   double x, *actptr, value ;

   idepth = blockIdx.x * blockDim.x + threadIdx.x ;
   if (idepth >= d_depth[ilayer])
      return ;

   n_width = d_width[ilayer] ;
   n_depth = d_depth[ilayer] ;

   iheight = blockIdx.y / n_width ;
   iwidth = blockIdx.y % n_width ;
   ihid = (iheight * n_width + iwidth) * n_depth + idepth ; // Activity for any layer type is (height, width, depth)

// We are about to compute the activation of neuron (iheight, iwidth, idepth) in this layer.
// Note that it is critical that idepth be associated with the thread.
// This ensures that adjacent threads reference the same input, which allows efficient memory use.

   icase = blockIdx.z ;

// Pooling window in the prior layer covered by this neuron (note no pad term here,
// unlike the LOCAL/CONV code which subtracts padding)
   rstart = d_strideV[ilayer] * iheight ;
   rstop = rstart + d_PoolWidV[ilayer] - 1 ;
   cstart = d_strideH[ilayer] * iwidth ;
   cstop = cstart + d_PoolWidH[ilayer] - 1 ;

// It's annoying and messy, but we must duplicate the same code for the case of this being the
// first hidden layer (fed by the input) versus a subsequent hidden layer (fed by prior activations).
// This is because the input uses a float pointer, and activations a double pointer.
// Deciding in the inner loop would be too slow!

   if (ilayer == 0) {
      f_inptr = d_predictors + (icase + istart) * d_n_pred ;

      if (avg_vs_max) {
         value = 0.0 ;
         for (in_row=rstart ; in_row<=rstop ; in_row++) {
            for (in_col=cstart ; in_col<=cstop ; in_col++)
               value += f_inptr[(in_row*d_img_cols+in_col)*d_img_bands+idepth] ;
            } // For in_row
         value /= d_PoolWidV[ilayer] * d_PoolWidH[ilayer] ;
         }

      else {
         poolmax_id_ptr = &d_poolmax_id[ilayer][ihid] + icase * d_nhid[ilayer] ;
         value = -1.e60 ;
         for (in_row=rstart ; in_row<=rstop ; in_row++) {
            for (in_col=cstart ; in_col<=cstop ; in_col++) {
               x = f_inptr[(in_row*d_img_cols+in_col)*d_img_bands+idepth] ;
               if (x > value) {
                  value = x ;
                  *poolmax_id_ptr = in_row * d_img_cols + in_col ;  // Save id of max for backprop pass
                  }
               } // For in_col
            } // For in_row
         } // POOLMAX
      } // If first hidden layer

   else {
      actptr = d_act[ilayer-1] + icase * d_nhid[ilayer-1] ;
      n_width = d_width[ilayer-1] ;
      n_depth = d_depth[ilayer-1] ;

      if (avg_vs_max) {
         value = 0.0 ;
         for (in_row=rstart ; in_row<=rstop ; in_row++) {
            for (in_col=cstart ; in_col<=cstop ; in_col++)
               value += actptr[(in_row*n_width+in_col)*n_depth+idepth] ;
            } // For in_row
         value /= d_PoolWidV[ilayer] * d_PoolWidH[ilayer] ;
         }

      else {
         poolmax_id_ptr = &d_poolmax_id[ilayer][ihid] + icase * d_nhid[ilayer] ;
         value = -1.e60 ;
         for (in_row=rstart ; in_row<=rstop ; in_row++) {
            for (in_col=cstart ; in_col<=cstop ; in_col++) {
               x = actptr[(in_row*n_width+in_col)*n_depth+idepth] ;
               if (x > value) {
                  value = x ;
                  *poolmax_id_ptr = in_row * d_width[ilayer-1] + in_col ;  // Save id of max for backprop pass
                  }
               } // For in_col
            } // For in_row
         } // POOLMAX
      }

// Pooled layers have no nonlinearity; store the pooled value directly
   actptr = d_act[ilayer] ;
   actptr[icase*d_nhid[ilayer]+ihid] = value ;
}


/*
   Host wrapper: slices in grid x, visual field position in grid y, case in grid z.
   Returns 0 on success, 1 on a launch error.
*/

int cuda_hidden_activation_POOLED (
   int avg_vs_max ,  // Is this a POOLAVG (vs POOLMAX) layer?
   int istart ,      // First case in this batch
   int istop ,       // One past last case
   int nhid ,        // Number of hidden neurons in this layer
   int n_slices ,    // Depth of this layer
   int ilayer        // Layer to process
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   hipError_t error_id ;

   // nhid = height * width * depth
   assert ( nhid % n_slices == 0 ) ;
   assert ( nhid / n_slices <= 65535 ) ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (n_slices + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (n_slices + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nhid / n_slices ;  // Height times width; visual field size
   block_launch.z = istop - istart ;

   hipLaunchKernelGGL(( device_hidden_activation_POOLED) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, avg_vs_max , istart , ilayer ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_hidden_activation_POOLED launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
-----------------------------------------------------------------------------------

   output_activation_no_hidden - Compute activations for the output layer
   This version is for when there is no hidden layer.
----------------------------------------------------------------------------------- */

/*
   One thread per (output neuron, case).  Grid: x covers the n_classes outputs,
   y covers the cases of this batch.  Computes the raw (pre-softmax) output as
   the dot product of the predictors with this output's weight column, plus bias.
*/

__global__ void device_output_activation_no_hidden (
   int istart    // First case in this batch
   )
{
   int iout = blockIdx.x * blockDim.x + threadIdx.x ;
   if (iout >= d_n_classes)
      return ;

   int icase = blockIdx.y ;

// Device weights are the transpose of the host weights: the output neuron changes
// fastest, so successive weights of this neuron are d_n_classes_cols apart.
   float *w = d_weights[0] + iout ;
   float *pred = d_predictors + (icase + istart) * d_n_pred ;

   double dot = 0.0 ;
   for (int k=0 ; k<d_n_pred ; k++) {
      dot += *w * pred[k] ;
      w += d_n_classes_cols ;
      }
   dot += *w ;       // Final weight row is the bias

   d_output[(icase+istart)*d_n_classes+iout] = dot ;
}


/*
   Host wrapper: one launch covers all outputs (grid x) and all cases (grid y).
   Returns 0 on success, 1 on a launch error.
*/

int cuda_output_activation_no_hidden (
   int istart ,   // First case in this batch
   int istop      // One past last case
   )
{
   char msg[256] ;
   dim3 grid ;

   int warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future
   int tpb = (n_classes + warpsize - 1) / warpsize * warpsize ;
   if (tpb > 4 * warpsize)
      tpb = 4 * warpsize ;

   grid.x = (n_classes + tpb - 1) / tpb ;
   grid.y = istop - istart ;
   grid.z = 1 ;

   hipLaunchKernelGGL(( device_output_activation_no_hidden) , dim3(grid) , dim3(tpb) , 0, 0, istart ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   hipDeviceSynchronize() ;
   hipError_t error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_output_activation_no_hidden launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
-----------------------------------------------------------------------------------

   output_activation - Compute activations for the output layer

   This version is for when there is at least
one hidden layer.

----------------------------------------------------------------------------------- */

/*
   One thread per (output neuron, case).  Grid: x covers the n_classes outputs,
   y covers the cases of this batch.  The raw output is the dot product of the
   last hidden layer's activations with this output's weight column, plus bias.
*/

__global__ void device_output_activation (
   int istart    // First case in this batch; needed for output
   )
{
   int iout = blockIdx.x * blockDim.x + threadIdx.x ;
   if (iout >= d_n_classes)
      return ;

   int icase = blockIdx.y ;   // Activities are zero origin, not offset by istart

// Device weights are the transpose of the host weights: the output neuron changes
// fastest, so successive weights of this neuron are d_n_classes_cols apart.
   float *w = d_weights[d_n_layers] + iout ;
   int n_in = d_nhid[d_n_layers-1] ;          // Fan-in from the last hidden layer
   double *act = d_act[d_n_layers-1] + icase * n_in ;

   double dot = 0.0 ;
   for (int k=0 ; k<n_in ; k++) {
      dot += *w * act[k] ;
      w += d_n_classes_cols ;
      }
   dot += *w ;       // Final weight row is the bias

   d_output[(icase+istart)*d_n_classes+iout] = dot ;
}


/*
   Host wrapper: one launch covers all outputs (grid x) and all cases (grid y).
   Returns 0 on success, 1 on a launch error.
*/

int cuda_output_activation (
   int istart ,   // First case in this batch
   int istop      // One past last case
   )
{
   char msg[256] ;
   dim3 grid ;

   int warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future
   int tpb = (n_classes + warpsize - 1) / warpsize * warpsize ;
   if (tpb > 4 * warpsize)
      tpb = 4 * warpsize ;

   grid.x = (n_classes + tpb - 1) / tpb ;
   grid.y = istop - istart ;
   grid.z = 1 ;

   hipLaunchKernelGGL(( device_output_activation) , dim3(grid) , dim3(tpb) , 0, 0, istart ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   hipDeviceSynchronize() ;
   hipError_t error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_output_activation launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
--------------------------------------------------------------------------------
softmax - Do SoftMax modification of outputs for a batch

-------------------------------------------------------------------------------- */

/*
   One thread per case (grid-x * block-x); each thread normalizes its own case's
   entire output vector in place: exponentiate (clamped at MAX_EXP to avoid
   overflow), then divide by the sum so the outputs form a probability vector.
*/

__global__ void device_softmax (
   int istart ,   // First case in this batch
   int istop      // One past last case
   )
{
   int icase, iout ;
   double *outptr, sum ;

   icase = blockIdx.x * blockDim.x + threadIdx.x ;
   if (icase >= istop - istart)
      return ;

   outptr = d_output + (icase + istart) * d_n_classes ;  // Output vector for this case

// Exponentiate, clamping the argument at MAX_EXP to prevent overflow
   sum = 0.0 ;
   for (iout=0 ; iout<d_n_classes ; iout++) {
      if (outptr[iout] < MAX_EXP)
         outptr[iout] = exp ( outptr[iout] ) ;
      else
         outptr[iout] = exp ( MAX_EXP ) ;
      sum += outptr[iout] ;
      }

// Normalize so the outputs sum to 1
   for (iout=0 ; iout<d_n_classes ; iout++)
      outptr[iout] /= sum ;
}


/*
   Host wrapper: launches one thread per case in this batch.
   Returns 0 on success, 1 on a launch error.
*/

int cuda_softmax (
   int istart ,   // First case in this batch
   int istop      // One past last case
   )
{
   int n, warpsize, blocks_per_grid, threads_per_block ;
   char msg[256] ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   n = istop - istart ;               // Number of elements
   threads_per_block = (n + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;
   blocks_per_grid = (n + threads_per_block - 1) / threads_per_block ;

   hipLaunchKernelGGL(( device_softmax) , dim3(blocks_per_grid) , dim3(threads_per_block) , 0, 0, istart , istop ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      // Fixed: message previously said "cuda_cpx_softmax", but this function is
      // cuda_softmax; every other wrapper in this file reports its own name.
      sprintf_s ( msg , 255 , "cuda_softmax launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
------------------------------------------------------------------------------------------------

   cuda_ll - Given output activations and true classes, compute log likelihood

   This would be called after the entire training set is processed, not in batches.
------------------------------------------------------------------------------------------------ */

/*
   Reduction kernel: each block accumulates partial negative log likelihood over
   a grid-stride loop, reduces it in shared memory, and writes one value per
   block to d_ll_out.
   NOTE(review): the shared-memory tree reduction assumes blockDim.x
   (REDUC_THREADS) is a power of two - confirm the constant's definition.
*/

__global__ void device_ll ()
{
   __shared__ double partial_ll[REDUC_THREADS] ;
   int i, n, n_classes, index ;
   double sum_ll ;

   index = threadIdx.x ;
   n = d_ncases ;
   n_classes = d_n_classes ;

// Grid-stride accumulation of -log(p[true class]); the 1.e-30 guards against log(0)
   sum_ll = 0.0 ;
   for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x)
      sum_ll -= log ( d_output[i*n_classes+d_class[i]] + 1.e-30 ) ;

   partial_ll[index] = sum_ll ;
   __syncthreads() ;

// Shared-memory tree reduction within the block; the barrier is outside the
// conditional add, so all threads reach it
   for (i=blockDim.x>>1 ; i ; i>>=1) {
      if (index < i)
         partial_ll[index] += partial_ll[index+i] ;
      __syncthreads() ;
      }

   if (index == 0)
      d_ll_out[blockIdx.x] = partial_ll[0] ;
}


/*
   Host wrapper: launch device_ll, copy the per-block partial sums back, and
   finish the summation on the host.  Returns 0 on success, 1 on error.
   NOTE(review): the error messages say "cuda_cpx_ll" although this function is
   cuda_ll - presumably copied from a related source file; confirm intent.
   NOTE(review): the copy moves blocks_per_grid * sizeof(float) from h_ll_out
   into reduc_fdata, while the kernel stores a double (partial_ll[0]) into
   d_ll_out.  This is consistent only if d_ll_out/h_ll_out and reduc_fdata are
   float buffers (the double narrows on store) - confirm their declarations.
*/

int cuda_ll (
   int n ,       // Number of values; n_cases
   double *ll    // Computed log likelihood
   )
{
   int i, blocks_per_grid ;
   double sum ;
   char msg[256] ;
   hipError_t error_id ;

   blocks_per_grid = (n + REDUC_THREADS - 1) / REDUC_THREADS ;
   if (blocks_per_grid > REDUC_BLOCKS)
      blocks_per_grid = REDUC_BLOCKS ;

   hipLaunchKernelGGL(( device_ll) , dim3(blocks_per_grid) , dim3(REDUC_THREADS) , 0, 0, ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_cpx_ll launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   error_id = hipMemcpy ( reduc_fdata , h_ll_out , blocks_per_grid * sizeof(float) , hipMemcpyDeviceToHost ) ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_cpx_ll Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

// Sum the per-block partials on the host
   sum = 0.0 ;
   for (i=0 ; i<blocks_per_grid ; i++)
      sum += reduc_fdata[i] ;
   *ll = sum ;

   return 0 ;
}


/*
--------------------------------------------------------------------------------

   output_delta - Put output delta into this_delta

--------------------------------------------------------------------------------
*/

__global__ void
device_output_delta (
   int istart    // First case in this batch
   )
{
   int icase, iout ;
   double target ;

   iout = blockIdx.x * blockDim.x + threadIdx.x ;
   if (iout >= d_n_classes)
      return ;

   icase = blockIdx.y ;

// Delta is (target - output); target is 1 for the true class, else 0
   target = (iout == d_class[istart+icase])  ?  1.0 : 0.0 ;

// The output matrix has all training cases, hence we add istart, but delta is relative to this batch.
   d_this_delta[icase*d_n_classes+iout] = target - d_output[(istart+icase)*d_n_classes+iout] ;
}


/*
   Host wrapper: outputs in grid x, cases in grid y.
   Returns 0 on success, 1 on a launch error.
*/

int cuda_output_delta (
   int istart ,   // First case in this batch
   int istop ,    // One past last case
   int ntarg      // Number of targets (outputs, classes)
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (ntarg + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (ntarg + threads_per_block - 1) / threads_per_block ;
   block_launch.y = istop - istart ;
   block_launch.z = 1 ;

   hipLaunchKernelGGL(( device_output_delta) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, istart ) ;

   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_output_delta launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
--------------------------------------------------------------------------------

   output_gradient - Compute output layer gradient

--------------------------------------------------------------------------------
*/

__global__ void device_output_gradient_no_hidden (
   int istart ,   // Index of first case in this batch
   int nc         // Number of cases in batch
   )
{
   int icase, iin ;
   float *gptr ;
   double input ;

   iin = blockIdx.x * blockDim.x + threadIdx.x ;
   if (iin > d_n_pred)   // Note > rather than >=: index d_n_pred is the bias term
      return ;

   icase = blockIdx.y ;

   if (iin < d_n_pred)
      input = d_predictors[(istart+icase)*d_n_pred+iin] ;
   else
      input = 1.0 ;
// Bias

// iout = blockIdx.z ; We directly use this below

   gptr = d_grad[0] + icase * d_n_weights ;   // Gradient of output layer
   gptr[blockIdx.z*(d_n_pred+1)+iin] = d_this_delta[icase*d_n_classes+blockIdx.z] * input ;
}


__global__ void device_output_gradient (
   int nc ,      // Number of cases in batch
   int ilayer    // Hidden layer which feeds the output layer
   )
{
   int icase, ihid, nhid ;
   float *gptr ;
   double input ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   nhid = d_nhid[ilayer] ;   // Neurons in last hidden layer
   if (ihid > nhid)          // Note > rather than >=: index nhid is the bias term
      return ;

   icase = blockIdx.y ;

   if (ihid < nhid)
      input = d_act[ilayer][icase*nhid+ihid] ;
   else
      input = 1.0 ;   // Bias

// iout = blockIdx.z ; We directly use this below

   gptr = d_grad[ilayer+1] + icase * d_n_weights ;   // Gradient of output layer
   gptr[blockIdx.z*(nhid+1)+ihid] = d_this_delta[icase*d_n_classes+blockIdx.z] * input ;
}


/*
   Host wrapper: inputs (plus bias) in grid x, cases in grid y, outputs in grid z.
   Dispatches to the no-hidden-layer kernel when ilayer is negative.
   Returns 0 on success, 1 on a launch error.
*/

int cuda_output_gradient (
   int istart ,   // Index of first case in this batch
   int nc ,       // Number of cases in batch
   int nin ,      // Number of inputs to last layer
   int ilayer ,   // Hidden layer which feeds the output layer
   int ntarg      // Number of targets (outputs, classes)
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (nin + 1 + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nin + 1 + threads_per_block - 1) / threads_per_block ;  // Include bias
   block_launch.y = nc ;
   block_launch.z = ntarg ;

   if (ilayer < 0)
      hipLaunchKernelGGL(( device_output_gradient_no_hidden) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, istart , nc ) ;
   else
      hipLaunchKernelGGL(( device_output_gradient) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, nc , ilayer ) ;

   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_output_gradient launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
-----------------------------------------------------------------------------------

   backprop_delta_FC - Backpropagate delta from a fully connected hidden layer

-----------------------------------------------------------------------------------
*/

__global__ void device_backprop_delta_FC (
   int ilayer   // Feed is from ilayer to ilayer+1, so ilayer+1 is FC
   )
{
   int j, icase, ihid, nhid, n_next ;
   float *next_weights ;
   double *delta_ptr, *prior_delta_ptr, this_act, delta ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   nhid = d_nhid[ilayer] ;   // Neurons in this hidden layer
   if (ihid >= nhid)
      return ;

   icase = blockIdx.y ;

// The 'next' layer may be the output layer or another hidden layer
   if (ilayer == d_n_layers-1) {
      n_next = d_n_classes ;
      next_weights = d_weights[ilayer+1] + ihid * d_n_classes_cols ;
      }
   else {
      n_next = d_nhid[ilayer+1] ;
      next_weights = d_weights[ilayer+1] + ihid * d_nhid_cols[ilayer+1] ;
      }

   delta_ptr = d_this_delta + icase * n_next ;        // Coming from the next layer, which was just done
   prior_delta_ptr = d_prior_delta + icase * nhid ;   // Save for the next layer to do, one layer back

   delta = 0.0 ;
   for (j=0 ; j<n_next ; j++)
      delta += delta_ptr[j] * next_weights[j] ;  // Weights are transpose of host; later layer changes fastest

   if (d_layer_type[ilayer] == TYPE_FC  ||  d_layer_type[ilayer] == TYPE_LOCAL  ||  d_layer_type[ilayer] == TYPE_CONV) {
      this_act = d_act[ilayer][icase*nhid+ihid] ;
      delta *= 1.0 - this_act * this_act ;  // Derivative
      }

   prior_delta_ptr[ihid] = delta ;  // Save it for doing the next layer back
}


/*
   Host wrapper: neurons of this layer in grid x, cases in grid y.
   Returns 0 on success, 1 on a launch error.
*/

int cuda_backprop_delta_FC (
   int nc ,         // Number of cases in batch
   int ilayer ,     // Hidden layer being processed
   int nhid_this    // Number of hidden neurons in this layer
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (nhid_this + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nhid_this + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nc ;
   block_launch.z = 1 ;

   hipLaunchKernelGGL(( device_backprop_delta_FC) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, ilayer ) ;

   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_backprop_delta_FC launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
-----------------------------------------------------------------------------------

   backprop_delta_nonpooled - Backpropagate delta from a locally connected
                              or convolutional hidden layer

-----------------------------------------------------------------------------------
*/

__global__ void device_backprop_delta_nonpooled (
   int ilayer   // Feed is from ilayer to ilayer+1, so ilayer+1 is LOCAL or CONV
   )
{
   int k, icase, ihid, next_row, next_col, next_slice, this_row, this_col, this_slice ;
   int nH, k_next, wt_cols, rstart, cstart, prod, ltype ;
   int strideH, strideV, padH, padV, height, width, depth ;
   int next_rstart, next_rstop, next_cstart, next_cstop ;
   float *weights, *wtptr ;
   double *this_delta_ptr, *prior_delta_ptr ;
   double this_act, sum ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   if (ihid >= d_nhid[ilayer])
      return ;

   prod = d_width[ilayer] * d_depth[ilayer] ;

// Get the 3D coordinates of neuron 'ihid'
   this_row = ihid / prod ;
   k = ihid - this_row * prod ;
   this_col = k / d_depth[ilayer] ;
   this_slice = k % d_depth[ilayer] ;

   icase = blockIdx.y ;

   nH = 2 * d_HalfWidH[ilayer+1] + 1 ;   // Horizontal filter size

   this_delta_ptr = d_this_delta + icase * d_nhid[ilayer+1] ;   // Coming from the next layer, which was just done
   prior_delta_ptr = d_prior_delta + icase * d_nhid[ilayer] ;   // Save for the next layer to do, one layer back

// Cache the next layer's geometry in registers
   ltype = d_layer_type[ilayer+1] ;
   strideV = d_strideV[ilayer+1] ;
   strideH = d_strideH[ilayer+1] ;
   padV = d_padV[ilayer+1] ;
   padH = d_padH[ilayer+1] ;
   height = d_height[ilayer+1] ;
   width = d_width[ilayer+1] ;
   depth = d_depth[ilayer+1] ;

// Find the range of next-layer neurons whose filters cover this neuron:
// this >= next * stride - pad            IMPLIES  next <= (this + pad) / stride
// this <= next * stride - pad + 2 * hw   IMPLIES  next >= (this + pad - 2 * hw) / stride
// We can safely do this in integer arithmetic

   next_rstop = this_row + padV ;
   k = next_rstart = next_rstop - 2 * d_HalfWidV[ilayer+1] ;
   next_rstop /= strideV ;
   next_rstart /= strideV ;
   if (k >= 0  &&  k % strideV)   // If the division above was inexact,
      ++next_rstart ;             // we must by pass the fractional part
   if (next_rstop >= height)
      next_rstop = height - 1 ;
   if (next_rstart < 0)
      next_rstart = 0 ;

   next_cstop = this_col + padH ;
   k = next_cstart = next_cstop - 2 * d_HalfWidH[ilayer+1] ;
   next_cstop /= strideH ;
   next_cstart /= strideH ;
   if (k >= 0  &&  k % strideH)
      ++next_cstart ;
   if (next_cstop >= width)
      next_cstop = width - 1 ;
   if (next_cstart < 0)
      next_cstart = 0 ;

   weights = d_weights[ilayer+1] ;
   if (ltype == TYPE_CONV)           // A CONV layer has the same weight set for all neurons in visible field
      wt_cols = d_depth_cols[ilayer+1] ;
   else                              // A LOCAL layer has different weights for each neuron
      wt_cols = d_nhid_cols[ilayer+1] ;

// For LOCAL layers, neuron layout in current layer is (height, width, depth).

   sum = 0.0 ;

   for (next_row=next_rstart ; next_row<=next_rstop ; next_row++) {
      for (next_col=next_cstart ; next_col<=next_cstop ; next_col++) {

         // Center of first filter is at HalfWidth-Pad; filter begins at -Pad.
         rstart = strideV * next_row - padV ;
         cstart = strideH * next_col - padH ;

         // This is what we would be testing if we didn't compute the exact limits above
         // rstop = rstart + 2 * d_HalfWidV[ilayer+1] ;
         // cstop = cstart + 2 * d_HalfWidH[ilayer+1] ;
         // if (this_row >= rstart  &&  this_row <= rstop  &&  this_col >= cstart  &&  this_col <= cstop) {

         for (next_slice=0 ; next_slice<depth ; next_slice++) {
            k_next = (next_row * width + next_col) * depth + next_slice ;
            if (ltype == TYPE_CONV)   // A CONV layer has the same weight set for all neurons in visible field
               wtptr = weights + next_slice ;
            else                      // A LOCAL layer has different weights for each neuron (height, width, depth)
               wtptr = weights + k_next ;
            k = ((this_row - rstart) * nH + this_col - cstart) * d_depth[ilayer] + this_slice ;  // Location in filter
            sum += this_delta_ptr[k_next] * wtptr[k*wt_cols] ;
            } // For next_slice
         } // For next_col
      } // For next_row

// ihid = (this_row * d_width[ilayer] + this_col) * d_depth[ilayer] + this_slice ;

   if (d_layer_type[ilayer] == TYPE_FC  ||  d_layer_type[ilayer] == TYPE_LOCAL  ||  d_layer_type[ilayer] == TYPE_CONV) {
      this_act = d_act[ilayer][icase*d_nhid[ilayer]+ihid] ;
      sum *= 1.0 - this_act * this_act ;  // Derivative
      }

   prior_delta_ptr[ihid] = sum ;  // Save it for doing the next layer back
}


/*
   Host wrapper: neurons of this layer in grid x, cases in grid y.
   Returns 0 on success, 1 on a launch error.
*/

int cuda_backprop_delta_nonpooled (
   int nc ,         // Number of cases in batch
   int ilayer ,     // Hidden layer being processed
   int nhid_this    // Number of hidden neurons in this layer
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (nhid_this + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nhid_this + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nc ;
   block_launch.z = 1 ;

   hipLaunchKernelGGL(( device_backprop_delta_nonpooled) , dim3(block_launch) ,
dim3(threads_per_block) , 0, 0, ilayer ) ; hipDeviceSynchronize() ; error_id = hipGetLastError () ; if (error_id != hipSuccess) { sprintf_s ( msg , 255 , "cuda_backprop_delta_nonpooled launch error %d: %s", error_id, hipGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } return 0 ; } /* ----------------------------------------------------------------------------------- backprop_delta_pooled - Backpropagate delta from a POOLAVG or POOLMAX layer ----------------------------------------------------------------------------------- */ __global__ void device_backprop_delta_pooled ( int ilayer // Feed is from ilayer to ilayer+1, so ilayer+1 is POOLAVG or POOLMAX ) { int k, icase, ihid, next_row, next_col, this_row, this_col, this_slice ; int k_next, prod, this_cols, *poolmax_id_ptr ; int next_rstart, next_rstop, next_cstart, next_cstop ; double *this_delta_ptr, *prior_delta_ptr, sum, this_act ; ihid = blockIdx.x * blockDim.x + threadIdx.x ; if (ihid >= d_nhid[ilayer]) return ; prod = d_width[ilayer] * d_depth[ilayer] ; this_row = ihid / prod ; k = ihid - this_row * prod ; this_col = k / d_depth[ilayer] ; this_slice = k % d_depth[ilayer] ; icase = blockIdx.y ; this_delta_ptr = d_this_delta + icase * d_nhid[ilayer+1] ; // Coming from the next layer, which was just done prior_delta_ptr = d_prior_delta + icase * d_nhid[ilayer] ; // Save for the next layer to do, one layer back // this >= next * stride IMPLIES next <= this / stride // this <= next * stride + pw - 1 IMPLIES next >= (this - pw + 1) / stride // We can safely do this in integer arithmetic next_rstop = this_row ; k = next_rstart = next_rstop - d_PoolWidV[ilayer+1] + 1 ; next_rstop /= d_strideV[ilayer+1] ; next_rstart /= d_strideV[ilayer+1] ; if (k >= 0 && k % d_strideV[ilayer+1]) ++next_rstart ; if (next_rstop >= d_height[ilayer+1]) next_rstop = d_height[ilayer+1] - 1 ; if (next_rstart < 0) next_rstart = 0 ; next_cstop = this_col ; k = next_cstart = next_cstop - d_PoolWidH[ilayer+1] + 1 ; 
   next_cstop /= d_strideH[ilayer+1] ;
   next_cstart /= d_strideH[ilayer+1] ;
   if (k >= 0 && k % d_strideH[ilayer+1])   // Round the truncated division up
      ++next_cstart ;
   if (next_cstop >= d_width[ilayer+1])
      next_cstop = d_width[ilayer+1] - 1 ;
   if (next_cstart < 0)
      next_cstart = 0 ;

   sum = 0.0 ;

   if (d_layer_type[ilayer+1] == TYPE_POOLAVG) {
      // Every covering pooling window contributes its delta with weight 1/area
      for (next_row=next_rstart ; next_row<=next_rstop ; next_row++) {
         for (next_col=next_cstart ; next_col<=next_cstop ; next_col++) {
            k_next = (next_row * d_width[ilayer+1] + next_col) * d_depth[ilayer+1] + this_slice ;
            sum += this_delta_ptr[k_next] ;
            } // For next_col
         } // For next_row
      // NOTE(review): divides by the full pool area even when the window range was
      // clipped at an image edge -- confirm this matches the forward-pass averaging.
      sum /= d_PoolWidH[ilayer+1] * d_PoolWidV[ilayer+1] ;
      } // POOLAVG

   else if (d_layer_type[ilayer+1] == TYPE_POOLMAX) {
      // poolmax_id encodes the winning (row, col) of the current layer as row*width+col
      poolmax_id_ptr = d_poolmax_id[ilayer+1] + icase * d_nhid[ilayer+1] ;
      this_cols = d_width[ilayer] ;
      for (next_row=next_rstart ; next_row<=next_rstop ; next_row++) {
         for (next_col=next_cstart ; next_col<=next_cstop ; next_col++) {
            k_next = (next_row * d_width[ilayer+1] + next_col) * d_depth[ilayer+1] + this_slice ;
            // Was the current-layer neuron the winner in the MAX competition for the next-layer competition?
            if (this_row == poolmax_id_ptr[k_next] / this_cols  &&  this_col == poolmax_id_ptr[k_next] % this_cols)
               sum += this_delta_ptr[k_next] ;  // Weight is 1
            } // For next_col
         } // For next_row
      } // POOLMAX

// ihid = (this_row * d_width[ilayer] + this_col) * d_depth[ilayer] + this_slice ;

   // Multiply by the activation derivative 1 - act^2 (tanh-style formula --
   // NOTE(review): confirm against the forward-pass activation).
   if (d_layer_type[ilayer] == TYPE_FC || d_layer_type[ilayer] == TYPE_LOCAL || d_layer_type[ilayer] == TYPE_CONV) {
      this_act = d_act[ilayer][icase*d_nhid[ilayer]+ihid] ;
      sum *= 1.0 - this_act * this_act ;  // Derivative
      }

   prior_delta_ptr[ihid] = sum ;  // Save it for doing the next layer back
}


/*
   Host-side launcher for device_backprop_delta_pooled.
   Grid: x covers hidden neurons of ilayer (bounds-checked in the kernel),
   y covers the nc cases.  Returns 0 on success, 1 on a CUDA error.
*/

int cuda_backprop_delta_pooled (
   int nc ,          // Number of cases in batch
   int ilayer ,      // Hidden layer being processed
   int nhid_this     // Number of hidden neurons in this layer
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;  // Threads per warp, likely 32 well into the future

   // Round threads per block up to a full warp, capped at 4 warps
   threads_per_block = (nhid_this + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nhid_this + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nc ;
   block_launch.z = 1 ;

   hipLaunchKernelGGL(( device_backprop_delta_pooled) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, ilayer ) ;

   // NOTE(review): hipDeviceSynchronize() return value is discarded (see siblings)
   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_backprop_delta_pooled launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
-----------------------------------------------------------------------------------

   move_delta - Move delta from prior_delta to this_delta

-----------------------------------------------------------------------------------
*/

// One thread per neuron (blockIdx.x/threadIdx.x), one grid row (blockIdx.y) per case.
// Copies d_prior_delta into d_this_delta so the next backprop step reads from the
// standard location.
__global__ void device_move_delta (
   int nhid          // Number of neurons in the layer just processed
   )
{
   int icase, ihid ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   if (ihid >= nhid)
      return ;

   icase =
blockIdx.y ;

   d_this_delta[icase*nhid+ihid] = d_prior_delta[icase*nhid+ihid] ;
}


/*
   Host-side launcher for device_move_delta.
   Grid: x covers the nhid_this neurons (bounds-checked in the kernel),
   y covers the nc cases.  Returns 0 on success, 1 on a CUDA error.
*/

int cuda_move_delta (
   int nc ,          // Number of cases in batch
   int nhid_this     // Number of hidden neurons in this layer
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;  // Threads per warp, likely 32 well into the future

   // Round threads per block up to a full warp, capped at 4 warps
   threads_per_block = (nhid_this + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nhid_this + threads_per_block - 1) / threads_per_block ;  // Include bias
   block_launch.y = nc ;
   block_launch.z = 1 ;

   hipLaunchKernelGGL(( device_move_delta) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, nhid_this ) ;

   // NOTE(review): hipDeviceSynchronize() return value is discarded (see siblings)
   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_move_delta launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
-----------------------------------------------------------------------------------

   hidden_gradient_FC - Compute gradient for a fully connected hidden layer

-----------------------------------------------------------------------------------
*/

// Thread layout: blockIdx.x/threadIdx.x index the inputs of each neuron (one extra
// slot, iin == nin, is the bias), blockIdx.y is the hidden neuron, blockIdx.z is the
// case within this batch.  Gradient element = delta(neuron) * input(iin).
__global__ void device_hidden_gradient_FC (
   int istart ,      // Index of first case in this batch
   int nc ,          // Number of cases in batch
   int ilayer        // Hidden layer being processed
   )
{
   int iin, ihid, nin, ninp1 ;
   float *gptr ;
   double input ;

   iin = blockIdx.x * blockDim.x + threadIdx.x ;

   if (ilayer == 0)
      nin = d_n_pred ;          // Number of inputs to each neuron in this layer
   else
      nin = d_nhid[ilayer-1] ;

// icase = blockIdx.z ;  // Used directly below

   if (iin > nin)
      return ;
   else if (iin == nin)
      input = 1.0 ;             // Bias
   else if (ilayer)
      input = d_act[ilayer-1][blockIdx.z*nin+iin] ;       // Prior layer's activation
   else
      input = d_predictors[(istart+blockIdx.z)*nin+iin] ; // Raw predictors for layer 0

   ihid = blockIdx.y ;
   ninp1 = nin + 1 ;  // We mustn't forget the bias, so nin+1

   gptr = d_grad[ilayer] +
blockIdx.z * d_n_weights ; // Gradient of this hidden layer for this case gptr[ihid*ninp1+iin] = d_this_delta[blockIdx.z*d_nhid[ilayer]+ihid] * input ; } /* ------------------------------------------------------------------------------------------------------ hidden_gradient_LOCAL_CONV - Compute gradient for a locally connected or convolutional hidden layer For a LOCAL layer, we do all of the nhid * n_prior_weights * max_batch entries. But for a CONV layer, there are just depth * n_prior_weights * max_batch entries because the weight set for every (height,width) placement in the visual field is the same in any single slice. So there are not enough entries in the gradient vector. Thus, we use the previously allocated convgrad_work vector, which has nhid * n_prior_weights * max_batch entries. Then we will launch another kernel which flattens out the height and width dimensions by summing them into the gradient vector. Note: ihidstart must be a multiple of height*width! ------------------------------------------------------------------------------------------------------ */ __global__ void device_hidden_gradient_LOCAL_CONV ( int local_vs_conv , // Is this a LOCAL (vs CONV) layer? 
int nfilt , // Filter size, (2*hwV+1) * (2*hwH+1) * depth of input (does not include +1 for bias) int istart , // Index of first case in this batch int depth_offset , // Start processing layers at this depth int n_depths , // Number of slices to be processed int ilayer // Hidden layer being processed ) { int k, iin, ifilt, ihid_offset, ihid_actual, prod ; int in_row, in_col, in_slice, in_rows, in_cols, in_slices ; int this_row, this_col, ifiltV, ifiltH ; float *gptr ; double input, delta ; ifilt = blockIdx.x * blockDim.x + threadIdx.x ; // <= filter size if (ifilt > nfilt) return ; // Input is from either the input image or a prior layer's activations // Get the input dimensions (height, width, depth) if (ilayer == 0) { in_rows = d_img_rows ; in_cols = d_img_cols ; in_slices = d_img_bands ; } else { in_rows = d_height[ilayer-1] ; in_cols = d_width[ilayer-1] ; in_slices = d_depth[ilayer-1] ; } // We may be splitting the computation into multiple launches, doing one or more slices in each. // If so, we need to compute the actual slice/neuron being processed here. // If we are doing a CONV layer, the offset will be into convgrad_work. // Whenever we access data, we use ihid_actual, and we also use it to save a LOCAL gradient. // But when we save a CONV gradient, we use ihid_offset. // Recall that hidden neurons are stored with depth changing fastest. ihid_offset = blockIdx.y ; // Offset into this launch set prod = d_width[ilayer] * d_height[ilayer] ; // Size of visual field, a slice k = ihid_offset % n_depths + depth_offset ; // Actual starting slice ihid_actual = ihid_offset / n_depths * d_depth[ilayer] + k ; // Actual hidden neuron being done // If this is the bias term, it's simple. 
// Recall that blockIdx.z is the case in this batch if (ifilt == nfilt) { // Bias term delta = d_this_delta[blockIdx.z*d_nhid[ilayer]+ihid_actual] ; if (local_vs_conv) { gptr = d_grad[ilayer] + blockIdx.z * d_n_weights ; // Gradient of this hidden layer for this case gptr[ihid_actual*d_n_prior_weights[ilayer]+d_n_prior_weights[ilayer]-1] = delta ; } else { gptr = d_convgrad_work + blockIdx.z * d_max_convgrad_each ; gptr[ihid_offset*d_convgrad_cols[ilayer]+d_n_prior_weights[ilayer]-1] = delta ; } return ; } // Get the location of this kernel within the filter // Thread ifilt is the ordinal number of the filter element // The filter order is (height, width, slice) prod = (2 * d_HalfWidH[ilayer] + 1) * in_slices ; ifiltV = ifilt / prod ; k = ifilt - ifiltV * prod ; ifiltH = k / in_slices ; in_slice = k % in_slices ; // Get the location of this neuron within the volume of the current layer prod = d_width[ilayer] * d_depth[ilayer] ; this_row = ihid_actual / prod ; k = ihid_actual - this_row * prod ; this_col = k / d_depth[ilayer] ; // this_slice = k % d_depth[ilayer] ; // Not needed; here for clarity only // Get the location of this filter element within the input volume. // It may be outside an edge, in which case there is nothing to do. // The filter center is at stride * CurrentPos + HalfWidth - Pad. // The upper-left corner is at stride * CurrentPos - Pad. // This can cause branch-induced stalling, but only at edges. in_row = d_strideV[ilayer] * this_row - d_padV[ilayer] + ifiltV ; if (in_row < 0 || in_row >= in_rows) return ; in_col = d_strideH[ilayer] * this_col - d_padH[ilayer] + ifiltH ; if (in_col < 0 || in_col >= in_cols) return ; // Here we go if (local_vs_conv) gptr = d_grad[ilayer] + blockIdx.z * d_n_weights ; // Gradient of this hidden layer for this case else gptr = d_convgrad_work + blockIdx.z * d_max_convgrad_each ; delta = d_this_delta[blockIdx.z*d_nhid[ilayer]+ihid_actual] ; // Fetch the input. 
Adjacent threads have adjacent memory accesses, though not zero padded for alignment. // But zero padding would do no good here because in general, warps will only by chance start with iin=0. // All is great if in_slices and prior-layer size are multiples of 16! iin = (in_row * in_cols + in_col) * in_slices + in_slice ; if (ilayer) input = d_act[ilayer-1][blockIdx.z*d_nhid[ilayer-1]+iin] ; else input = d_predictors[(istart+blockIdx.z)*d_n_pred+iin] ; // Adjacent threads access adjacent memory, though there is no zero padding for alignment. // Zero padding here would help, because ifilt starts at zero. // But that would complicate the code a lot, and this is a small fraction of instructions. // Also, the kernel is generally limited by the math pipeline. // And of course if n_prior_weights is a multiple of 32, all is good! if (local_vs_conv) gptr[ihid_actual*d_n_prior_weights[ilayer]+ifilt] = input * delta ; else gptr[ihid_offset*d_convgrad_cols[ilayer]+ifilt] = input * delta ; } __global__ void device_flatten_gradient ( int islice_start , // Index of first slice in this batch int max_depth , // Max depth in launch, <= slices reserved in convgrad_work int ilayer // Hidden layer being processed ) { int k, islice, icase, iprior, irow, icol ; double sum ; float *workptr, *gradptr ; iprior = blockIdx.x * blockDim.x + threadIdx.x ; if (iprior >= d_n_prior_weights[ilayer]) return ; islice = blockIdx.y ; icase = blockIdx.z ; gradptr = d_grad[ilayer] + icase * d_n_weights ; // Gradient of this hidden layer for this case workptr = d_convgrad_work + icase * d_max_convgrad_each ; // nvisual = d_height[ilayer] * d_width[ilayer] ; // Also equals nhid / depth sum = 0.0 ; for (irow=0 ; irow<d_height[ilayer] ; irow++) { for (icol=0 ; icol<d_width[ilayer] ; icol++) { k = (irow * d_width[ilayer] + icol) * max_depth + islice ; // The neuron at (irow, icol, islice) // assert ( k*d_convgrad_cols[ilayer]+iprior < d_max_convgrad_each ) ; sum += workptr[k*d_convgrad_cols[ilayer]+iprior] ; } 
} gradptr[(islice+islice_start)*d_n_prior_weights[ilayer]+iprior] = sum ; } int cuda_hidden_gradient ( int max_hid_grad , // Max hid in a CONV hid grad launch; multiple of height*width; <= 65535 int max_mem_grad , // Maximum CONV working memory (MB) per CUDA launch; prevents timeout error and lowers memory use int istart , // Index of first case in this batch int nc , // Number of cases in batch int ilayer , // Hidden layer being processed int type , // Type of this layer int nhid_this , // Number of hidden neurons in this layer int nhid_prior , // And in prior layer int depth , // Depth of this layer int n_prior_weights , // N of inputs per neuron (including bias) to prior layer = prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1 int *n_launches // Returned for user edification ) { int i, conv_cols, n_max, nhid_launch, ihid_start, warpsize, threads_per_block, field, divisor ; char msg[256] ; dim3 block_launch ; hipError_t error_id ; field = nhid_this / depth ; // Visual field size = height * width warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future *n_launches = 1 ; // This is purely for reporting launch statistics if (type == TYPE_FC) { threads_per_block = (nhid_prior + 1 + warpsize - 1) / warpsize * warpsize ; // +1 includes bias if (threads_per_block > 4 * warpsize) threads_per_block = 4 * warpsize ; block_launch.x = (nhid_prior + 1 + threads_per_block - 1) / threads_per_block ; // Include bias block_launch.y = nhid_this ; block_launch.z = nc ; hipLaunchKernelGGL(( device_hidden_gradient_FC) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, istart , nc , ilayer ) ; hipDeviceSynchronize() ; } else if (type == TYPE_LOCAL || type == TYPE_CONV) { divisor = 1 ; // Figure out how much we have to divide slices to meet max_hid_grad and max_mem_grad limits if (type == TYPE_CONV) { conv_cols = (n_prior_weights + 31) / 32 * 32 ; // CONV scratch is zero padded for full coalescing n_max = 1024 * 1024 * max_mem_grad / (max_batch * 
conv_cols * sizeof(float)) ; // Launch limit satisfying memory } else n_max = MAXPOSNUM ; for ( ;; ) { nhid_launch = depth / divisor * field ; // We will launch this many hid at a time if (nhid_launch <= max_hid_grad && nhid_launch <= n_max) break ; ++divisor ; } if (nhid_launch < field) // Careless user may have set it too small nhid_launch = field ; // So ignore it /* Launch loop */ *n_launches = 0 ; if (type == TYPE_CONV) { // We must zero the CONV work area because some entries may be undefined // This must also be done in the last pass, because a partial launch at the end // may have garbage from the prior launch in 'undefined' locations. for (i=0 ; i<max_convgrad_work ; i++) fdata[i] = 0.0 ; error_id = hipMemcpy ( h_convgrad_work , fdata , max_convgrad_work * sizeof(float) , hipMemcpyHostToDevice ) ; if (error_id != hipSuccess) { sprintf_s ( msg , 255 , "cuda_hidden_gradient_LOCAL_CONV convgrad_work zero error %d: %s", error_id, hipGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } } for (ihid_start=0 ; ihid_start < depth*field ; ihid_start+=nhid_launch) { threads_per_block = (n_prior_weights + warpsize - 1) / warpsize * warpsize ; if (threads_per_block > 4 * warpsize) // Increase? 
May be reasonable threads_per_block = 4 * warpsize ; block_launch.x = (n_prior_weights + threads_per_block - 1) / threads_per_block ; block_launch.y = nhid_launch ; if (depth*field - ihid_start < nhid_launch) { // Last launch may be partial block_launch.y = depth*field - ihid_start ; if (type == TYPE_CONV) { for (i=0 ; i<max_convgrad_work ; i++) fdata[i] = 0.0 ; error_id = hipMemcpy ( h_convgrad_work , fdata , max_convgrad_work * sizeof(float) , hipMemcpyHostToDevice ) ; if (error_id != hipSuccess) { sprintf_s ( msg , 255 , "cuda_hidden_gradient_LOCAL_CONV convgrad_work zero error %d: %s", error_id, hipGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } } } // If last launch is partial block_launch.z = nc ; hipLaunchKernelGGL(( device_hidden_gradient_LOCAL_CONV) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, type==TYPE_LOCAL ? 1 : 0 , n_prior_weights-1 , istart , ihid_start/field , block_launch.y/field , ilayer ) ; hipDeviceSynchronize() ; error_id = hipGetLastError () ; if (error_id != hipSuccess) { sprintf_s ( msg , 255 , "cuda_hidden_gradient LOCAL_CONV launch error %d: %s", error_id, hipGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } if (type == TYPE_CONV) { // Must also flatten gradient? 
assert ( nhid_launch * nc * n_prior_weights <= max_convgrad_work ) ; threads_per_block = (n_prior_weights + warpsize - 1) / warpsize * warpsize ; if (threads_per_block > 4 * warpsize) threads_per_block = 4 * warpsize ; block_launch.x = (n_prior_weights + threads_per_block - 1) / threads_per_block ; block_launch.y /= field ; // Number of slices in launch block_launch.z = nc ; hipLaunchKernelGGL(( device_flatten_gradient) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, ihid_start/field , block_launch.y , ilayer ) ; hipDeviceSynchronize() ; error_id = hipGetLastError () ; if (error_id != hipSuccess) { sprintf_s ( msg , 255 , "cuda_hidden_gradient flatten_gradient launch error %d: %s", error_id, hipGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } } // CONV ++*n_launches ; } // Launch loop } // LOCAL or CONV return 0 ; } /* -------------------------------------------------------------------------------- zero_gradient - Some gradient entires may be undefined (zero, actually) due to lack of connections in a poorly designed model. So before computing the gradient for a batch, we must zero the entire gradient vector. 
--------------------------------------------------------------------------------
*/

// One thread per weight (blockIdx.x/threadIdx.x, bounds-checked), one grid row
// (blockIdx.y) per case; zeros that case's copy of the full gradient vector.
// NOTE(review): the nc parameter is unused here; the case count is supplied
// entirely by the launch grid's y dimension.
__global__ void device_zero_gradient (
   int nc            // Number of cases in batch
   )
{
   int index, icase ;
   float *gptr ;

   index = blockIdx.x * blockDim.x + threadIdx.x ;
   if (index >= d_n_weights)
      return ;

   icase = blockIdx.y ;

   gptr = d_grad[0] + index ;        // Complete gradient starts at [0]
   gptr[icase*d_n_weights] = 0.0f ;  // Zero this weight's slot for this case
}


/*
   Host-side launcher for device_zero_gradient.
   Grid: x covers the n_weights entries, y covers the nc cases.
   Returns 0 on success, 1 on a CUDA error.
*/

int cuda_zero_gradient (
   int nc ,          // Number of cases in batch
   int n_weights     // Number of weights
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;  // Threads per warp, likely 32 well into the future

   // Round threads per block up to a full warp, capped at 4 warps
   threads_per_block = (n_weights + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (n_weights + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nc ;
   block_launch.z = 1 ;

   hipLaunchKernelGGL(( device_zero_gradient) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, nc ) ;

   // NOTE(review): hipDeviceSynchronize() return value is discarded (see siblings)
   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_zero_gradient launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
--------------------------------------------------------------------------------

   fetch_gradient - Retrieve sum across batch of complete gradient

   The CUDA grad is neither the order of the CUDA weights, nor the HOST grad!
   Rather, they are grouped by current neuron, (row, col, slice), and with
   input order as the CUDA inputs (row, column, slice).
   A fully connected layer has height=width=1; all neurons are depth.
--------------------------------------------------------------------------------
*/

// One thread per weight (bounds-checked).  Sums this weight's per-case gradient
// entries across the nc cases and overwrites the case-0 slot with the batch sum,
// so the host can then copy just the first n_weights floats back.
__global__ void device_fetch_gradient (
   int nc            // Number of cases in batch
   )
{
   int index, icase ;
   float *gptr ;
   double sum ;

   index = blockIdx.x * blockDim.x + threadIdx.x ;
   if (index >= d_n_weights)
      return ;

   sum = 0.0 ;                 // Accumulate in double to limit float roundoff
   gptr = d_grad[0] + index ;  // Complete gradient starts at [0]
   for (icase=0 ; icase<nc ; icase++)   // For all cases in this batch
      sum += gptr[icase*d_n_weights] ;
   *gptr = sum ;               // Batch sum replaces case 0's entry
}


/*
   Host-side driver: launches device_fetch_gradient to sum per-case gradients,
   copies the summed device gradient to the host, then reorders it from the
   device's (neuron row, col, slice) x (input row, col, slice) grouping into
   the host gradient layout, adding into hostgrad.
   Returns 0 on success, 1 on a CUDA error.
*/

int cuda_fetch_gradient (
   int nc ,             // Number of cases in batch
   int n_weights ,      // Number of weights
   double **hostgrad ,  // Gradient sum output here
   int n_classes ,      // Number of outputs
   int n_layers ,       // Hidden layers; does not include output
   int *layer_type ,    // Each entry (input to final) is TYPE_? in CONST.H
   int img_rows ,       // Size of input image
   int img_cols ,
   int img_bands ,
   int *height ,        // Height of visible field in each layer
   int *width ,         // Width of visible field
   int *depth ,         // Number of slices in each layer
   int *nhid ,          // Number of hidden neurons in each layer
   int *hwH ,           // Half-width of filters
   int *hwV
   )
{
   int warpsize, blocks_per_grid, threads_per_block ;
   int n, n_prior, ilayer, isub ;
   int idepth, iheight, iwidth, ndepth, nheight, nwidth ;
   int in_row, in_col, in_slice, in_n_height, in_n_width, in_n_depth ;
   double *gptr ;
   float *fptr ;
   char msg[256] ;
   hipError_t error_id ;

   warpsize = deviceProp.warpSize ;  // Threads per warp, likely 32 well into the future

   // Round threads per block up to a full warp, capped at 4 warps
   threads_per_block = (n_weights + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;
   blocks_per_grid = (n_weights + threads_per_block - 1) / threads_per_block ;

   hipLaunchKernelGGL(( device_fetch_gradient) , dim3(blocks_per_grid) , dim3(threads_per_block) , 0, 0, nc ) ;

   // NOTE(review): hipDeviceSynchronize() return value is discarded (see siblings)
   hipDeviceSynchronize() ;
   error_id = hipGetLastError () ;
   if (error_id != hipSuccess) {
      sprintf_s ( msg , 255 , "cuda_fetch_gradient launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
      audit ( msg
) ; MEMTEXT ( msg ) ; return 1 ; } error_id = hipMemcpy ( fdata , grad , n_weights * sizeof(float) , hipMemcpyDeviceToHost ) ; if (error_id != hipSuccess) { sprintf_s ( msg , 255 , "cuda_fetch_gradient copy error %d: %s", error_id, hipGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } /* Reorder */ fptr = fdata ; for (ilayer=0 ; ilayer<=n_layers ; ilayer++) { gptr = hostgrad[ilayer] ; /* Fully connected */ if (ilayer == n_layers || layer_type[ilayer] == TYPE_FC) { if (ilayer == 0) { in_n_height = img_rows ; in_n_width = img_cols ; in_n_depth = img_bands ; } else { in_n_height = height[ilayer-1] ; in_n_width = width[ilayer-1] ; in_n_depth = depth[ilayer-1] ; } n_prior = in_n_height * in_n_width * in_n_depth + 1 ; // Number of weights per neuron, including bias if (ilayer == n_layers) n = n_classes ; // Equals depth else n = nhid[ilayer] ; // Equals depth for (idepth=0 ; idepth<n ; idepth++) { for (in_row=0 ; in_row<in_n_height ; in_row++) { for (in_col=0 ; in_col<in_n_width ; in_col++) { for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) { // Compute location of this neuron's weight vector in host isub = idepth * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For in_slice } // For in_col } // For in_row // Bias isub = idepth * n_prior + n_prior - 1 ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For idepth } /* LOCAL */ else if (layer_type[ilayer] == TYPE_LOCAL) { // For LOCAL layers, neuron layout in current layer is (height, width, depth). 
n = nhid[ilayer] ; ndepth = depth[ilayer] ; nheight = height[ilayer] ; nwidth = width[ilayer] ; in_n_height = 2 * hwV[ilayer] + 1 ; in_n_width = 2 * hwH[ilayer] + 1 ; if (ilayer == 0) in_n_depth = img_bands ; else in_n_depth = depth[ilayer-1] ; n_prior = in_n_height * in_n_width * in_n_depth + 1 ; // Number of weights per neuron, including bias for (iheight=0 ; iheight<nheight ; iheight++) { // nhid = ndepth * nheight * nwidth for (iwidth=0 ; iwidth<nwidth ; iwidth++) { // We must reorder so depth changes fastest for (idepth=0 ; idepth<ndepth ; idepth++) { for (in_row=0 ; in_row<in_n_height ; in_row++) { for (in_col=0 ; in_col<in_n_width ; in_col++) { for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) { // Compute location of this neuron's weight in host isub = (idepth * nheight + iheight) * nwidth + iwidth ; // Neuron in this layer isub = isub * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For in_slice } // For in_col } // For in_row // Bias isub = (idepth * nheight + iheight) * nwidth + iwidth ; // Neuron in this layer isub = isub * n_prior + n_prior - 1 ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For idepth } // For iwidth } // For iheight } /* CONV */ else if (layer_type[ilayer] == TYPE_CONV) { nheight = height[ilayer] ; nwidth = width[ilayer] ; ndepth = depth[ilayer] ; in_n_height = 2 * hwV[ilayer] + 1 ; in_n_width = 2 * hwH[ilayer] + 1 ; if (ilayer == 0) in_n_depth = img_bands ; else in_n_depth = depth[ilayer-1] ; n_prior = in_n_height * in_n_width * in_n_depth + 1 ; // Number of weights per neuron, including bias for (idepth=0 ; idepth<ndepth ; idepth++) { for (in_row=0 ; in_row<in_n_height ; in_row++) { for (in_col=0 ; in_col<in_n_width ; in_col++) { for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) { // Compute location of this neuron's weight vector in host isub = idepth * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ; assert ( isub < 
n_weights ) ; gptr[isub] += *fptr++ ; } // For in_slice } // For in_col } // For in_row //Bias isub = idepth * n_prior + n_prior - 1 ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For idepth } } // For ilayer assert ( fptr == fdata + n_weights ) ; return 0 ; } /* -------------------------------------------------------------------------------- CUDA_CLEANUP - Cleanup after CUDA processing -------------------------------------------------------------------------------- */ void cuda_cleanup ( int n_layers , int *layer_type ) { int i ; double sum ; char msg[256] ; MEMTEXT ( "CUDA cuda_cleanup starting" ) ; if (h_predictors != NULL) { hipFree ( h_predictors ) ; h_predictors = NULL ; } if (h_class != NULL) { hipFree ( h_class ) ; h_class = NULL ; } if (activations != NULL) { hipFree ( activations ) ; activations = NULL ; } if (h_output != NULL) { hipFree ( h_output ) ; h_output = NULL ; } for (i=0 ; i<n_layers ; i++) { if (h_poolmax_id[i] != NULL) { hipFree ( h_poolmax_id[i] ) ; h_poolmax_id[i] = NULL ; } } if (weights != NULL) { hipFree ( weights ) ; weights = NULL ; } if (grad != NULL) { hipFree ( grad ) ; grad = NULL ; } if (h_convgrad_work != NULL) { hipFree ( h_convgrad_work ) ; h_convgrad_work = NULL ; } if (h_this_delta != NULL) { hipFree ( h_this_delta ) ; h_this_delta = NULL ; } if (h_prior_delta != NULL) { hipFree ( h_prior_delta ) ; h_prior_delta = NULL ; } if (h_ll_out != NULL) { hipFree ( h_ll_out ) ; h_ll_out = NULL ; } if (reduc_fdata != NULL) { FREE ( reduc_fdata ) ; reduc_fdata = NULL ; } if (fdata != NULL) { FREE ( fdata ) ; fdata = NULL ; } total_memory = 0.0 ; hipDeviceReset () ; /* Print CUDA timers */ sum = 1.e-20 ; for (i=0 ; i<MAX_LAYERS ; i++) { sum += CudaTimers.act[i] ; sum += CudaTimers.delta[i] ; sum += CudaTimers.grad[i] ; } sum += CudaTimers.weights + CudaTimers.softmax + CudaTimers.ll + CudaTimers.movedelta + CudaTimers.fetchgrad ; cudalog ( "" ) ; cudalog ( "" ) ; cudalog ( "CUDA times in seconds: total, (percent), per 
launch" ) ; cudalog ( "" ) ; sprintf ( msg, " Send weights = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.weights, 100.0 * CudaTimers.weights / sum, 0.001 * CudaTimers.weights / (CudaTimers.ncalls_weights + 1.e-20)) ; cudalog ( msg ) ; for (i=0 ; i<=n_layers ; i++) { if (i == n_layers) cudalog ( " Output layer" ) ; else if (layer_type[i] == TYPE_FC) { sprintf ( msg, " Layer %d is fully connected", i+1 ) ; cudalog ( msg ) ; } else if (layer_type[i] == TYPE_LOCAL) { sprintf ( msg, " Layer %d is locally connected", i+1 ) ; cudalog ( msg ) ; } else if (layer_type[i] == TYPE_CONV) { sprintf ( msg, " Layer %d is convolutional", i+1 ) ; cudalog ( msg ) ; } else if (layer_type[i] == TYPE_POOLAVG) { sprintf ( msg, " Layer %d is pooled average", i+1 ) ; cudalog ( msg ) ; } else if (layer_type[i] == TYPE_POOLMAX) { sprintf ( msg, " Layer %d is pooled max", i+1 ) ; cudalog ( msg ) ; } sprintf ( msg, " act = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.act[i], 100.0 * CudaTimers.act[i] / sum, 0.001 * CudaTimers.act[i] / (CudaTimers.ncalls_act[i] + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " delta = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.delta[i], 100.0 * CudaTimers.delta[i] / sum, 0.001 * CudaTimers.delta[i] / (CudaTimers.ncalls_delta[i] + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " grad = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.grad[i], 100.0 * CudaTimers.grad[i] / sum, 0.001 * CudaTimers.grad[i] / (CudaTimers.ncalls_grad[i] + 1.e-20)) ; cudalog ( msg ) ; assert ( CudaTimers.grad[i] >= 0.0 ) ; assert ( CudaTimers.ncalls_grad[i] >= 0.0 ) ; assert ( (0.001 * CudaTimers.grad[i] / (CudaTimers.ncalls_grad[i] + 1.e-20)) >= 0.0 ) ; } sprintf ( msg, " SoftMax = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.softmax, 100.0 * CudaTimers.softmax / sum, 0.001 * CudaTimers.softmax / (CudaTimers.ncalls_softmax + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " Log likelihood = %8.3lf 
(%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.ll, 100.0 * CudaTimers.ll / sum, 0.001 * CudaTimers.ll / (CudaTimers.ncalls_ll + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " Move delta = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.movedelta, 100.0 * CudaTimers.movedelta / sum, 0.001 * CudaTimers.movedelta / (CudaTimers.ncalls_movedelta + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " Fetch grad = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.fetchgrad, 100.0 * CudaTimers.fetchgrad / sum, 0.001 * CudaTimers.fetchgrad / (CudaTimers.ncalls_fetchgrad + 1.e-20)) ; cudalog ( msg ) ; MEMTEXT ( "CUDA cuda_cleanup ending" ) ; }
72c7af634d97dcd5c0b6a117a40ec5c7eb319a6f.cu
/******************************************************************************/
/*                                                                            */
/*  MOD_CUDA.CU - Core CUDA routines for model training                      */
/*                                                                            */
/******************************************************************************/

#define STRICT
#include <windows.h>
#include <commctrl.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <ctype.h>
#include <malloc.h>
#include <new.h>
#include <float.h>
#include <driver_types.h>
#include <cuda_runtime_api.h>
#include "convnet.rh"
#include "const.h"
#include "classes.h"
#include "extern.h"
#include "funcdefs.h"

#define MAX_EXP 300.0

// NOTE... To set up a new project for CUDA, right-click the project name in
// Solution Explorer, click Build Customizations, select CUDA.
// Linker needs additional library directory $(CudaToolkitLibDir)
// Also needs Input/ Additional dependencies cuda.lib;cudart.lib

// This is used as intermediary between device's float and hosts double
static float *fdata = NULL ;

static int n_weights ;           // Total number of weights across all layers
static int n_weights_on_device ; // Ditto, but extended for 128-byte rows
static int max_convgrad_work ;   // Work area size (# of floats) for CONV gradient, = max_batch * max_convgrad_each
static int max_batch ;           // Max number of cases in a launched batch

// This is strictly for printing memory allocation info for the user
static double total_memory = 0.0 ;

// These are for the reductions used in device_ll
// The number of threads MUST be a power of two!
// The number of blocks given here is a maximum. The actual number may be less.
#define REDUC_THREADS 256
#define REDUC_BLOCKS 64

// This is for shared memory staging of convolution
#define BLOCK_SIZE 32

static float *reduc_fdata = NULL ;

// These are set in ?_cuda_init and used by the host routine that launches the kernel
// They are basic app parameters, constant for all launches
// Names that begin with d_ are in the device namespace.
// Names that begin with h_ are in the host namespace and equal the device value.
// This lets us save a little time by avoiding the need to pass a bunch of parameters in the launch.
// We could, of course, just pass data pointers as parameters. But that's overhead.
// So instead we use cudaMemcpyToSymbol() to copy the values in the host namespace
// to values on the device. This lets __global routines address the values that are
// already set on the device rather than having to use passed parameters.
// The savings is probably small, but worthwhile.

__constant__ int d_ncases ;          // Number of cases in complete training set
__constant__ int d_img_rows ;        // Number of rows in input image
__constant__ int d_img_cols ;        // Number of cols in input image
__constant__ int d_img_bands ;       // Number of bands in input image
__constant__ int d_n_pred ;          // Number of predictors
__constant__ int d_n_classes ;       // Number of classes
__constant__ int d_n_classes_cols ;  // Ditto, extended to multiple of 128 bytes (32 floats) (actual)
__constant__ int d_n_layers ;        // Number of hidden layers; does not include output layer
__constant__ int d_n_weights ;       // Total number of weights across all layers

__constant__ int d_convgrad_cols[MAX_LAYERS] ; // n_prior_weights[ilayer] bumped up to multiple of 32
__constant__ int d_max_convgrad_each ;        // Max hid * convwts_cols in a CONV hid grad launch (work area per case)
                                               // This holds a single case
                                               // See the convgrad_work allocation section for details
                                               // max_convgrad_work = this times max_batch

__constant__ int d_layer_type[MAX_LAYERS] ;    // TYPE_? in CONST.H
__constant__ int d_nhid[MAX_LAYERS] ;          // Number of neurons in each of the hidden layers = height*width*depth
__constant__ int d_nhid_cols[MAX_LAYERS] ;     // Ditto, extended to multiple of 128 bytes (32 floats) (actual)
__constant__ int d_height[MAX_LAYERS] ;        // Height (rows) of each layer
__constant__ int d_width[MAX_LAYERS] ;         // And width
__constant__ int d_depth[MAX_LAYERS] ;         // And number of slices
__constant__ int d_depth_cols[MAX_LAYERS] ;    // Ditto, extended to multiple of 128 bytes (32 floats) (actual); for CONV only
__constant__ int d_n_prior_weights[MAX_LAYERS] ; // N of inputs per neuron (including bias) to prior layer = prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1
                                                 // A CONV layer has this many weights per layer (slice); a LOCAL layer has this times its nhid
__constant__ int d_HalfWidH[MAX_LAYERS] ;      // Horizontal half width looking back to prior layer
__constant__ int d_HalfWidV[MAX_LAYERS] ;      // And vertical
__constant__ int d_padH[MAX_LAYERS] ;          // Horizontal padding, should not exceed half width
__constant__ int d_padV[MAX_LAYERS] ;          // And vertical
__constant__ int d_strideH[MAX_LAYERS] ;       // Horizontal stride
__constant__ int d_strideV[MAX_LAYERS] ;       // And vertical
__constant__ int d_PoolWidH[MAX_LAYERS] ;      // Horizontal pooling width looking back to prior layer
__constant__ int d_PoolWidV[MAX_LAYERS] ;      // And vertical

static float *h_predictors = NULL ;            // Raw training data; n_cases by n_pred
__constant__ float *d_predictors ;

static int *h_class = NULL ;                   // Class id is here
__constant__ int *d_class ;

static double *activations = NULL ;            // Activations of this layer, which we compute
__constant__ double *d_act[MAX_LAYERS] ;       // Pointers to activation vector of each layer

static double *h_output = NULL ;               // Output activations
__constant__ double *d_output ;

static int *h_poolmax_id[MAX_LAYERS] ;         // Used only for POOLMAX layer; saves from forward pass ID of max input for backprop pass
__constant__ int *d_poolmax_id[MAX_LAYERS] ;   // Pointers to id vector for each layer; NULL for other than POOLMAX layer

static float *weights = NULL ;                 // All weights, including output layer
__constant__ float *d_weights[MAX_LAYERS+1] ;  // Pointers to weight vector of each layer, including output

// WARNING... If gradient is ever double instead of float, see MLFN_CUDA.CPP for integer overflow check!
static float *grad = NULL ;                    // Gradient for all weights, including output layer
__constant__ float *d_grad[MAX_LAYERS+1] ;     // Pointers to grad vector of each layer, including output
                                               // These are for the first case, and max_batch gradient sets are allocated

static float *h_convgrad_work = NULL ;         // Scratch for unflattened convolution layer gradient
__constant__ float *d_convgrad_work ;

static double *h_this_delta = NULL ;           // Delta for current layer
__constant__ double *d_this_delta ;

static double *h_prior_delta = NULL ;          // Delta for next layer back
__constant__ double *d_prior_delta ;

static float *h_ll_out = NULL ;
__constant__ float *d_ll_out ;

static cudaDeviceProp deviceProp ;

__global__ void device_hidden_activation_FC ( int istart , int istop , int ilayer ) ;
__global__ void device_hidden_activation_LOCAL_CONV ( int local_vs_conv , int case_start , int case_offset , int slice_start , int n_slices , int ilayer ) ;
__global__ void device_hidden_activation_LOCAL_CONV_shared ( int local_vs_conv , int istart , int ilayer ) ;
__global__ void device_hidden_activation_POOLED ( int avg_vs_max , int istart , int ilayer ) ;
__global__ void device_output_activation_no_hidden ( int istart ) ;
__global__ void device_output_activation ( int istart ) ;
__global__ void device_softmax ( int istart , int istop ) ;
__global__ void device_ll () ;
__global__ void device_output_delta ( int istart ) ;
__global__ void device_output_gradient_no_hidden ( int istart , int nc ) ;
__global__ void device_output_gradient ( int nc , int ilayer ) ;
__global__ void device_backprop_delta_FC ( int ilayer ) ;
__global__ void device_backprop_delta_nonpooled ( int
ilayer ) ;
__global__ void device_backprop_delta_pooled ( int ilayer ) ;
__global__ void device_move_delta ( int nhid ) ;
__global__ void device_hidden_gradient_FC ( int istart , int nc , int ilayer ) ;
__global__ void device_hidden_gradient_LOCAL_CONV ( int local_vs_conv , int nfilt , int istart , int depth_offset , int n_depths , int ilayer ) ;
__global__ void device_flatten_gradient ( int islice_start , int max_depth , int ilayer ) ;
__global__ void device_zero_gradient ( int nc ) ;
__global__ void device_fetch_gradient ( int nc ) ;


/*
-----------------------------------------------------

   cuda_init() - Initialize for a model configuration

   Copies the architecture constants to device __constant__ memory, uploads the
   training predictors (reordered so band changes fastest) and integer class
   ids, and allocates every device buffer used during training (activations,
   poolmax ids, outputs, weights, gradient, CONV gradient scratch, deltas, and
   the log-likelihood reduction output).

   Returns 0 on success, or ERROR_CUDA_ERROR / ERROR_CUDA_MEMORY /
   ERROR_INSUFFICIENT_MEMORY on failure, with a message in error_msg.

-----------------------------------------------------
*/

int cuda_init (
   int n_cases ,                     // Total number of cases
   int n_img_rows ,                  // Number of rows in input image
   int n_img_cols ,                  // Number of cols in input image
   int n_img_bands ,                 // Number of bands in input image
   int n_pred ,                      // Number of predictors
   int n_classes ,                   // Number of classes
   double *data ,                    // Ncases by (n_pred+n_classes) data array
   int max_batch_size ,              // Max number of cases that caller wants in a single launch
   int max_hid_grad ,                // Max hid in a CONV hid grad launch; multiple of height*width; <= 65536
   int max_mem_grad ,                // Max memory (bytes) used for CONV scratch storage, which has the potential to be huge
   int n_all_wts ,                   // Total number of weights (all layers, including output, and all bias terms)
   int n_layers ,                    // Number of layers, not including final
   int layer_type[MAX_LAYERS] ,      // Each entry (input to final) is TYPE_? in CONST.H
   int nhid[MAX_LAYERS] ,            // Total number of neurons in this layer = height times width times depth
   int n_prior_weights[MAX_LAYERS] , // N of inputs per neuron (including bias) to prior layer = prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1
                                     // A CONV layer has this many weights per layer (slice); a LOCAL layer has this times its nhid
   int height[MAX_LAYERS] ,          // Number of neurons vertically in a slice of this layer, 1 if fully connected
   int width[MAX_LAYERS] ,           // Ditto horizontal
   int depth[MAX_LAYERS] ,           // Number of hidden neurons if fully connected, else number of slices in this layer
   int HalfWidH[MAX_LAYERS] ,        // Horizontal half width looking back to prior layer
   int HalfWidV[MAX_LAYERS] ,        // And vertical
   int padH[MAX_LAYERS] ,            // Horizontal padding, should not exceed half width
   int padV[MAX_LAYERS] ,            // And vertical
   int strideH[MAX_LAYERS] ,         // Horizontal stride
   int strideV[MAX_LAYERS] ,         // And vertical
   int PoolWidH[MAX_LAYERS] ,        // Horizontal pooling width looking back to prior layer
   int PoolWidV[MAX_LAYERS] ,        // And vertical
   char *error_msg                   // Returns text of error if problem
   )
{
   int i, j, ilayer, irow, icol, iband, ncols, memsize, n_total, n_max, n_classes_cols ;
   int nhid_cols[MAX_LAYERS], depth_cols[MAX_LAYERS], convgrad_cols[MAX_LAYERS] ;
   int *iclass, ibest, divisor, threads_per_block, batch_size_limit ;
   double best, *xptr, *dptr[MAX_LAYERS+1] ;
   float *fptr[MAX_LAYERS+1] ;
   char msg[256] ;
   cudaError_t error_id ;

   MEMTEXT ( "MOD_CUDA.cu: cuda_init starting" ) ;
   cudalog ( "" ) ;

   max_batch = max_batch_size ;

/*
   Initialize CUDA timers
*/

   for (ilayer=0 ; ilayer<=MAX_LAYERS ; ilayer++) {
      CudaTimers.ncalls_act[ilayer] = 0 ;
      CudaTimers.act[ilayer] = 0 ;
      CudaTimers.ncalls_delta[ilayer] = 0 ;
      CudaTimers.delta[ilayer] = 0 ;
      CudaTimers.ncalls_grad[ilayer] = 0 ;
      CudaTimers.grad[ilayer] = 0 ;
      }
   CudaTimers.ncalls_weights = 0 ;
   CudaTimers.weights = 0 ;
   CudaTimers.ncalls_softmax = 0 ;
   CudaTimers.softmax = 0 ;
   CudaTimers.ncalls_ll = 0 ;
   CudaTimers.ll = 0 ;
   CudaTimers.ncalls_movedelta = 0 ;
   CudaTimers.movedelta = 0 ;
   CudaTimers.ncalls_fetchgrad = 0 ;
   CudaTimers.fetchgrad = 0 ;

   error_id = cudaSetDevice ( cuda_present - 1 ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( error_msg , 255 , "CUDA init SetDevice failed %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      MEMTEXT ( error_msg ) ;
      audit ( error_msg ) ;
      cuda_enable = 0 ;
      return ERROR_CUDA_ERROR ;
      }

   cudaGetDeviceProperties ( &deviceProp , 0 ) ;

/*
   Constants

   We also keep nhid_cols, which is the neurons counts bumped up to multiples of 32 (actual)
   so as to keep rows of weight matrices starting on 128-byte boundaries.
   Ditto for output weights.
   For CONV layers, we bump up depth because every neuron in visual field (height*width)
   has the same weights in a given slice.
*/

   n_weights = n_all_wts ;
   ncols = n_pred + n_classes ;
   n_classes_cols = (n_classes + 31) / 32 * 32 ;  // For memory alignment of weights to 128 bytes
                                                  // This applies to only output weights
   for (i=0 ; i<n_layers ; i++) {
      nhid_cols[i] = (nhid[i] + 31) / 32 * 32 ;
      depth_cols[i] = (depth[i] + 31) / 32 * 32 ;
      h_poolmax_id[i] = NULL ;
      }

   cudaMemcpyToSymbol ( d_ncases , &n_cases , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_img_rows , &n_img_rows , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_img_cols , &n_img_cols , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_img_bands , &n_img_bands , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_n_pred , &n_pred , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_n_classes , &n_classes , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_n_classes_cols , &n_classes_cols , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_n_layers , &n_layers , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_n_weights , &n_weights , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_nhid , nhid , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_nhid_cols , nhid_cols , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_layer_type , layer_type , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_height , height , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_width , width , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_depth , depth , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_depth_cols , depth_cols , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_n_prior_weights , n_prior_weights , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_HalfWidH , HalfWidH , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_HalfWidV , HalfWidV , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_padH , padH , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_padV , padV , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_strideH , strideH , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_strideV , strideV , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_PoolWidH , PoolWidH , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_PoolWidV , PoolWidV , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;

/*
   Set shared memory / cache preferences
*/

   cudaFuncSetCacheConfig ( device_hidden_activation_FC , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_hidden_activation_LOCAL_CONV , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_hidden_activation_LOCAL_CONV_shared , cudaFuncCachePreferNone ) ;
   cudaFuncSetCacheConfig ( device_hidden_activation_POOLED , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_output_activation_no_hidden , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_output_activation , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_softmax , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_ll , cudaFuncCachePreferNone ) ;
   cudaFuncSetCacheConfig ( device_output_delta , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_output_gradient_no_hidden , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_output_gradient , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_backprop_delta_FC , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_backprop_delta_nonpooled , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_backprop_delta_pooled , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_move_delta , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_hidden_gradient_FC , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_hidden_gradient_LOCAL_CONV , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_flatten_gradient , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_zero_gradient , cudaFuncCachePreferL1 ) ;
   cudaFuncSetCacheConfig ( device_fetch_gradient , cudaFuncCachePreferL1 ) ;

/*
   Predictors - We extract only the first n_pred columns from the n_pred+n_classes columns in data
                Reorder them so band changes fastest
*/

   fdata = (float *) MALLOC ( n_cases * n_pred * sizeof(float) ) ;
   if (fdata == NULL)
      return ERROR_INSUFFICIENT_MEMORY ;

   memsize = n_cases * n_pred * sizeof(float) ;  // NOTE(review): int memsize can overflow for huge data; confirm upstream limits
   total_memory += memsize ;
   error_id = cudaMalloc ( (void **) &h_predictors , (size_t) memsize ) ;
   sprintf_s ( msg, 255 , "CUDA MALLOC predictors = %llx (%d bytes, total=%.2lf MB)",
               (unsigned long long) h_predictors, memsize, total_memory / (1024 * 1024) ) ;
   MEMTEXT ( msg ) ;
   cudalog ( msg ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc predictors (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_MEMORY ;
      }

   // Reorder from host (band,row,col) layout to device (row,col,band) so band changes fastest
   j = 0 ;
   for (i=0 ; i<n_cases ; i++) {
      xptr = data + i * ncols ;
      for (irow=0 ; irow<n_img_rows ; irow++) {
         for (icol=0 ; icol<n_img_cols ; icol++) {
            for (iband=0 ; iband<n_img_bands ; iband++)
               fdata[j++] = (float) xptr[(iband*n_img_rows+irow)*n_img_cols+icol] ;
            }
         }
      }
   assert ( j == n_cases * n_pred ) ;

   error_id = cudaMemcpy ( h_predictors , fdata , n_cases * n_pred * sizeof(float) , cudaMemcpyHostToDevice ) ;
   FREE ( fdata ) ;
   fdata = NULL ;
   if (error_id == cudaSuccess)
      error_id = cudaMemcpyToSymbol ( d_predictors , &h_predictors , sizeof(float *) , 0 , cudaMemcpyHostToDevice ) ;
   else {
      sprintf_s ( error_msg , 255 , "CUDA init bad predictors copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_ERROR ;
      }

/*
   Classes; we convert the 1/0 binary output target vector to integer classes
*/

   iclass = (int *) MALLOC ( n_cases * sizeof(int) ) ;
   if (iclass == NULL)
      return ERROR_INSUFFICIENT_MEMORY ;

   memsize = n_cases * sizeof(int) ;
   total_memory += memsize ;
   error_id = cudaMalloc ( (void **) &h_class , (size_t) memsize ) ;
   sprintf_s ( msg, 255 , "CUDA MALLOC class = %llx (%d bytes, total=%.2lf MB)",
               (unsigned long long) h_class, memsize, total_memory / (1024 * 1024) ) ;
   MEMTEXT ( msg ) ;
   cudalog ( msg ) ;
   if (error_id != cudaSuccess) {
      FREE ( iclass ) ;   // BUG FIX: original leaked iclass on this error path
      sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc class (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_MEMORY ;
      }

   for (i=0 ; i<n_cases ; i++) {
      best = -1.e60 ;
      ibest = 0 ;        // BUG FIX: ibest could be read uninitialized if no target exceeds -1.e60
      xptr = data + i * ncols + n_pred ;
      for (j=0 ; j<n_classes ; j++) {
         if (xptr[j] > best) {
            best = xptr[j] ;
            ibest = j ;
            }
         }
      iclass[i] = ibest ;
      }

   error_id = cudaMemcpy ( h_class , iclass , n_cases * sizeof(int) , cudaMemcpyHostToDevice ) ;
   if (error_id == cudaSuccess)
      error_id = cudaMemcpyToSymbol ( d_class , &h_class , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
   else {
      FREE ( iclass ) ;   // BUG FIX: original leaked iclass on this error path
      sprintf_s ( error_msg , 255 , "CUDA init bad class copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_ERROR ;
      }

   FREE ( iclass ) ;

/*
   Activations (hidden layers only) ordered (row, col, slice)
*/

   if (n_layers) {
      n_total = 0 ;
      for (i=0 ; i<n_layers ; i++)      // All hidden layers, but not output
         n_total += nhid[i] ;
      memsize = n_total * max_batch * sizeof(double) ;
      total_memory += memsize ;
      error_id = cudaMalloc ( (void **) &activations , (size_t) memsize ) ;
      sprintf_s ( msg, 255 , "CUDA MALLOC activations = %llx (%d bytes, total=%.2lf MB)",
                  (unsigned long long) activations, memsize, total_memory / (1024 * 1024) ) ;
      MEMTEXT ( msg ) ;
      cudalog ( msg ) ;
      if (error_id != cudaSuccess) {
         sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc activations (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
         return ERROR_CUDA_MEMORY ;
         }
      n_total = 0 ;
      for (i=0 ; i<n_layers ; i++) {
         dptr[i] = activations + n_total * max_batch ;
         n_total += nhid[i] ;
         }
      error_id = cudaMemcpyToSymbol ( d_act , &dptr[0] , n_layers * sizeof(double *) , 0 , cudaMemcpyHostToDevice ) ;
      if (error_id != cudaSuccess) {
         sprintf_s ( error_msg , 255 , "CUDA init bad act ptr copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
         return ERROR_CUDA_ERROR ;
         }
      }
   else
      activations = NULL ;

/*
   poolmax_id (POOLMAX layers only) ordered (row, col, slice)
*/

   for (ilayer=0 ; ilayer<n_layers ; ilayer++) {
      if (layer_type[ilayer] == TYPE_POOLMAX) {
         memsize = nhid[ilayer] * max_batch * sizeof(int) ;
         total_memory += memsize ;
         error_id = cudaMalloc ( (void **) &h_poolmax_id[ilayer] , (size_t) memsize ) ;
         sprintf_s ( msg, 255 , "CUDA MALLOC Layer %d poolmax_id = %llx (%d bytes, total=%.2lf MB)",
                     ilayer, (unsigned long long) h_poolmax_id[ilayer], memsize, total_memory / (1024 * 1024) ) ;
         MEMTEXT ( msg ) ;
         cudalog ( msg ) ;
         if (error_id != cudaSuccess) {
            sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc poolmax_id (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
            return ERROR_CUDA_MEMORY ;
            }
         }
      else
         h_poolmax_id[ilayer] = NULL ;
      }

   error_id = cudaMemcpyToSymbol ( d_poolmax_id , &h_poolmax_id[0] , n_layers * sizeof(int *) , 0 , cudaMemcpyHostToDevice ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( error_msg , 255 , "CUDA init bad poolmax_id ptr copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_ERROR ;
      }

/*
   Outputs
*/

   memsize = n_cases * n_classes * sizeof(double) ;
   total_memory += memsize ;
   error_id = cudaMalloc ( (void **) &h_output , (size_t) memsize ) ;
   sprintf_s ( msg, 255 , "CUDA MALLOC output = %llx (%d bytes, total=%.2lf MB)",
               (unsigned long long) h_output, memsize, total_memory / (1024 * 1024) ) ;
   cudalog ( msg ) ;
   if (error_id == cudaSuccess)
      error_id = cudaMemcpyToSymbol ( d_output , &h_output , sizeof(double *) , 0 , cudaMemcpyHostToDevice ) ;  // FIX: symbol is double*, was sizeof(float *)
   else {
      sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc output (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_MEMORY ;
      }

/*
   Weights

   These are stored as the transpose of those in Host, with the neurons in the 'current' layer changing fastest.
   Within each layer's weight matrix, rows (sets of current layer weights) are stored starting
   on 128-byte boundaries.  Thus, n_weights_on_device is generally larger than n_weights,
   because it takes into account row padding.
   Neuron layout in each layer is (height, width, depth).
*/

   n_weights_on_device = 0 ;
   for (ilayer=0 ; ilayer<=n_layers ; ilayer++) {    // For each of the hidden layers, plus the final
      if (ilayer == n_layers)
         n_weights_on_device += n_classes_cols * n_prior_weights[ilayer] ;
      else if (layer_type[ilayer] == TYPE_FC  ||  layer_type[ilayer] == TYPE_LOCAL)
         n_weights_on_device += nhid_cols[ilayer] * n_prior_weights[ilayer] ;  // Add in weights for this layer
      else if (layer_type[ilayer] == TYPE_CONV)
         n_weights_on_device += depth_cols[ilayer] * n_prior_weights[ilayer] ; // A convolution layer uses the same weights for every hidden neuron in visible field
      else if (layer_type[ilayer] == TYPE_POOLAVG  ||  layer_type[ilayer] == TYPE_POOLMAX)  // BUG FIX: was layer_type[i], a stale index
         n_weights_on_device += 0 ;   // Just for clarity; pooling has no trainable weights
      } // For ilayer

   memsize = n_weights_on_device * sizeof(float) ;
   total_memory += memsize ;
   error_id = cudaMalloc ( (void **) &weights , (size_t) memsize ) ;
   sprintf_s ( msg, 255 , "CUDA MALLOC weights = %llx (%d bytes, total=%.2lf MB)",
               (unsigned long long) weights, memsize, total_memory / (1024 * 1024) ) ;
   MEMTEXT ( msg ) ;
   cudalog ( msg ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc weights (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_MEMORY ;
      }

   n_total = 0 ;
   for (ilayer=0 ; ; ilayer++) {    // For each of the hidden layers, plus the final
      fptr[ilayer] = weights + n_total ;
      if (ilayer >= n_layers)
         break ;
      if (layer_type[ilayer] == TYPE_FC  ||  layer_type[ilayer] == TYPE_LOCAL)
         n_total += nhid_cols[ilayer] * n_prior_weights[ilayer] ;   // Add in weights for this layer
      else if (layer_type[ilayer] == TYPE_CONV)
         n_total += depth_cols[ilayer] * n_prior_weights[ilayer] ;  // A convolution layer uses the same weights for every hidden neuron in visible field in a slice
      else if (layer_type[ilayer] == TYPE_POOLAVG  ||  layer_type[ilayer] == TYPE_POOLMAX)  // BUG FIX: was layer_type[i]
         n_total += 0 ;   // Just for clarity; pooling has no trainable weights
      } // For ilayer

   error_id = cudaMemcpyToSymbol ( d_weights , &fptr[0] , (n_layers+1) * sizeof(float *) , 0 , cudaMemcpyHostToDevice ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( error_msg , 255 , "CUDA init bad weight ptr copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_ERROR ;
      }

/*
   Gradient

   We allocate for max_batch complete gradient vectors, and d_grad will be pointers to
   the first set.  Subsequent sets are addressed by adding k * n_weights to the first set.
*/

   memsize = n_weights * max_batch * sizeof(float) ;   // NOTE(review): int overflow possible for huge nets; see warning in declarations
   total_memory += memsize ;
   error_id = cudaMalloc ( (void **) &grad , (size_t) memsize ) ;
   sprintf_s ( msg, 255 , "CUDA MALLOC grad = %llx (%d bytes, total=%.2lf MB)",
               (unsigned long long) grad, memsize, total_memory / (1024 * 1024) ) ;
   MEMTEXT ( msg ) ;
   cudalog ( msg ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc grad (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_MEMORY ;
      }

   n_total = 0 ;
   for (ilayer=0 ; ; ilayer++) {    // For each of the hidden layers, plus the final
      fptr[ilayer] = grad + n_total ;
      if (ilayer >= n_layers)
         break ;
      if (layer_type[ilayer] == TYPE_FC  ||  layer_type[ilayer] == TYPE_LOCAL)
         n_total += nhid[ilayer] * n_prior_weights[ilayer] ;    // Add in grad for this layer
      else if (layer_type[ilayer] == TYPE_CONV)
         n_total += depth[ilayer] * n_prior_weights[ilayer] ;   // A convolution layer uses the same grad for every hidden neuron in visible field
      else if (layer_type[ilayer] == TYPE_POOLAVG  ||  layer_type[ilayer] == TYPE_POOLMAX)  // BUG FIX: was layer_type[i]
         n_total += 0 ;   // Just for clarity; pooling has no trainable grad
      } // For ilayer (each hidden layer)

   error_id = cudaMemcpyToSymbol ( d_grad , &fptr[0] , (n_layers+1) * sizeof(float *) , 0 , cudaMemcpyHostToDevice ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( error_msg , 255 , "CUDA init bad grad ptr copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;  // FIX: message said 'weight ptr copy'
      return ERROR_CUDA_ERROR ;
      }

/*
   convgrad_work - Scratch vector for unflattened convolution layer gradient
*/

   max_convgrad_work = 0 ;    // Will find the max work area needed

   for (ilayer=0 ; ilayer<n_layers ; ilayer++) {

      if (layer_type[ilayer] == TYPE_CONV) {
         convgrad_cols[ilayer] = (n_prior_weights[ilayer] + 31) / 32 * 32 ;  // CONV scratch is zero padded for full coalescing
         n_max = 1024 * 1024 * max_mem_grad / (max_batch * convgrad_cols[ilayer] * sizeof(float)) ; // Launch limit satisfying memory
         divisor = 1 ;   // Figure out how much we have to divide slices to meet max_hid_grad and max_mem_grad limits
         for ( ;; ) {
            j = depth[ilayer] / divisor * height[ilayer] * width[ilayer] ;  // We will launch this many hid at a time
            if (j <= max_hid_grad  &&  j <= n_max)
               break ;
            ++divisor ;
            }
         j = depth[ilayer] / divisor * height[ilayer] * width[ilayer] ;     // We will launch this many hid at a time
         if (j < height[ilayer] * width[ilayer])    // Careless user specified it too small, so ignore request
            j = height[ilayer] * width[ilayer] ;
         // At this time, j is the number of hidden neurons per launch
         if (j * convgrad_cols[ilayer] > max_convgrad_work)
            max_convgrad_work = j * convgrad_cols[ilayer] ;  // This many weights will be computed in a launch (per case)

         // Print info for user
         cudalog ( "" ) ;
         sprintf_s ( msg, "Gradient computation for layer %d will use %d launches, each max %d hidden neurons",
                     ilayer+1, (depth[ilayer] * height[ilayer] * width[ilayer] + j - 1) / j, j ) ;
         cudalog ( msg ) ;
         threads_per_block = (n_prior_weights[ilayer] + 31) / 32 * 32 ;
         if (threads_per_block > 4 * 32)
            threads_per_block = 4 * 32 ;
         sprintf_s ( msg, "Launch parameters: Threads per block=%d with %d thread (x) blocks",
                     threads_per_block, (n_prior_weights[ilayer] + threads_per_block - 1) / threads_per_block) ;
         cudalog ( msg ) ;
         sprintf_s ( msg, " Max Y dimension (n hidden) = %d; max Z dimension (cases) = %d", j, max_batch_size ) ;
         cudalog ( msg ) ;
         }

      else
         convgrad_cols[ilayer] = 0 ;   // Not needed
      }

   cudaMemcpyToSymbol ( d_max_convgrad_each , &max_convgrad_work , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
   cudaMemcpyToSymbol ( d_convgrad_cols , convgrad_cols , n_layers * sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;

   // For storing gradient, we need prior weights and cases in batch

   if (max_convgrad_work) {   // Must not have integer overflow in memory size
      // At this moment, max_convgrad_work is the max number of weights (neurons times prior) in a launch
      batch_size_limit = MAXPOSNUM / (max_convgrad_work * sizeof(float)) ;  // Memory allocation size
      if (max_batch > batch_size_limit) {
         audit ( "ERROR... User specified number of training cases per subset too large. Please reduce." ) ;
         cudalog ( "Device initialization error: training cases per subset too large." ) ;
         sprintf_s ( error_msg , 255 , "User ERROR: Architecture and CUDA params limit subset to %d cases", batch_size_limit ) ;
         return ERROR_CUDA_MEMORY ;
         }
      max_convgrad_work *= max_batch ;
      memsize = max_convgrad_work * sizeof(float) ;
      total_memory += memsize ;
      error_id = cudaMalloc ( (void **) &h_convgrad_work , (size_t) memsize ) ;
      sprintf_s ( msg, 255 , "CUDA MALLOC convgrad_work = %llx (%d bytes, total=%.2lf MB)",
                  (unsigned long long) h_convgrad_work, memsize, total_memory / (1024 * 1024) ) ;
      cudalog ( msg ) ;
      if (error_id == cudaSuccess)
         error_id = cudaMemcpyToSymbol ( d_convgrad_work , &h_convgrad_work , sizeof(float *) , 0 , cudaMemcpyHostToDevice ) ;
      else {
         sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc convgrad_work (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
         return ERROR_CUDA_MEMORY ;
         }
      }
   else
      h_convgrad_work = NULL ;

/*
   This delta, next delta
*/

   n_max = n_classes ;
   for (i=0 ; i<n_layers ; i++) {
      if (nhid[i] > n_max)
         n_max = nhid[i] ;
      }

   memsize = n_max * max_batch * sizeof(double) ;
   total_memory += memsize ;
   error_id = cudaMalloc ( (void **) &h_this_delta , (size_t) memsize ) ;
   sprintf_s ( msg, 255 , "CUDA MALLOC this_delta = %llx (%d bytes, total=%.2lf MB)",
               (unsigned long long) h_this_delta, memsize, total_memory / (1024 * 1024) ) ;
   cudalog ( msg ) ;
   if (error_id == cudaSuccess)
      error_id = cudaMemcpyToSymbol ( d_this_delta , &h_this_delta , sizeof(double *) , 0 , cudaMemcpyHostToDevice ) ;
   else {
      sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc this_delta (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_MEMORY ;
      }

   memsize = n_max * max_batch * sizeof(double) ;
   total_memory += memsize ;
   error_id = cudaMalloc ( (void **) &h_prior_delta , (size_t) memsize ) ;
   sprintf_s ( msg, 255 , "CUDA MALLOC prior_delta = %llx (%d bytes, total=%.2lf MB)",
               (unsigned long long) h_prior_delta, memsize, total_memory / (1024 * 1024) ) ;
   cudalog ( msg ) ;
   if (error_id == cudaSuccess)
      error_id = cudaMemcpyToSymbol ( d_prior_delta , &h_prior_delta , sizeof(double *) , 0 , cudaMemcpyHostToDevice ) ;
   else {
      sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc prior_delta (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_MEMORY ;
      }

/*
   Log likelihood reduction stuff
*/

   memsize = REDUC_BLOCKS * sizeof(float) ;
   total_memory += memsize ;
   error_id = cudaMalloc ( (void **) &h_ll_out , (size_t) memsize ) ;
   sprintf_s ( msg, 255 , "CUDA MALLOC ll_out = %llx (%d bytes, total=%.2lf MB)",
               (unsigned long long) h_ll_out, memsize, total_memory / (1024 * 1024) ) ;
   MEMTEXT ( msg ) ;
   cudalog ( msg ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc ll_out (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
      return ERROR_CUDA_MEMORY ;
      }
   cudaMemcpyToSymbol ( d_ll_out , &h_ll_out , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;

   MEMTEXT ( "CUDA init reduc_fdata" ) ;
   reduc_fdata = (float *) MALLOC ( REDUC_BLOCKS * sizeof(float) ) ;
   if (reduc_fdata == NULL) {
      sprintf_s ( error_msg , 255 , "CUDA init bad MALLOC reduc_fdata" ) ;
      return ERROR_INSUFFICIENT_MEMORY ;  // New error return
      }

/*
   Allocate fdata large enough to handle all subsequent double <-> float transactions
   This remains allocated until cuda_cleanup() is called, because it is used often in launches.
*/

   n_max = max_convgrad_work ;
   if (n_weights_on_device > n_max)
      n_max = n_weights_on_device ;
   fdata = (float *) MALLOC ( n_max * sizeof(float) ) ;
   if (fdata == NULL)
      return ERROR_INSUFFICIENT_MEMORY ;

   MEMTEXT ( "MOD_CUDA.cu: cuda_init ending" ) ;
   return 0 ;
}


/*
--------------------------------------------------------------------------------

   cuda_weights_to_device - Called from MOD_CUDA.CPP to copy weights

   HOST weights:
      In a CONV layer, weight order is:
         Layer depth
         Input slice
         Input height
         Input width
         Bias
      In a LOCAL layer, weight order is:
         Layer depth
         Layer height
         Layer width
         Input slice
         Input height
         Input width
         Bias

   CUDA weights:
      In a CONV layer, weight order is:
         Input height
         Input width
         Input slice
         Bias
         Layer depth
         Pad so layer depth is a multiple of 128
      In a LOCAL layer, weight order is:
         Input height
         Input width
         Input slice
         Bias
         Layer height
         Layer width
         Layer depth
         Pad so nhid = layer height*width*depth is a multiple of 128

   A fully connected layer has height=width=1; all neurons are depth.

--------------------------------------------------------------------------------
*/

int cuda_weights_to_device (
   int n_classes ,     // Number of outputs
   int n_layers ,      // Hidden layers; does not include output
   int *layer_type ,   // Each entry (input to final) is TYPE_?
// (continuation of the cuda_weights_to_device parameter list; layer types are the TYPE_? codes in CONST.H)
   int img_rows ,          // Size of input image
   int img_cols ,
   int img_bands ,
   int *height ,           // Height of visible field in each layer
   int *width ,            // Width of visible field
   int *depth ,            // Number of slices in each layer
   int *nhid ,             // Number of hidden neurons in each layer
   int *hwH ,              // Half-width of filters
   int *hwV ,
   double **host_weights ) // Vector of pointers to weights for each layer
{
   int n, n_prior, ilayer, ineuron, isub, n_cols_each ;
   int idepth, iheight, iwidth, ndepth, nheight, nwidth ;
   int in_row, in_col, in_slice, in_n_height, in_n_width, in_n_depth ;
   double *wptr ;
   float *fptr ;
   char msg[256] ;
   cudaError_t error_id ;

   // Build the device-ordered (transposed, padded) weight image in the host buffer fdata,
   // then (after this loop) copy the whole thing to the device in one cudaMemcpy.
   fptr = fdata ;

   for (ilayer=0 ; ilayer<=n_layers ; ilayer++) {
      wptr = host_weights[ilayer] ;

/*
   Fully connected (also the final output layer)
*/

      if (ilayer == n_layers || layer_type[ilayer] == TYPE_FC) {

         // Size of the layer feeding this one: raw image for layer 0, else prior layer
         if (ilayer == 0) {
            in_n_height = img_rows ;
            in_n_width = img_cols ;
            in_n_depth = img_bands ;
            }
         else {
            in_n_height = height[ilayer-1] ;
            in_n_width = width[ilayer-1] ;
            in_n_depth = depth[ilayer-1] ;
            }

         n_prior = in_n_height * in_n_width * in_n_depth + 1 ;  // Number of weights per neuron, including bias

         if (ilayer == n_layers)
            n = n_classes ;     // Equals depth
         else
            n = nhid[ilayer] ;  // Equals depth

         n_cols_each = (n + 31) / 32 * 32 ;  // For memory alignment to 128 bytes

         // Transpose: on the device the current-layer neuron (depth) changes fastest
         for (in_row=0 ; in_row<in_n_height ; in_row++) {
            for (in_col=0 ; in_col<in_n_width ; in_col++) {
               for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) {
                  for (idepth=0 ; idepth<n ; idepth++) {
                     // Compute location of this neuron's weight vector in host
                     isub = idepth * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ;
                     *fptr++ = (float) wptr[isub] ;
                     } // For idepth
                  while (idepth++ < n_cols_each)  // Pad to multiple of 128 bytes
                     *fptr++ = 0.0f ;
                  } // For in_slice
               } // For in_col
            } // For in_row

         // Bias
         for (idepth=0 ; idepth<n ; idepth++) {
            // Compute location of this neuron's bias in host
            isub = idepth * n_prior + n_prior - 1 ;
            *fptr++ = (float) wptr[isub] ;
            } // For idepth
         while (idepth++ < n_cols_each)  // Pad to multiple of 128 bytes
            *fptr++ = 0.0f ;
         }

/*
   LOCAL
*/

      else if (layer_type[ilayer] == TYPE_LOCAL) {

         // For LOCAL layers, neuron layout in current layer is (height, width, depth).
         n = nhid[ilayer] ;
         n_cols_each = (n + 31) / 32 * 32 ;  // For memory alignment to 128 bytes
         ndepth = depth[ilayer] ;
         nheight = height[ilayer] ;
         nwidth = width[ilayer] ;
         in_n_height = 2 * hwV[ilayer] + 1 ;  // Filter height
         in_n_width = 2 * hwH[ilayer] + 1 ;   // Filter width
         if (ilayer == 0)
            in_n_depth = img_bands ;
         else
            in_n_depth = depth[ilayer-1] ;
         n_prior = in_n_height * in_n_width * in_n_depth + 1 ;  // Number of weights per neuron, including bias

         for (in_row=0 ; in_row<in_n_height ; in_row++) {
            for (in_col=0 ; in_col<in_n_width ; in_col++) {
               for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) {
                  for (iheight=0 ; iheight<nheight ; iheight++) {    // nhid = ndepth * nheight * nwidth
                     for (iwidth=0 ; iwidth<nwidth ; iwidth++) {     // We must reorder so depth changes fastest
                        for (idepth=0 ; idepth<ndepth ; idepth++) {
                           // Compute location of this neuron's weight in host
                           isub = (idepth * nheight + iheight) * nwidth + iwidth ;  // Neuron in this layer
                           isub = isub * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ;
                           *fptr++ = (float) wptr[isub] ;
                           } // For idepth
                        } // For iwidth
                     } // For iheight
                  ineuron = nhid[ilayer] ;
                  while (ineuron++ < n_cols_each)  // Pad to multiple of 128 bytes
                     *fptr++ = 0.0f ;
                  } // For in_slice
               } // For in_col
            } // For in_row

         // Bias
         for (iheight=0 ; iheight<nheight ; iheight++) {    // nhid = ndepth * nheight * nwidth
            for (iwidth=0 ; iwidth<nwidth ; iwidth++) {     // We must reorder so depth changes fastest
               for (idepth=0 ; idepth<ndepth ; idepth++) {
                  // Compute location of this neuron's weight vector in host
                  isub = (idepth * nheight + iheight) * nwidth + iwidth ;  // Neuron in this layer
                  isub = isub * n_prior + n_prior - 1 ;
                  *fptr++ = (float) wptr[isub] ;
                  } // For idepth
               } // For iwidth
            } // For iheight
         ineuron = nhid[ilayer] ;
         while (ineuron++ < n_cols_each)  // Pad to multiple of 128 bytes
            *fptr++ = 0.0f ;
         }

/*
   CONV - One shared filter per slice, so only depth (not the visual field) is stored
*/

      else if (layer_type[ilayer] == TYPE_CONV) {

         nheight = height[ilayer] ;
         nwidth = width[ilayer] ;
         ndepth = depth[ilayer] ;
         n_cols_each = (ndepth + 31) / 32 * 32 ;  // For memory alignment to 128 bytes
         in_n_height = 2 * hwV[ilayer] + 1 ;      // Filter height
         in_n_width = 2 * hwH[ilayer] + 1 ;       // Filter width
         if (ilayer == 0)
            in_n_depth = img_bands ;
         else
            in_n_depth = depth[ilayer-1] ;
         n_prior = in_n_height * in_n_width * in_n_depth + 1 ;  // Number of weights per neuron, including bias

         for (in_row=0 ; in_row<in_n_height ; in_row++) {
            for (in_col=0 ; in_col<in_n_width ; in_col++) {
               for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) {
                  for (idepth=0 ; idepth<ndepth ; idepth++) {
                     // Compute location of this neuron's weight vector in host
                     isub = idepth * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ;
                     *fptr++ = (float) wptr[isub] ;
                     } // For idepth
                  while (idepth++ < n_cols_each)  // Pad to multiple of 128 bytes
                     *fptr++ = 0.0f ;
                  } // For in_slice
               } // For in_col
            } // For in_row

         // Bias
         for (idepth=0 ; idepth<ndepth ; idepth++) {
            // Compute location of this neuron's bias in host
            isub = idepth * n_prior + n_prior - 1 ;
            *fptr++ = (float) wptr[isub] ;
            } // For idepth
         while (idepth++ < n_cols_each)  // Pad to multiple of 128 bytes
            *fptr++ = 0.0f ;
         }

/*
   Pooling layers have no weights
*/

      else if (layer_type[ilayer] == TYPE_POOLAVG || layer_type[ilayer] == TYPE_POOLMAX) {
         n = 0 ;  // Not needed. Just for clarity.
         } // Else pooling layer (no weights)
      } // For ilayer

   // Sanity check: we filled exactly the padded device weight image computed in cuda_init
   assert ( fptr == fdata + n_weights_on_device ) ;

   error_id = cudaMemcpy ( weights , fdata , n_weights_on_device * sizeof(float) , cudaMemcpyHostToDevice ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "CUDA ERROR: bad weights_to_device hid %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( "" ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return ERROR_CUDA_ERROR ;
      }

   return 0 ;
}


/*
--------------------------------------------------------------------------------

   hidden_activation_FC - Compute activations for an FC hidden layer

--------------------------------------------------------------------------------
*/

// One thread per hidden neuron (x dimension), one block row (y) per case in the batch.
// The istop parameter is unused here; the case count is carried by the grid's y dimension.

__global__ void device_hidden_activation_FC (
   int istart ,  // First case in this batch
   int istop ,   // One past last case
   int ilayer )  // Layer to process
{
   int icase, ihid, i_input, n_inputs, nhid_cols ;
   float *f_inptr, *wptr ;
   double sum, *actptr, *d_inptr ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   if (ihid >= d_nhid[ilayer])
      return ;

   nhid_cols = d_nhid_cols[ilayer] ;  // Padded row length of the weight matrix
   icase = blockIdx.y ;

   wptr = d_weights[ilayer] + ihid ;  // Device weights are transpose of host weights, with this neuron changing fastest

   sum = 0.0 ;

   // First hidden layer is fed by the float predictors; later layers by double activations.
   if (ilayer == 0) {
      n_inputs = d_n_pred ;
      f_inptr = d_predictors + (icase+istart)*n_inputs ;
      for (i_input=0 ; i_input<n_inputs ; i_input++) {
         sum += *wptr * f_inptr[i_input] ;
         wptr += nhid_cols ;  // Step to the next input's weight for this same neuron
         }
      sum += *wptr ;  // Bias
      }

   else {
      n_inputs = d_nhid[ilayer-1] ;
      d_inptr = d_act[ilayer-1] + icase*n_inputs ;
      for (i_input=0 ; i_input<n_inputs ; i_input++) {
         sum += *wptr * d_inptr[i_input] ;
         wptr += nhid_cols ;
         }
      sum += *wptr ;  // Bias
      }

   // tanh squashing, computed via exp(2x) and clamped against overflow
   if (sum > MAX_EXP)
      sum = 1.0 ;
   else {
      sum = exp ( 2.0 * sum ) ;
      sum = (sum - 1.0) / (sum + 1.0) ;
      }

   actptr = d_act[ilayer] ;
   actptr[icase*d_nhid[ilayer]+ihid] = sum ;
}


// Host-side launcher for device_hidden_activation_FC.
// Returns 0 on success, 1 on any CUDA error (after logging it).

int cuda_hidden_activation_FC (
   int istart ,  // First case in this batch
   int istop ,   // One past last case
   int nhid ,    // Number of hidden neurons in this layer
   int ilayer )  // Layer to process
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   warpsize = deviceProp.warpSize ;  // Threads per warp, likely 32 well into the future

   // At most four warps per block; x spans neurons, y spans cases
   threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nhid + threads_per_block - 1) / threads_per_block ;
   block_launch.y = istop - istart ;
   block_launch.z = 1 ;

   device_hidden_activation_FC <<< block_launch , threads_per_block >>> ( istart , istop , ilayer ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_hidden_activation_FC launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
---------------------------------------------------------------------------------

   hidden_activation_LOCAL_CONV - Activations for a LOCAL or CONV hidden layer

---------------------------------------------------------------------------------
*/

__global__ void device_hidden_activation_LOCAL_CONV (
   int local_vs_conv ,  // Is this a LOCAL (vs CONV) layer?
   int case_start ,   // First case in this batch (relative to dataset)
   int case_offset ,  // Offset relative to this batch (used in shared version)
   int slice_start ,  // First slice in this batch
   int n_slices ,     // Number of slices to be done in this launch
   int ilayer )       // Layer to process
{
   int kwt, kin, wtsub, insub, iheight, iwidth, idepth, n_height, n_width, n_depth, wt_cols, ihid ;
   int rstart, rstop, cstart, cstop, rbase, cbase, in_slice, in_row, in_col, nH ;
   float *f_inptr, *wptr ;
   double sum, *actptr ;

   idepth = blockIdx.x * blockDim.x + threadIdx.x ;
   if (idepth >= n_slices)
      return ;
   idepth += slice_start ;

   iheight = blockIdx.y / d_width[ilayer] ;  // blockIdx.y indexes the visual field position
   iwidth = blockIdx.y % d_width[ilayer] ;
   nH = 2 * d_HalfWidH[ilayer] + 1 ;         // Horizontal width of the filter

   // We are about to compute the activation of neuron (iheight, iwidth, idepth) in this layer.
   // Note that it is critical that idepth be associated with the thread.
   // This ensures that adjacent threads reference the same input, which allows efficient memory use.
   // Also, the weights are ordered so that depth-fastest changes produce perfect or very good coalescing.
   // Thus, neuron layout in current layer is (height, width, depth).
   // This gives strong motivation for LOCAL layers to have depth a multiple of 32.
   // To see why, note the ihid= below.  That multiplication ensures perfect coalescing of the weight fetches.

   // icase = blockIdx.z ;  // Avoid having to declare this (and use a register) by directly referencing it later

   if (local_vs_conv) {
      wt_cols = d_nhid_cols[ilayer] ;  // Padded size of weight matrix rows; each has nhid data values, then zero padding
                                       // There are n_prior_weights rows (prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1)
      ihid = (iheight * d_width[ilayer] + iwidth) * d_depth[ilayer] + idepth ;
      wptr = d_weights[ilayer] + ihid ; // Device weights are transpose of host weights, with this neuron changing fastest
      }                                 // Order is (height, width, depth)

   else {
      wt_cols = d_depth_cols[ilayer] ;  // Padded size of weight matrix rows; each has depth[ilayer] data values, then zero padding
                                        // There are n_prior_weights rows (prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1)
      // A convolutional layer has a different weight set for each slice,
      // but the same weight set for all neurons (visual field placement) in a slice.
      wptr = d_weights[ilayer] + idepth ;  // First filter weight for this slice is here; subsequent weights spaced by wt_cols
      }

   sum = 0.0 ;

   // Center of first filter is at HalfWidth-Pad; filter begins at -Pad.
   rbase = rstart = d_strideV[ilayer] * iheight - d_padV[ilayer] ;
   rstop = rstart + 2 * d_HalfWidV[ilayer] ;
   cbase = cstart = d_strideH[ilayer] * iwidth - d_padH[ilayer] ;
   cstop = cstart + 2 * d_HalfWidH[ilayer] ;

   if (rstart < 0)     // Clip the filter to the prior layer's extent (padding region contributes nothing)
      rstart = 0 ;
   if (cstart < 0)
      cstart = 0 ;

   // It's annoying and messy, but we must duplicate the same code for the case of this being the
   // first hidden layer (fed by the input) versus a subsequent hidden layer (fed by prior activations).
   // This is because the input uses a float pointer, and activations a double pointer.
   // Deciding in the inner loop would be too slow!

   if (ilayer == 0) {
      f_inptr = d_predictors + (blockIdx.z + case_offset + case_start) * d_n_pred ;
      if (rstop >= d_img_rows)
         rstop = d_img_rows - 1 ;
      if (cstop >= d_img_cols)
         cstop = d_img_cols - 1 ;
      for (in_row=rstart ; in_row<=rstop ; in_row++) {
         kwt = (in_row - rbase) * nH ;       // Hoisted row terms of the two subscripts
         kin = in_row*d_img_cols ;
         for (in_col=cstart ; in_col<=cstop ; in_col++) {
            wtsub = (kwt + in_col - cbase) * d_img_bands ;
            insub = (kin+in_col) * d_img_bands ;
            for (in_slice=0 ; in_slice<d_img_bands ; in_slice++) {
               // wtsub = ((in_row - rbase) * nH + in_col - cbase) * d_img_bands + in_slice ;
               // insub = (in_row*d_img_cols+in_col)*d_img_bands+in_slice ;
               sum += f_inptr[insub] * wptr[wtsub*wt_cols] ;
               ++wtsub ;
               ++insub ;
               } // For in_slice
            } // For in_col
         } // For in_row
      sum += wptr[(d_n_prior_weights[ilayer]-1) * wt_cols] ;  // Bias
      }

   else {
      actptr = d_act[ilayer-1] + (blockIdx.z + case_offset) * d_nhid[ilayer-1] ;
      n_height = d_height[ilayer-1] ;
      n_width = d_width[ilayer-1] ;
      n_depth = d_depth[ilayer-1] ;
      if (rstop >= n_height)
         rstop = n_height - 1 ;
      if (cstop >= n_width)
         cstop = n_width - 1 ;
      for (in_row=rstart ; in_row<=rstop ; in_row++) {
         kwt = (in_row - rbase) * nH ;
         kin = in_row*n_width ;
         for (in_col=cstart ; in_col<=cstop ; in_col++) {
            wtsub = (kwt + in_col - cbase) * n_depth ;
            insub = (kin+in_col) * n_depth ;
            for (in_slice=0 ; in_slice<d_depth[ilayer-1] ; in_slice++) {
               // wtsub = ((in_row - rbase) * nH + in_col - cbase) * n_depth + in_slice ;
               // insub = (in_row*n_width+in_col)*n_depth+in_slice ;
               sum += actptr[insub] * wptr[wtsub*wt_cols] ;
               ++wtsub ;
               ++insub ;
               } // For in_slice
            } // For in_col
         } // For in_row
      sum += wptr[(d_n_prior_weights[ilayer]-1) * wt_cols] ;  // Bias
      }

   // tanh squashing, clamped against overflow
   if (sum > MAX_EXP)
      sum = 1.0 ;
   else {
      sum = exp ( 2.0 * sum ) ;
      sum = (sum - 1.0) / (sum + 1.0) ;
      }

   n_height = d_height[ilayer] ;
   n_width = d_width[ilayer] ;
   n_depth = d_depth[ilayer] ;
   actptr = d_act[ilayer] ;
   ihid = (iheight * n_width + iwidth) * n_depth + idepth ;  // Activity for any layer type is (height, width, depth)
   actptr[(blockIdx.z+case_offset)*d_nhid[ilayer]+ihid] = sum ;
}


// Host-side launcher (non-shared-memory path).
// Returns 0 on success, 1 on any CUDA error (after logging it).

int cuda_hidden_activation_LOCAL_CONV (
   int local_vs_conv ,  // Is this a LOCAL (vs CONV) layer?
   int istart ,         // First case in this batch
   int istop ,          // One past last case
   int nhid ,           // Number of hidden neurons in this layer
   int n_slices ,       // Depth of this layer
   int ilayer )         // Layer to process
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   // nhid = height * width * depth
   assert ( nhid % n_slices == 0 ) ;
   assert ( nhid / n_slices <= 65535 ) ;  // Grid y-dimension limit

   warpsize = deviceProp.warpSize ;  // Threads per warp, likely 32 well into the future

   threads_per_block = (n_slices + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (n_slices + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nhid / n_slices ;  // Height times width; visual field size
   block_launch.z = istop - istart ;

   device_hidden_activation_LOCAL_CONV <<< block_launch , threads_per_block >>>
                           ( local_vs_conv , istart , 0 , 0 , n_slices , ilayer ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_hidden_activation_LOCAL_CONV launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


// Shared-memory (tiled matrix-multiply style) version of the LOCAL/CONV activation kernel.

__global__ void device_hidden_activation_LOCAL_CONV_shared (
   int local_vs_conv ,  // Is this a LOCAL (vs CONV) layer?
   int istart ,  // First case in this batch
   int ilayer )  // Layer to process
{
   int k, iheight, iwidth, idepth, icase, n_height, n_width, n_depth, wt_cols ;
   int ihid, inner, n_inner, inner_blocks, prod ;
   int rstart, rstop, cstart, cstop, rbase, cbase, in_slice, in_row, in_col, isub, nH ;
   float *f_inptr, *wptr ;
   double value, sum, *actptr ;

   // In a block, threadIdx.x and threadIdx.y are the location within the BLOCK_SIZE square block.
   // The entire matrix of cases (rows) by slices (column) is divided into these blocks,
   // each of which is a launched block whose location in the entire matrix is given by blockIdx.x and blockIdx.y.
   // The sharing logic ignores blockIdx.z, which is just the location in the visual field.
   // The next four quantities identify the location within the entire matrix.

   idepth = blockIdx.x * BLOCK_SIZE + threadIdx.x ;
   icase = blockIdx.y * BLOCK_SIZE + threadIdx.y ;
   iheight = blockIdx.z / d_width[ilayer] ;
   iwidth = blockIdx.z % d_width[ilayer] ;
   nH = 2 * d_HalfWidH[ilayer] + 1 ;  // Horizontal width of the filter

   // This thread will compute the activation of neuron (iheight, iwidth, idepth) for case icase.
   // However, the first step is for the threads in this block to cooperatively do the global
   // loads into shared memory of the weights and inputs relevant to this block.
   // We do this in a loop which covers the 'inner' (n_inner) dimension of the matrix multiplication.
   // Note that it is critical that idepth be associated with the thread.
   // This ensures that adjacent threads reference the same input, which allows efficient memory use.
   // Also, the weights are ordered so that depth-fastest changes produce perfect or very good coalescing.
   // Thus, neuron layout in current layer is (row, column, slice).
   // This gives strong motivation for LOCAL layers to have depth a multiple of 32.
   // To see why, note the ihid= below.  That multiplication ensures perfect coalescing of the weight fetches.

   if (local_vs_conv) {
      wt_cols = d_nhid_cols[ilayer] ;  // Padded size of weight matrix rows; each has nhid data values, then zero padding
                                       // There are n_prior_weights rows (prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1)
      ihid = (iheight * d_width[ilayer] + iwidth) * d_depth[ilayer] + idepth ;
      wptr = d_weights[ilayer] + ihid ; // Device weights are transpose of host weights, with this neuron changing fastest
      }                                 // Order is (height, width, depth)

   else {
      wt_cols = d_depth_cols[ilayer] ;  // Padded size of weight matrix rows; each has depth[ilayer] data values, then zero padding
                                        // There are n_prior_weights rows (prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1)
      // A convolutional layer has a different weight set for each slice,
      // but the same weight set for all neurons (visual field placement) in a slice.
      wptr = d_weights[ilayer] + idepth ;  // First filter weight for this slice is here; subsequent weights spaced by wt_cols
      }

   // Get a pointer to and the size of the prior-layer feeding this layer
   if (ilayer == 0) {
      f_inptr = d_predictors + (icase + istart) * d_n_pred ;
      n_height = d_img_rows ;
      n_width = d_img_cols ;
      n_depth = d_img_bands ;
      }
   else {
      actptr = d_act[ilayer-1] + icase * d_nhid[ilayer-1] ;
      n_height = d_height[ilayer-1] ;
      n_width = d_width[ilayer-1] ;
      n_depth = d_depth[ilayer-1] ;
      }

   // Center of first filter is at HalfWidth-Pad; filter begins at -Pad.
   // These quantities are independent of the depth (column here) and case (row here).
   rbase = rstart = d_strideV[ilayer] * iheight - d_padV[ilayer] ;
   rstop = rstart + 2 * d_HalfWidV[ilayer] ;
   cbase = cstart = d_strideH[ilayer] * iwidth - d_padH[ilayer] ;
   cstop = cstart + 2 * d_HalfWidH[ilayer] ;
   if (rstart < 0)
      rstart = 0 ;
   if (cstart < 0)
      cstart = 0 ;
   if (rstop >= n_height)
      rstop = n_height - 1 ;
   if (cstop >= n_width)
      cstop = n_width - 1 ;

   // The prep work is done.  We now cooperatively do the global load.
   // This thread will handle Row threadIdx.y, Column threadIdx.x of the BLOCK_SIZE square block
   // in a loop over all inner blocks.
   // In each pass, we start by computing the ordinal position in the filter dot product loop.

   prod = (cstop-cstart+1) * n_depth ;            // Each prior-layer row has this many elements
   n_inner = (rstop-rstart+1) * prod + 1 ;        // This many terms in inner sum (+1 is for bias)
   inner_blocks = (n_inner + BLOCK_SIZE - 1) / BLOCK_SIZE ;  // We will process this many 'inner' blocks

   sum = 0.0 ;

   for (inner=0 ; inner<inner_blocks ; inner++) {
      __shared__ double s_cases[BLOCK_SIZE][BLOCK_SIZE] ;
      __shared__ float s_slices[BLOCK_SIZE][BLOCK_SIZE] ;

      // Slice; We will sum over FIRST index (y) of s_slices
      isub = inner * BLOCK_SIZE + threadIdx.y ;  // Ordinal position in filter dot product loop
      if (isub >= n_inner)                       // Outside inner block
         value = 0.0 ;
      else if (isub == n_inner-1)                // Bias
         value = wptr[(d_n_prior_weights[ilayer]-1) * wt_cols] ;
      else {
         // Decompose the ordinal position into (row, col, slice) of the filter footprint
         in_row = isub / prod ;
         k = isub - in_row * prod ;
         in_col = k / n_depth ;
         in_slice = k % n_depth ;
         in_row += rstart ;
         in_col += cstart ;
         isub = ((in_row - rbase) * nH + in_col - cbase) * n_depth + in_slice ;
         value = wptr[isub*wt_cols] ;
         }
      s_slices[threadIdx.y][threadIdx.x] = value ;

      // Case; We will sum over SECOND index (x) of s_cases
      isub = inner * BLOCK_SIZE + threadIdx.x ;  // Ordinal position in filter dot product loop
      if (isub >= n_inner)                       // Outside inner block
         value = 0.0 ;
      else if (isub == n_inner-1)                // Bias
         value = 1.0 ;
      else {
         in_row = isub / prod ;
         k = isub - in_row * prod ;
         in_col = k / n_depth ;
         in_slice = k % n_depth ;
         in_row += rstart ;
         in_col += cstart ;
         isub = (in_row*n_width+in_col)*n_depth+in_slice ;
         if (ilayer == 0)
            value = f_inptr[isub] ;
         else
            value = actptr[isub] ;
         }
      s_cases[threadIdx.y][threadIdx.x] = value ;

      __syncthreads () ;  // Both tiles fully loaded before any thread reads them

      for (k=0 ; k<BLOCK_SIZE ; k++)
         sum += s_cases[threadIdx.y][k] * s_slices[k][threadIdx.x] ;

      __syncthreads () ;  // All reads done before the next pass overwrites the tiles
      } // For inner

   // tanh squashing, clamped against overflow
   if (sum > MAX_EXP)
      sum = 1.0 ;
   else {
      sum = exp ( 2.0 * sum ) ;
      sum = (sum - 1.0) / (sum + 1.0) ;
      }

   n_width = d_width[ilayer] ;
   n_depth = d_depth[ilayer] ;
   actptr = d_act[ilayer] ;
   ihid = (iheight * n_width + iwidth) * n_depth + idepth ;  // Activity for any layer type is (height, width, depth)
   actptr[icase*d_nhid[ilayer]+ihid] = sum ;  // Perfectly coalesced if depth and nhid multiples of 32
}


// Host-side launcher for the shared-memory LOCAL/CONV activation path.
// Returns 0 on success, 1 on any CUDA error (after logging it).

int cuda_hidden_activation_LOCAL_CONV_shared (
   int local_vs_conv ,  // Is this a LOCAL (vs CONV) layer?
   int istart ,         // First case in this batch
   int istop ,          // One past last case
   int nhid ,           // Number of hidden neurons in this layer
   int n_slices ,       // Depth of this layer
   int ilayer )         // Layer to process
{
   int nc, warpsize, threads_per_block ;
   char msg[256] ;
   dim3 thread_launch, block_launch ;
   cudaError_t error_id ;

   // nhid = height * width * depth
   assert ( nhid % n_slices == 0 ) ;
   assert ( nhid / n_slices <= 65535 ) ;

/*
   If possible (it normally would be), handle as much as possible
   with the more efficient shared-memory method.
   But if not, just use the non-shared method.
*/

   nc = istop - istart ;

   // If either matrix dimension is smaller than a tile, shared memory cannot help
   if (n_slices < BLOCK_SIZE || nc < BLOCK_SIZE)
      return cuda_hidden_activation_LOCAL_CONV ( local_vs_conv , istart , istop , nhid , n_slices , ilayer ) ;

   thread_launch.x = BLOCK_SIZE ;
   thread_launch.y = BLOCK_SIZE ;
   thread_launch.z = 1 ;

   block_launch.x = n_slices / BLOCK_SIZE ;  // Full tiles only; remainders are cleaned up below
   block_launch.y = nc / BLOCK_SIZE ;
   block_launch.z = nhid / n_slices ;        // Height times width; visual field size

   device_hidden_activation_LOCAL_CONV_shared <<< block_launch , thread_launch >>>
                                    ( local_vs_conv , istart , ilayer ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_hidden_activation_LOCAL_CONV_shared launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

/*
   Clean up any extra slices (all cases, slices beyond the last full tile)
*/

   if (n_slices % BLOCK_SIZE) {
      threads_per_block = n_slices % BLOCK_SIZE ;
      block_launch.x = 1 ;
      block_launch.y = nhid / n_slices ;  // Height times width; visual field size
      block_launch.z = nc ;
      device_hidden_activation_LOCAL_CONV <<< block_launch , threads_per_block >>>
               ( local_vs_conv , istart , 0 , n_slices / BLOCK_SIZE * BLOCK_SIZE , n_slices % BLOCK_SIZE , ilayer ) ;
      // This does not trigger an escape, but it keeps the message queue running
      user_pressed_escape () ;
      cudaDeviceSynchronize() ;
      error_id = cudaGetLastError () ;
      if (error_id != cudaSuccess) {
         sprintf_s ( msg , 255 , "cuda_hidden_activation_LOCAL_CONV launch (shared 1) error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
         audit ( msg ) ;
         MEMTEXT ( msg ) ;
         return 1 ;
         }
      }

/*
   Clean up any extra cases (cases beyond the last full tile, tiled slices only)
*/

   if (nc % BLOCK_SIZE) {
      warpsize = deviceProp.warpSize ;  // Threads per warp, likely 32 well into the future
      threads_per_block = (n_slices / BLOCK_SIZE * BLOCK_SIZE + warpsize - 1) / warpsize * warpsize ;
      if (threads_per_block > 4 * warpsize)
         threads_per_block = 4 * warpsize ;
      block_launch.x = (n_slices / BLOCK_SIZE * BLOCK_SIZE + threads_per_block - 1) / threads_per_block ;
      block_launch.y = nhid / n_slices ;  // Height times width; visual field size
      block_launch.z = nc % BLOCK_SIZE ;
      device_hidden_activation_LOCAL_CONV <<< block_launch , threads_per_block >>>
               ( local_vs_conv , istart, nc / BLOCK_SIZE * BLOCK_SIZE , 0 , n_slices / BLOCK_SIZE * BLOCK_SIZE , ilayer ) ;
      // This does not trigger an escape, but it keeps the message queue running
      user_pressed_escape () ;
      cudaDeviceSynchronize() ;
      error_id = cudaGetLastError () ;
      if (error_id != cudaSuccess) {
         sprintf_s ( msg , 255 , "cuda_hidden_activation_LOCAL_CONV launch (shared 2) error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
         audit ( msg ) ;
         MEMTEXT ( msg ) ;
         return 1 ;
         }
      }

   return 0 ;
}


/*
---------------------------------------------------------------------------------

   hidden_activation_POOLED - Activations for a POOLAVG or POOLMAX hidden layer

---------------------------------------------------------------------------------
*/

__global__ void device_hidden_activation_POOLED (
   int avg_vs_max ,  // Is this a POOLAVG (vs POOLMAX) layer?
   int istart ,      // First case in this batch
   int ilayer )      // Layer to process
{
   int icase, iheight, iwidth, idepth, n_width, n_depth, ihid ;
   int rstart, rstop, cstart, cstop, in_row, in_col, *poolmax_id_ptr ;
   float *f_inptr ;
   double x, *actptr, value ;

   idepth = blockIdx.x * blockDim.x + threadIdx.x ;
   if (idepth >= d_depth[ilayer])
      return ;

   n_width = d_width[ilayer] ;
   n_depth = d_depth[ilayer] ;
   iheight = blockIdx.y / n_width ;  // blockIdx.y indexes the visual field position
   iwidth = blockIdx.y % n_width ;
   ihid = (iheight * n_width + iwidth) * n_depth + idepth ;  // Activity for any layer type is (height, width, depth)

   // We are about to compute the activation of neuron (iheight, iwidth, idepth) in this layer.
   // Note that it is critical that idepth be associated with the thread.
   // This ensures that adjacent threads reference the same input, which allows efficient memory use.
   icase = blockIdx.z ;

   // Pooling window in the prior layer (no padding for pooled layers)
   rstart = d_strideV[ilayer] * iheight ;
   rstop = rstart + d_PoolWidV[ilayer] - 1 ;
   cstart = d_strideH[ilayer] * iwidth ;
   cstop = cstart + d_PoolWidH[ilayer] - 1 ;

   // It's annoying and messy, but we must duplicate the same code for the case of this being the
   // first hidden layer (fed by the input) versus a subsequent hidden layer (fed by prior activations).
   // This is because the input uses a float pointer, and activations a double pointer.
   // Deciding in the inner loop would be too slow!

   if (ilayer == 0) {
      f_inptr = d_predictors + (icase + istart) * d_n_pred ;

      if (avg_vs_max) {
         value = 0.0 ;
         for (in_row=rstart ; in_row<=rstop ; in_row++) {
            for (in_col=cstart ; in_col<=cstop ; in_col++)
               value += f_inptr[(in_row*d_img_cols+in_col)*d_img_bands+idepth] ;
            } // For in_row
         value /= d_PoolWidV[ilayer] * d_PoolWidH[ilayer] ;
         }

      else {
         poolmax_id_ptr = &d_poolmax_id[ilayer][ihid] + icase * d_nhid[ilayer] ;
         value = -1.e60 ;  // Smaller than any possible activation
         for (in_row=rstart ; in_row<=rstop ; in_row++) {
            for (in_col=cstart ; in_col<=cstop ; in_col++) {
               x = f_inptr[(in_row*d_img_cols+in_col)*d_img_bands+idepth] ;
               if (x > value) {
                  value = x ;
                  *poolmax_id_ptr = in_row * d_img_cols + in_col ;  // Save id of max for backprop pass
                  }
               } // For in_col
            } // For in_row
         } // POOLMAX
      } // If first hidden layer

   else {
      actptr = d_act[ilayer-1] + icase * d_nhid[ilayer-1] ;
      n_width = d_width[ilayer-1] ;   // Note: n_width/n_depth now refer to the PRIOR layer
      n_depth = d_depth[ilayer-1] ;

      if (avg_vs_max) {
         value = 0.0 ;
         for (in_row=rstart ; in_row<=rstop ; in_row++) {
            for (in_col=cstart ; in_col<=cstop ; in_col++)
               value += actptr[(in_row*n_width+in_col)*n_depth+idepth] ;
            } // For in_row
         value /= d_PoolWidV[ilayer] * d_PoolWidH[ilayer] ;
         }

      else {
         poolmax_id_ptr = &d_poolmax_id[ilayer][ihid] + icase * d_nhid[ilayer] ;
         value = -1.e60 ;
         for (in_row=rstart ; in_row<=rstop ; in_row++) {
            for (in_col=cstart ; in_col<=cstop ; in_col++) {
               x = actptr[(in_row*n_width+in_col)*n_depth+idepth] ;
               if (x > value) {
                  value = x ;
                  *poolmax_id_ptr = in_row * d_width[ilayer-1] + in_col ;  // Save id of max for backprop pass
                  }
               } // For in_col
            } // For in_row
         } // POOLMAX
      }

   actptr = d_act[ilayer] ;
   actptr[icase*d_nhid[ilayer]+ihid] = value ;
}


// Host-side launcher for device_hidden_activation_POOLED.
// Returns 0 on success, 1 on any CUDA error (after logging it).

int cuda_hidden_activation_POOLED (
   int avg_vs_max ,  // Is this a POOLAVG (vs POOLMAX) layer?
   int istart ,      // First case in this batch
   int istop ,       // One past last case
   int nhid ,        // Number of hidden neurons in this layer
   int n_slices ,    // Depth of this layer
   int ilayer )      // Layer to process
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   // nhid = height * width * depth
   assert ( nhid % n_slices == 0 ) ;
   assert ( nhid / n_slices <= 65535 ) ;

   warpsize = deviceProp.warpSize ;  // Threads per warp, likely 32 well into the future

   threads_per_block = (n_slices + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (n_slices + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nhid / n_slices ;  // Height times width; visual field size
   block_launch.z = istop - istart ;

   device_hidden_activation_POOLED <<< block_launch , threads_per_block >>> ( avg_vs_max , istart , ilayer ) ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_hidden_activation_POOLED launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
-----------------------------------------------------------------------------------

   output_activation_no_hidden - Compute activations for the output layer

   This version is for when there is no hidden layer.
----------------------------------------------------------------------------------- */ __global__ void device_output_activation_no_hidden ( int istart // First case in this batch ) { int icase, iout, i_input ; double sum ; float *wptr, *inptr ; iout = blockIdx.x * blockDim.x + threadIdx.x ; if (iout >= d_n_classes) return ; icase = blockIdx.y ; wptr = d_weights[0] + iout ; // Weights on device have current neuron changing fastest inptr = d_predictors + (icase + istart) * d_n_pred ; sum = 0.0 ; for (i_input=0 ; i_input<d_n_pred ; i_input++) { // Weights are transpose of Host, with target changing fastest sum += *wptr * inptr[i_input] ; wptr += d_n_classes_cols ; } sum += *wptr ; // Bias d_output[(icase+istart)*d_n_classes+iout] = sum ; } int cuda_output_activation_no_hidden ( int istart , // First case in this batch int istop // One past last case ) { int warpsize, threads_per_block ; char msg[256] ; dim3 block_launch ; cudaError_t error_id ; warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future threads_per_block = (n_classes + warpsize - 1) / warpsize * warpsize ; if (threads_per_block > 4 * warpsize) threads_per_block = 4 * warpsize ; block_launch.x = (n_classes + threads_per_block - 1) / threads_per_block ; block_launch.y = istop - istart ; block_launch.z = 1 ; device_output_activation_no_hidden <<< block_launch , threads_per_block >>> ( istart ) ; // This does not trigger an escape, but it keeps the message queue running user_pressed_escape () ; cudaDeviceSynchronize() ; error_id = cudaGetLastError () ; if (error_id != cudaSuccess) { sprintf_s ( msg , 255 , "cuda_output_activation_no_hidden launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } return 0 ; } /* ----------------------------------------------------------------------------------- output_activation - Compute activations for the output layer This version is for when there is at least one hidden layer. 
----------------------------------------------------------------------------------- */ __global__ void device_output_activation ( int istart // First case in this batch; needed for output ) { int icase, iout, i_input, n_inputs ; double sum ; float *wptr ; double *inptr ; iout = blockIdx.x * blockDim.x + threadIdx.x ; if (iout >= d_n_classes) return ; icase = blockIdx.y ; // Activities are zero origin, not offset by istart wptr = d_weights[d_n_layers] + iout ; // Weights on device have current neuron changing fastest n_inputs = d_nhid[d_n_layers-1] ; inptr = d_act[d_n_layers-1] + icase * n_inputs ; sum = 0.0 ; for (i_input=0 ; i_input<n_inputs ; i_input++) { // Weights are transpose of Host, with target changing fastest sum += *wptr * inptr[i_input] ; wptr += d_n_classes_cols ; } sum += *wptr ; // Bias d_output[(icase+istart)*d_n_classes+iout] = sum ; } int cuda_output_activation ( int istart , // First case in this batch int istop // One past last case ) { int warpsize, threads_per_block ; char msg[256] ; dim3 block_launch ; cudaError_t error_id ; warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future threads_per_block = (n_classes + warpsize - 1) / warpsize * warpsize ; if (threads_per_block > 4 * warpsize) threads_per_block = 4 * warpsize ; block_launch.x = (n_classes + threads_per_block - 1) / threads_per_block ; block_launch.y = istop - istart ; block_launch.z = 1 ; device_output_activation <<< block_launch , threads_per_block >>> ( istart ) ; // This does not trigger an escape, but it keeps the message queue running user_pressed_escape () ; cudaDeviceSynchronize() ; error_id = cudaGetLastError () ; if (error_id != cudaSuccess) { sprintf_s ( msg , 255 , "cuda_output_activation launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } return 0 ; } /* -------------------------------------------------------------------------------- softmax - Do SoftMax modification of outputs for a 
batch -------------------------------------------------------------------------------- */ __global__ void device_softmax ( int istart , // First case in this batch int istop // One past last case ) { int icase, iout ; double *outptr, sum ; icase = blockIdx.x * blockDim.x + threadIdx.x ; if (icase >= istop - istart) return ; outptr = d_output + (icase + istart) * d_n_classes ; // Output vector for this case sum = 0.0 ; for (iout=0 ; iout<d_n_classes ; iout++) { if (outptr[iout] < MAX_EXP) outptr[iout] = exp ( outptr[iout] ) ; else outptr[iout] = exp ( MAX_EXP ) ; sum += outptr[iout] ; } for (iout=0 ; iout<d_n_classes ; iout++) outptr[iout] /= sum ; } int cuda_softmax ( int istart , // First case in this batch int istop // One past last case ) { int n, warpsize, blocks_per_grid, threads_per_block ; char msg[256] ; cudaError_t error_id ; warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future n = istop - istart ; // Number of elements threads_per_block = (n + warpsize - 1) / warpsize * warpsize ; if (threads_per_block > 4 * warpsize) threads_per_block = 4 * warpsize ; blocks_per_grid = (n + threads_per_block - 1) / threads_per_block ; device_softmax <<< blocks_per_grid , threads_per_block >>> ( istart , istop ) ; // This does not trigger an escape, but it keeps the message queue running user_pressed_escape () ; cudaDeviceSynchronize() ; error_id = cudaGetLastError () ; if (error_id != cudaSuccess) { sprintf_s ( msg , 255 , "cuda_cpx_softmax launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } return 0 ; } /* ------------------------------------------------------------------------------------------------ cuda_cpx_ll - Given output activations and true classes, compute log likelihood This would be called after the entire training set is processed, not in batches. 
------------------------------------------------------------------------------------------------ */

/*
   Kernel: shared-memory tree reduction of the negative log likelihood.

   Grid-stride loop over all d_ncases cases; each thread accumulates its own
   partial sum, then a block-wide tree reduction (power-of-two blockDim assumed,
   REDUC_THREADS) collapses it.  Block totals go to d_ll_out[blockIdx.x].
   The 1.e-30 offset guards log() against a zero output probability.
*/

__global__ void device_ll ()
{
   __shared__ double partial_ll[REDUC_THREADS] ;
   int i, n, n_classes, index ;
   double sum_ll ;

   index = threadIdx.x ;
   n = d_ncases ;
   n_classes = d_n_classes ;

   sum_ll = 0.0 ;
   for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x)
      sum_ll -= log ( d_output[i*n_classes+d_class[i]] + 1.e-30 ) ;

   partial_ll[index] = sum_ll ;
   __syncthreads() ;

   // Tree reduction in shared memory; barrier outside the divergent branch
   for (i=blockDim.x>>1 ; i ; i>>=1) {
      if (index < i)
         partial_ll[index] += partial_ll[index+i] ;
      __syncthreads() ;
      }

   if (index == 0)
      d_ll_out[blockIdx.x] = partial_ll[0] ;
}


/*
   Host wrapper: compute the log likelihood over the full training set.

   n  - Number of cases (n_cases)
   ll - Output: negative log likelihood summed over all cases

   Returns 0 on success, 1 on a CUDA launch or memcpy error (message audited).
*/

int cuda_ll (
   int n ,          // Number of values; n_cases
   double *ll       // Computed log likelihood
   )
{
   int i, blocks_per_grid ;
   double sum ;
   char msg[256] ;
   cudaError_t error_id ;

   blocks_per_grid = (n + REDUC_THREADS - 1) / REDUC_THREADS ;
   if (blocks_per_grid > REDUC_BLOCKS)
      blocks_per_grid = REDUC_BLOCKS ;   // Grid-stride loop in the kernel covers the rest

   device_ll <<< blocks_per_grid , REDUC_THREADS >>> () ;

   // This does not trigger an escape, but it keeps the message queue running
   user_pressed_escape () ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      // Fixed: message formerly said "cuda_cpx_ll", which is not this function's name
      sprintf_s ( msg , 255 , "cuda_ll launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   // NOTE(review): device_ll accumulates partial_ll as double, yet this copy is sized
   // with sizeof(float).  This is correct only if d_ll_out/h_ll_out and reduc_fdata are
   // declared as float buffers (with implicit narrowing on the device store) -- confirm
   // their declarations elsewhere in this file.
   error_id = cudaMemcpy ( reduc_fdata , h_ll_out , blocks_per_grid * sizeof(float) , cudaMemcpyDeviceToHost ) ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_ll Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   sum = 0.0 ;
   for (i=0 ; i<blocks_per_grid ; i++)
      sum += reduc_fdata[i] ;
   *ll = sum ;

   return 0 ;
}


/* --------------------------------------------------------------------------------

   output_delta - Put output delta into this_delta

-------------------------------------------------------------------------------- */

/*
   Kernel: delta at the output layer, (target - output), for one batch.

   Launch layout: blockIdx.x/threadIdx.x index the class; blockIdx.y is the case.
   d_output holds all training cases (hence the istart offset), while
   d_this_delta is relative to this batch only.
*/

__global__ void device_output_delta (
   int istart       // First case in this batch
   )
{
   int icase, iout ;
   double target ;

   iout = blockIdx.x * blockDim.x + threadIdx.x ;
   if (iout >= d_n_classes)
      return ;

   icase = blockIdx.y ;
   target = (iout == d_class[istart+icase])  ?  1.0 : 0.0 ;

   // The output matrix has all training cases, hence we add istart, but delta is relative to this batch.
   d_this_delta[icase*d_n_classes+iout] = target - d_output[(istart+icase)*d_n_classes+iout] ;
}


/*
   Host wrapper: launch device_output_delta for cases [istart, istop).

   Returns 0 on success, 1 on a CUDA launch error (message audited).
*/

int cuda_output_delta (
   int istart ,    // First case in this batch
   int istop ,     // One past last case
   int ntarg       // Number of targets (outputs, classes)
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (ntarg + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (ntarg + threads_per_block - 1) / threads_per_block ;
   block_launch.y = istop - istart ;
   block_launch.z = 1 ;

   device_output_delta <<< block_launch , threads_per_block >>> ( istart ) ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_output_delta launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/* --------------------------------------------------------------------------------

   output_gradient - Compute output layer gradient

-------------------------------------------------------------------------------- */

/*
   Kernel: output-layer gradient when there are NO hidden layers (inputs feed
   the output directly).  blockIdx.x/threadIdx.x index the input (iin == d_n_pred
   is the bias slot, hence the '>' guard); blockIdx.y is the case; blockIdx.z is
   the output class.  The nc parameter is unused here but kept for interface
   symmetry with the hidden-layer version.
*/

__global__ void device_output_gradient_no_hidden (
   int istart ,     // Index of first case in this batch
   int nc           // Number of cases in batch (unused; kept for interface symmetry)
   )
{
   int icase, iin ;
   float *gptr ;
   double input ;

   iin = blockIdx.x * blockDim.x + threadIdx.x ;
   if (iin > d_n_pred)     // '>' not '>=' so iin == d_n_pred survives as the bias slot
      return ;

   icase = blockIdx.y ;

   if (iin < d_n_pred)
      input = d_predictors[(istart+icase)*d_n_pred+iin] ;
   else
      input = 1.0 ;        // Bias

   // iout = blockIdx.z ;  We directly use this below
   gptr = d_grad[0] + icase * d_n_weights ;   // Gradient of output layer
   gptr[blockIdx.z*(d_n_pred+1)+iin] = d_this_delta[icase*d_n_classes+blockIdx.z] * input ;
}


/*
   Kernel: output-layer gradient when the output is fed by hidden layer 'ilayer'.
   Same launch layout as the no-hidden version, with the hidden activations as
   the inputs.  The nc parameter is unused; kept for interface symmetry.
*/

__global__ void device_output_gradient (
   int nc ,         // Number of cases in batch (unused; kept for interface symmetry)
   int ilayer       // Hidden layer which feeds the output layer
   )
{
   int icase, ihid, nhid ;
   float *gptr ;
   double input ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   nhid = d_nhid[ilayer] ;   // Neurons in last hidden layer
   if (ihid > nhid)          // ihid == nhid is the bias slot
      return ;

   icase = blockIdx.y ;

   if (ihid < nhid)
      input = d_act[ilayer][icase*nhid+ihid] ;
   else
      input = 1.0 ;          // Bias

   // iout = blockIdx.z ;  We directly use this below
   gptr = d_grad[ilayer+1] + icase * d_n_weights ;   // Gradient of output layer
   gptr[blockIdx.z*(nhid+1)+ihid] = d_this_delta[icase*d_n_classes+blockIdx.z] * input ;
}


/*
   Host wrapper: launch the appropriate output-gradient kernel.
   ilayer < 0 means there are no hidden layers at all.

   Returns 0 on success, 1 on a CUDA launch error (message audited).
   Grid: x covers inputs (+1 for bias), y is the case, z is the output class.
*/

int cuda_output_gradient (
   int istart ,            // Index of first case in this batch
   int nc ,                // Number of cases in batch
   int nin ,               // Number of inputs to last layer
   int ilayer ,            // Hidden layer which feeds the output layer
   int ntarg               // Number of targets (outputs, classes)
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (nin + 1 + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nin + 1 + threads_per_block - 1) / threads_per_block ;  // Include bias
   block_launch.y = nc ;
   block_launch.z = ntarg ;

   if (ilayer < 0)
      device_output_gradient_no_hidden <<< block_launch , threads_per_block >>> ( istart , nc ) ;
   else
      device_output_gradient <<< block_launch , threads_per_block >>> ( nc , ilayer ) ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_output_gradient launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/*
-----------------------------------------------------------------------------------

   backprop_delta_FC - Backpropagate delta from a fully connected hidden layer

----------------------------------------------------------------------------------- */

/*
   Kernel: backpropagate delta through a fully connected next layer.
   blockIdx.x/threadIdx.x index the neuron in the current layer; blockIdx.y is
   the case.  Reads d_this_delta (next layer's delta), writes d_prior_delta.
*/

__global__ void device_backprop_delta_FC (
   int ilayer       // Feed is from ilayer to ilayer+1, so ilayer+1 is FC
   )
{
   int j, icase, ihid, nhid, n_next ;
   float *next_weights ;
   double *delta_ptr, *prior_delta_ptr, this_act, delta ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   nhid = d_nhid[ilayer] ;      // Neurons in this hidden layer
   if (ihid >= nhid)
      return ;

   icase = blockIdx.y ;

   if (ilayer == d_n_layers-1) {    // Next layer is the output layer
      n_next = d_n_classes ;
      next_weights = d_weights[ilayer+1] + ihid * d_n_classes_cols ;
      }
   else {                           // Next layer is another hidden layer
      n_next = d_nhid[ilayer+1] ;
      next_weights = d_weights[ilayer+1] + ihid * d_nhid_cols[ilayer+1] ;
      }

   delta_ptr = d_this_delta + icase * n_next ;       // Coming from the next layer, which was just done
   prior_delta_ptr = d_prior_delta + icase * nhid ;  // Save for the next layer to do, one layer back

   delta = 0.0 ;
   for (j=0 ; j<n_next ; j++)
      delta += delta_ptr[j] * next_weights[j] ;   // Weights are transpose of host; later layer changes fastest

   if (d_layer_type[ilayer] == TYPE_FC || d_layer_type[ilayer] == TYPE_LOCAL || d_layer_type[ilayer] == TYPE_CONV) {
      this_act = d_act[ilayer][icase*nhid+ihid] ;
      delta *= 1.0 - this_act * this_act ;   // Derivative of tanh-style activation: 1 - act^2
      }

   prior_delta_ptr[ihid] = delta ;   // Save it for doing the next layer back
}


/*
   Host wrapper for device_backprop_delta_FC.
   Returns 0 on success, 1 on a CUDA launch error (message audited).
*/

int cuda_backprop_delta_FC (
   int nc ,          // Number of cases in batch
   int ilayer ,      // Hidden layer being processed
   int nhid_this     // Number of hidden neurons in this layer
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (nhid_this + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nhid_this + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nc ;
   block_launch.z = 1 ;

   device_backprop_delta_FC <<< block_launch , threads_per_block >>> ( ilayer ) ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_backprop_delta_FC launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/* -----------------------------------------------------------------------------------

   backprop_delta_nonpooled - Backpropagate delta from a locally connected or
                              convolutional hidden layer

----------------------------------------------------------------------------------- */

/*
   Kernel: backpropagate delta when the NEXT layer (ilayer+1) is LOCAL or CONV.
   blockIdx.x/threadIdx.x index the current-layer neuron; blockIdx.y is the case.
   The exact range of next-layer neurons whose receptive field covers this neuron
   is computed in integer arithmetic, avoiding per-element membership tests.
*/

__global__ void device_backprop_delta_nonpooled (
   int ilayer       // Feed is from ilayer to ilayer+1, so ilayer+1 is LOCAL or CONV
   )
{
   int k, icase, ihid, next_row, next_col, next_slice, this_row, this_col, this_slice ;
   int nH, k_next, wt_cols, rstart, cstart, prod, ltype ;
   int strideH, strideV, padH, padV, height, width, depth ;
   int next_rstart, next_rstop, next_cstart, next_cstop ;
   float *weights, *wtptr ;
   double *this_delta_ptr, *prior_delta_ptr ;
   double this_act, sum ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   if (ihid >= d_nhid[ilayer])
      return ;

   // Recover this neuron's (row, col, slice); layout is (height, width, depth), depth fastest
   prod = d_width[ilayer] * d_depth[ilayer] ;
   this_row = ihid / prod ;
   k = ihid - this_row * prod ;
   this_col = k / d_depth[ilayer] ;
   this_slice = k % d_depth[ilayer] ;

   icase = blockIdx.y ;

   nH = 2 * d_HalfWidH[ilayer+1] + 1 ;   // Horizontal filter size

   this_delta_ptr = d_this_delta + icase * d_nhid[ilayer+1] ;    // Coming from the next layer, which was just done
   prior_delta_ptr = d_prior_delta + icase * d_nhid[ilayer] ;    // Save for the next layer to do, one layer back

   ltype = d_layer_type[ilayer+1] ;
   strideV = d_strideV[ilayer+1] ;
   strideH = d_strideH[ilayer+1] ;
   padV = d_padV[ilayer+1] ;
   padH = d_padH[ilayer+1] ;
   height = d_height[ilayer+1] ;
   width = d_width[ilayer+1] ;
   depth = d_depth[ilayer+1] ;

   // this >= next * stride - pad           IMPLIES  next <= (this + pad) / stride
   // this <= next * stride - pad + 2 * hw  IMPLIES  next >= (this + pad - 2 * hw) / stride
   // We can safely do this in integer arithmetic

   next_rstop = this_row + padV ;
   k = next_rstart = next_rstop - 2 * d_HalfWidV[ilayer+1] ;
   next_rstop /= strideV ;
   next_rstart /= strideV ;
   if (k >= 0 && k % strideV)    // If the division above was inexact,
      ++next_rstart ;            // we must bypass the fractional part
   if (next_rstop >= height)
      next_rstop = height - 1 ;
   if (next_rstart < 0)
      next_rstart = 0 ;

   next_cstop = this_col + padH ;
   k = next_cstart = next_cstop - 2 * d_HalfWidH[ilayer+1] ;
   next_cstop /= strideH ;
   next_cstart /= strideH ;
   if (k >= 0 && k % strideH)
      ++next_cstart ;
   if (next_cstop >= width)
      next_cstop = width - 1 ;
   if (next_cstart < 0)
      next_cstart = 0 ;

   weights = d_weights[ilayer+1] ;
   if (ltype == TYPE_CONV)   // A CONV layer has the same weight set for all neurons in visible field
      wt_cols = d_depth_cols[ilayer+1] ;
   else                      // A LOCAL layer has different weights for each neuron
      wt_cols = d_nhid_cols[ilayer+1] ;

   // For LOCAL layers, neuron layout in current layer is (height, width, depth).

   sum = 0.0 ;
   for (next_row=next_rstart ; next_row<=next_rstop ; next_row++) {
      for (next_col=next_cstart ; next_col<=next_cstop ; next_col++) {

         // Center of first filter is at HalfWidth-Pad; filter begins at -Pad.
         rstart = strideV * next_row - padV ;
         cstart = strideH * next_col - padH ;

         // This is what we would be testing if we didn't compute the exact limits above
         // rstop = rstart + 2 * d_HalfWidV[ilayer+1] ;
         // cstop = cstart + 2 * d_HalfWidH[ilayer+1] ;
         // if (this_row >= rstart  &&  this_row <= rstop  &&  this_col >= cstart  &&  this_col <= cstop) {

         for (next_slice=0 ; next_slice<depth ; next_slice++) {
            k_next = (next_row * width + next_col) * depth + next_slice ;
            if (ltype == TYPE_CONV)   // A CONV layer has the same weight set for all neurons in visible field
               wtptr = weights + next_slice ;
            else                      // A LOCAL layer has different weights for each neuron (height, width, depth)
               wtptr = weights + k_next ;
            k = ((this_row - rstart) * nH + this_col - cstart) * d_depth[ilayer] + this_slice ;  // Location in filter
            sum += this_delta_ptr[k_next] * wtptr[k*wt_cols] ;
            }  // For next_slice
         }  // For next_col
      }  // For next_row

   // ihid = (this_row * d_width[ilayer] + this_col) * d_depth[ilayer] + this_slice ;
   if (d_layer_type[ilayer] == TYPE_FC || d_layer_type[ilayer] == TYPE_LOCAL || d_layer_type[ilayer] == TYPE_CONV) {
      this_act = d_act[ilayer][icase*d_nhid[ilayer]+ihid] ;
      sum *= 1.0 - this_act * this_act ;   // Derivative
      }

   prior_delta_ptr[ihid] = sum ;   // Save it for doing the next layer back
}


/*
   Host wrapper for device_backprop_delta_nonpooled.
   Returns 0 on success, 1 on a CUDA launch error (message audited).
*/

int cuda_backprop_delta_nonpooled (
   int nc ,          // Number of cases in batch
   int ilayer ,      // Hidden layer being processed
   int nhid_this     // Number of hidden neurons in this layer
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (nhid_this + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nhid_this + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nc ;
   block_launch.z = 1 ;

   device_backprop_delta_nonpooled <<< block_launch , threads_per_block >>> ( ilayer ) ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_backprop_delta_nonpooled launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/* -----------------------------------------------------------------------------------

   backprop_delta_pooled - Backpropagate delta from a POOLAVG or POOLMAX layer

----------------------------------------------------------------------------------- */

/*
   Kernel: backpropagate delta when the NEXT layer (ilayer+1) is a pooling layer.
   POOLAVG spreads delta evenly over the pool window; POOLMAX routes delta only
   to the neuron that won the max competition (recorded in d_poolmax_id).
*/

__global__ void device_backprop_delta_pooled (
   int ilayer       // Feed is from ilayer to ilayer+1, so ilayer+1 is POOLAVG or POOLMAX
   )
{
   int k, icase, ihid, next_row, next_col, this_row, this_col, this_slice ;
   int k_next, prod, this_cols, *poolmax_id_ptr ;
   int next_rstart, next_rstop, next_cstart, next_cstop ;
   double *this_delta_ptr, *prior_delta_ptr, sum, this_act ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   if (ihid >= d_nhid[ilayer])
      return ;

   // Recover this neuron's (row, col, slice); depth changes fastest
   prod = d_width[ilayer] * d_depth[ilayer] ;
   this_row = ihid / prod ;
   k = ihid - this_row * prod ;
   this_col = k / d_depth[ilayer] ;
   this_slice = k % d_depth[ilayer] ;

   icase = blockIdx.y ;

   this_delta_ptr = d_this_delta + icase * d_nhid[ilayer+1] ;    // Coming from the next layer, which was just done
   prior_delta_ptr = d_prior_delta + icase * d_nhid[ilayer] ;    // Save for the next layer to do, one layer back

   // this >= next * stride           IMPLIES  next <= this / stride
   // this <= next * stride + pw - 1  IMPLIES  next >= (this - pw + 1) / stride
   // We can safely do this in integer arithmetic

   next_rstop = this_row ;
   k = next_rstart = next_rstop - d_PoolWidV[ilayer+1] + 1 ;
   next_rstop /= d_strideV[ilayer+1] ;
   next_rstart /= d_strideV[ilayer+1] ;
   if (k >= 0 && k % d_strideV[ilayer+1])
      ++next_rstart ;
   if (next_rstop >= d_height[ilayer+1])
      next_rstop = d_height[ilayer+1] - 1 ;
   if (next_rstart < 0)
      next_rstart = 0 ;

   next_cstop = this_col ;
   k = next_cstart = next_cstop - d_PoolWidH[ilayer+1] + 1 ;
   next_cstop /= d_strideH[ilayer+1] ;
   next_cstart /= d_strideH[ilayer+1] ;
   if (k >= 0 && k % d_strideH[ilayer+1])
      ++next_cstart ;
   if (next_cstop >= d_width[ilayer+1])
      next_cstop = d_width[ilayer+1] - 1 ;
   if (next_cstart < 0)
      next_cstart = 0 ;

   sum = 0.0 ;

   if (d_layer_type[ilayer+1] == TYPE_POOLAVG) {
      for (next_row=next_rstart ; next_row<=next_rstop ; next_row++) {
         for (next_col=next_cstart ; next_col<=next_cstop ; next_col++) {
            k_next = (next_row * d_width[ilayer+1] + next_col) * d_depth[ilayer+1] + this_slice ;
            sum += this_delta_ptr[k_next] ;
            }  // For next_col
         }  // For next_row
      sum /= d_PoolWidH[ilayer+1] * d_PoolWidV[ilayer+1] ;   // Each pooled input had weight 1/poolsize
      }  // POOLAVG

   else if (d_layer_type[ilayer+1] == TYPE_POOLMAX) {
      poolmax_id_ptr = d_poolmax_id[ilayer+1] + icase * d_nhid[ilayer+1] ;
      this_cols = d_width[ilayer] ;
      for (next_row=next_rstart ; next_row<=next_rstop ; next_row++) {
         for (next_col=next_cstart ; next_col<=next_cstop ; next_col++) {
            k_next = (next_row * d_width[ilayer+1] + next_col) * d_depth[ilayer+1] + this_slice ;
            // Was the current-layer neuron the winner in the MAX competition for the next-layer neuron?
            if (this_row == poolmax_id_ptr[k_next] / this_cols  &&  this_col == poolmax_id_ptr[k_next] % this_cols)
               sum += this_delta_ptr[k_next] ;   // Weight is 1
            }  // For next_col
         }  // For next_row
      }  // POOLMAX

   // ihid = (this_row * d_width[ilayer] + this_col) * d_depth[ilayer] + this_slice ;
   if (d_layer_type[ilayer] == TYPE_FC || d_layer_type[ilayer] == TYPE_LOCAL || d_layer_type[ilayer] == TYPE_CONV) {
      this_act = d_act[ilayer][icase*d_nhid[ilayer]+ihid] ;
      sum *= 1.0 - this_act * this_act ;   // Derivative
      }

   prior_delta_ptr[ihid] = sum ;   // Save it for doing the next layer back
}


/*
   Host wrapper for device_backprop_delta_pooled.
   Returns 0 on success, 1 on a CUDA launch error (message audited).
*/

int cuda_backprop_delta_pooled (
   int nc ,          // Number of cases in batch
   int ilayer ,      // Hidden layer being processed
   int nhid_this     // Number of hidden neurons in this layer
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (nhid_this + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nhid_this + threads_per_block - 1) / threads_per_block ;
   block_launch.y = nc ;
   block_launch.z = 1 ;

   device_backprop_delta_pooled <<< block_launch , threads_per_block >>> ( ilayer ) ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_backprop_delta_pooled launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/* -----------------------------------------------------------------------------------

   move_delta - Move delta from prior_delta to this_delta

----------------------------------------------------------------------------------- */

/*
   Kernel: copy d_prior_delta into d_this_delta for the layer just processed,
   so the next backprop step reads it as "this" delta.
*/

__global__ void device_move_delta (
   int nhid         // Number of neurons in the layer just processed
   )
{
   int icase, ihid ;

   ihid = blockIdx.x * blockDim.x + threadIdx.x ;
   if (ihid >= nhid)
      return ;

   icase = blockIdx.y ;

   d_this_delta[icase*nhid+ihid] = d_prior_delta[icase*nhid+ihid] ;
}


/*
   Host wrapper for device_move_delta.
   Returns 0 on success, 1 on a CUDA launch error (message audited).
*/

int cuda_move_delta (
   int nc ,          // Number of cases in batch
   int nhid_this     // Number of hidden neurons in this layer
   )
{
   int warpsize, threads_per_block ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   threads_per_block = (nhid_this + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;

   block_launch.x = (nhid_this + threads_per_block - 1) / threads_per_block ;  // Include bias
   block_launch.y = nc ;
   block_launch.z = 1 ;

   device_move_delta <<< block_launch , threads_per_block >>> ( nhid_this ) ;

   cudaDeviceSynchronize() ;
   error_id = cudaGetLastError () ;
   if (error_id != cudaSuccess) {
      sprintf_s ( msg , 255 , "cuda_move_delta launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
      audit ( msg ) ;
      MEMTEXT ( msg ) ;
      return 1 ;
      }

   return 0 ;
}


/* -----------------------------------------------------------------------------------

   hidden_gradient_FC - Compute gradient for a fully connected hidden layer

----------------------------------------------------------------------------------- */

/*
   Kernel: gradient of a fully connected hidden layer.
   blockIdx.x/threadIdx.x index the input (iin == nin is the bias slot);
   blockIdx.y is the hidden neuron; blockIdx.z is the case.
   The nc parameter is unused here; kept for interface consistency.
*/

__global__ void device_hidden_gradient_FC (
   int istart ,     // Index of first case in this batch
   int nc ,         // Number of cases in batch (unused; kept for interface consistency)
   int ilayer       // Hidden layer being processed
   )
{
   int iin, ihid, nin, ninp1 ;
   float *gptr ;
   double input ;

   iin = blockIdx.x * blockDim.x + threadIdx.x ;

   if (ilayer == 0)
      nin = d_n_pred ;          // Number of inputs to each neuron in this layer
   else
      nin = d_nhid[ilayer-1] ;

   // icase = blockIdx.z ;  Used directly below

   if (iin > nin)               // '>' keeps iin == nin alive as the bias slot
      return ;
   else if (iin == nin)
      input = 1.0 ;             // Bias
   else if (ilayer)
      input = d_act[ilayer-1][blockIdx.z*nin+iin] ;
   else
      input = d_predictors[(istart+blockIdx.z)*nin+iin] ;

   ihid = blockIdx.y ;
   ninp1 = nin + 1 ;            // We mustn't forget the bias, so nin+1
   gptr = d_grad[ilayer] + blockIdx.z * d_n_weights ;   // Gradient of this hidden layer for this case
   gptr[ihid*ninp1+iin] = d_this_delta[blockIdx.z*d_nhid[ilayer]+ihid] * input ;
}


/* ------------------------------------------------------------------------------------------------------

   hidden_gradient_LOCAL_CONV - Compute gradient for a locally connected or
                                convolutional hidden layer

   For a LOCAL layer, we do all of the nhid * n_prior_weights * max_batch entries.
   But for a CONV layer, there are just depth * n_prior_weights * max_batch entries
   because the weight set for every (height,width) placement in the visual field is
   the same in any single slice.  So there are not enough entries in the gradient
   vector.  Thus, we use the previously allocated convgrad_work vector, which has
   nhid * n_prior_weights * max_batch entries.  Then we will launch another kernel
   which flattens out the height and width dimensions by summing them into the
   gradient vector.

   Note: ihidstart must be a multiple of height*width!

------------------------------------------------------------------------------------------------------ */

/*
   Kernel: per-case, per-filter-element gradient for a LOCAL or CONV layer.
   blockIdx.x/threadIdx.x index the filter element (ifilt == nfilt is the bias);
   blockIdx.y is the (offset) hidden neuron within this launch; blockIdx.z is
   the case.  LOCAL gradients go straight into d_grad; CONV gradients go into
   the d_convgrad_work scratch area for later flattening.
*/

__global__ void device_hidden_gradient_LOCAL_CONV (
   int local_vs_conv ,  // Is this a LOCAL (vs CONV) layer?
   int nfilt ,          // Filter size, (2*hwV+1) * (2*hwH+1) * depth of input (does not include +1 for bias)
   int istart ,         // Index of first case in this batch
   int depth_offset ,   // Start processing layers at this depth
   int n_depths ,       // Number of slices to be processed
   int ilayer           // Hidden layer being processed
   )
{
   int k, iin, ifilt, ihid_offset, ihid_actual, prod ;
   int in_row, in_col, in_slice, in_rows, in_cols, in_slices ;
   int this_row, this_col, ifiltV, ifiltH ;
   float *gptr ;
   double input, delta ;

   ifilt = blockIdx.x * blockDim.x + threadIdx.x ;   // <= filter size
   if (ifilt > nfilt)
      return ;

   // Input is from either the input image or a prior layer's activations
   // Get the input dimensions (height, width, depth)

   if (ilayer == 0) {
      in_rows = d_img_rows ;
      in_cols = d_img_cols ;
      in_slices = d_img_bands ;
      }
   else {
      in_rows = d_height[ilayer-1] ;
      in_cols = d_width[ilayer-1] ;
      in_slices = d_depth[ilayer-1] ;
      }

   // We may be splitting the computation into multiple launches, doing one or more slices in each.
   // If so, we need to compute the actual slice/neuron being processed here.
   // If we are doing a CONV layer, the offset will be into convgrad_work.
   // Whenever we access data, we use ihid_actual, and we also use it to save a LOCAL gradient.
   // But when we save a CONV gradient, we use ihid_offset.
   // Recall that hidden neurons are stored with depth changing fastest.

   ihid_offset = blockIdx.y ;                        // Offset into this launch set
   prod = d_width[ilayer] * d_height[ilayer] ;       // Size of visual field, a slice
   k = ihid_offset % n_depths + depth_offset ;       // Actual starting slice
   ihid_actual = ihid_offset / n_depths * d_depth[ilayer] + k ;   // Actual hidden neuron being done

   // If this is the bias term, it's simple.
   // Recall that blockIdx.z is the case in this batch

   if (ifilt == nfilt) {   // Bias term
      delta = d_this_delta[blockIdx.z*d_nhid[ilayer]+ihid_actual] ;
      if (local_vs_conv) {
         gptr = d_grad[ilayer] + blockIdx.z * d_n_weights ;   // Gradient of this hidden layer for this case
         gptr[ihid_actual*d_n_prior_weights[ilayer]+d_n_prior_weights[ilayer]-1] = delta ;
         }
      else {
         gptr = d_convgrad_work + blockIdx.z * d_max_convgrad_each ;
         gptr[ihid_offset*d_convgrad_cols[ilayer]+d_n_prior_weights[ilayer]-1] = delta ;
         }
      return ;
      }

   // Get the location of this kernel within the filter
   // Thread ifilt is the ordinal number of the filter element
   // The filter order is (height, width, slice)

   prod = (2 * d_HalfWidH[ilayer] + 1) * in_slices ;
   ifiltV = ifilt / prod ;
   k = ifilt - ifiltV * prod ;
   ifiltH = k / in_slices ;
   in_slice = k % in_slices ;

   // Get the location of this neuron within the volume of the current layer

   prod = d_width[ilayer] * d_depth[ilayer] ;
   this_row = ihid_actual / prod ;
   k = ihid_actual - this_row * prod ;
   this_col = k / d_depth[ilayer] ;
   // this_slice = k % d_depth[ilayer] ;  // Not needed; here for clarity only

   // Get the location of this filter element within the input volume.
   // It may be outside an edge, in which case there is nothing to do.
   // The filter center is at stride * CurrentPos + HalfWidth - Pad.
   // The upper-left corner is at stride * CurrentPos - Pad.
   // This can cause branch-induced stalling, but only at edges.

   in_row = d_strideV[ilayer] * this_row - d_padV[ilayer] + ifiltV ;
   if (in_row < 0 || in_row >= in_rows)
      return ;
   in_col = d_strideH[ilayer] * this_col - d_padH[ilayer] + ifiltH ;
   if (in_col < 0 || in_col >= in_cols)
      return ;

   // Here we go

   if (local_vs_conv)
      gptr = d_grad[ilayer] + blockIdx.z * d_n_weights ;   // Gradient of this hidden layer for this case
   else
      gptr = d_convgrad_work + blockIdx.z * d_max_convgrad_each ;

   delta = d_this_delta[blockIdx.z*d_nhid[ilayer]+ihid_actual] ;

   // Fetch the input.  Adjacent threads have adjacent memory accesses, though not zero padded for alignment.
   // But zero padding would do no good here because in general, warps will only by chance start with iin=0.
   // All is great if in_slices and prior-layer size are multiples of 16!

   iin = (in_row * in_cols + in_col) * in_slices + in_slice ;
   if (ilayer)
      input = d_act[ilayer-1][blockIdx.z*d_nhid[ilayer-1]+iin] ;
   else
      input = d_predictors[(istart+blockIdx.z)*d_n_pred+iin] ;

   // Adjacent threads access adjacent memory, though there is no zero padding for alignment.
   // Zero padding here would help, because ifilt starts at zero.
   // But that would complicate the code a lot, and this is a small fraction of instructions.
   // Also, the kernel is generally limited by the math pipeline.
   // And of course if n_prior_weights is a multiple of 32, all is good!

   if (local_vs_conv)
      gptr[ihid_actual*d_n_prior_weights[ilayer]+ifilt] = input * delta ;
   else
      gptr[ihid_offset*d_convgrad_cols[ilayer]+ifilt] = input * delta ;
}


/*
   Kernel: flatten CONV scratch gradients by summing over the visual field
   (height x width) into the true gradient vector, one slice at a time.
   blockIdx.x/threadIdx.x index the prior weight; blockIdx.y is the slice
   within this launch; blockIdx.z is the case.
*/

__global__ void device_flatten_gradient (
   int islice_start ,   // Index of first slice in this batch
   int max_depth ,      // Max depth in launch, <= slices reserved in convgrad_work
   int ilayer           // Hidden layer being processed
   )
{
   int k, islice, icase, iprior, irow, icol ;
   double sum ;
   float *workptr, *gradptr ;

   iprior = blockIdx.x * blockDim.x + threadIdx.x ;
   if (iprior >= d_n_prior_weights[ilayer])
      return ;

   islice = blockIdx.y ;
   icase = blockIdx.z ;

   gradptr = d_grad[ilayer] + icase * d_n_weights ;          // Gradient of this hidden layer for this case
   workptr = d_convgrad_work + icase * d_max_convgrad_each ;

   // nvisual = d_height[ilayer] * d_width[ilayer] ;  // Also equals nhid / depth

   sum = 0.0 ;
   for (irow=0 ; irow<d_height[ilayer] ; irow++) {
      for (icol=0 ; icol<d_width[ilayer] ; icol++) {
         k = (irow * d_width[ilayer] + icol) * max_depth + islice ;   // The neuron at (irow, icol, islice)
         // assert ( k*d_convgrad_cols[ilayer]+iprior < d_max_convgrad_each ) ;
         sum += workptr[k*d_convgrad_cols[ilayer]+iprior] ;
         }
      }

   gradptr[(islice+islice_start)*d_n_prior_weights[ilayer]+iprior] = sum ;
}


/*
   Host wrapper: compute the gradient for one hidden layer.

   FC layers launch once.  LOCAL/CONV layers may need several launches,
   limited by max_hid_grad (grid-y limit / user cap) and, for CONV, by
   max_mem_grad (scratch memory per launch).  CONV launches are followed by a
   device_flatten_gradient launch that sums the scratch over the visual field.

   Returns 0 on success, 1 on any CUDA error (message audited).
   n_launches is returned purely for reporting launch statistics.
*/

int cuda_hidden_gradient (
   int max_hid_grad ,     // Max hid in a CONV hid grad launch; multiple of height*width; <= 65535
   int max_mem_grad ,     // Maximum CONV working memory (MB) per CUDA launch; prevents timeout error and lowers memory use
   int istart ,           // Index of first case in this batch
   int nc ,               // Number of cases in batch
   int ilayer ,           // Hidden layer being processed
   int type ,             // Type of this layer
   int nhid_this ,        // Number of hidden neurons in this layer
   int nhid_prior ,       // And in prior layer
   int depth ,            // Depth of this layer
   int n_prior_weights ,  // N of inputs per neuron (including bias) to prior layer = prior depth * (2*HalfWidH+1) * (2*HalfWidV+1) + 1
   int *n_launches        // Returned for user edification
   )
{
   int i, conv_cols, n_max, nhid_launch, ihid_start, warpsize, threads_per_block, field, divisor ;
   char msg[256] ;
   dim3 block_launch ;
   cudaError_t error_id ;

   field = nhid_this / depth ;        // Visual field size = height * width
   warpsize = deviceProp.warpSize ;   // Threads per warp, likely 32 well into the future

   *n_launches = 1 ;   // This is purely for reporting launch statistics

   if (type == TYPE_FC) {
      threads_per_block = (nhid_prior + 1 + warpsize - 1) / warpsize * warpsize ;  // +1 includes bias
      if (threads_per_block > 4 * warpsize)
         threads_per_block = 4 * warpsize ;
      block_launch.x = (nhid_prior + 1 + threads_per_block - 1) / threads_per_block ;  // Include bias
      block_launch.y = nhid_this ;
      block_launch.z = nc ;
      device_hidden_gradient_FC <<< block_launch , threads_per_block >>> ( istart , nc , ilayer ) ;
      cudaDeviceSynchronize() ;
      }

   else if (type == TYPE_LOCAL || type == TYPE_CONV) {

      divisor = 1 ;   // Figure out how much we have to divide slices to meet max_hid_grad and max_mem_grad limits
      if (type == TYPE_CONV) {
         conv_cols = (n_prior_weights + 31) / 32 * 32 ;   // CONV scratch is zero padded for full coalescing
         // NOTE(review): 1024 * 1024 * max_mem_grad is evaluated in int before the divide;
         // a max_mem_grad above ~2047 would overflow -- confirm the caller bounds it.
         n_max = 1024 * 1024 * max_mem_grad / (max_batch * conv_cols * sizeof(float)) ;  // Launch limit satisfying memory
         }
      else
         n_max = MAXPOSNUM ;
      for ( ;; ) {
         nhid_launch = depth / divisor * field ;   // We will launch this many hid at a time
         if (nhid_launch <= max_hid_grad && nhid_launch <= n_max)
            break ;
         ++divisor ;
         }
      if (nhid_launch < field)   // Careless user may have set it too small
         nhid_launch = field ;   // So ignore it

/*
   Launch loop
*/

      *n_launches = 0 ;

      if (type == TYPE_CONV) {
         // We must zero the CONV work area because some entries may be undefined
         // This must also be done in the last pass, because a partial launch at the end
         // may have garbage from the prior launch in 'undefined' locations.
         for (i=0 ; i<max_convgrad_work ; i++)
            fdata[i] = 0.0 ;
         error_id = cudaMemcpy ( h_convgrad_work , fdata , max_convgrad_work * sizeof(float) , cudaMemcpyHostToDevice ) ;
         if (error_id != cudaSuccess) {
            sprintf_s ( msg , 255 , "cuda_hidden_gradient_LOCAL_CONV convgrad_work zero error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
            audit ( msg ) ;
            MEMTEXT ( msg ) ;
            return 1 ;
            }
         }

      for (ihid_start=0 ; ihid_start < depth*field ; ihid_start+=nhid_launch) {

         threads_per_block = (n_prior_weights + warpsize - 1) / warpsize * warpsize ;
         if (threads_per_block > 4 * warpsize)   // Increase?  May be reasonable
            threads_per_block = 4 * warpsize ;
         block_launch.x = (n_prior_weights + threads_per_block - 1) / threads_per_block ;
         block_launch.y = nhid_launch ;

         if (depth*field - ihid_start < nhid_launch) {   // Last launch may be partial
            block_launch.y = depth*field - ihid_start ;
            if (type == TYPE_CONV) {
               // Re-zero scratch so the partial launch doesn't pick up prior garbage
               for (i=0 ; i<max_convgrad_work ; i++)
                  fdata[i] = 0.0 ;
               error_id = cudaMemcpy ( h_convgrad_work , fdata , max_convgrad_work * sizeof(float) , cudaMemcpyHostToDevice ) ;
               if (error_id != cudaSuccess) {
                  sprintf_s ( msg , 255 , "cuda_hidden_gradient_LOCAL_CONV convgrad_work zero error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
                  audit ( msg ) ;
                  MEMTEXT ( msg ) ;
                  return 1 ;
                  }
               }
            }  // If last launch is partial

         block_launch.z = nc ;

         device_hidden_gradient_LOCAL_CONV <<< block_launch , threads_per_block >>>
                        ( type==TYPE_LOCAL ? 1 : 0 , n_prior_weights-1 , istart , ihid_start/field , block_launch.y/field , ilayer ) ;
         cudaDeviceSynchronize() ;
         error_id = cudaGetLastError () ;
         if (error_id != cudaSuccess) {
            sprintf_s ( msg , 255 , "cuda_hidden_gradient LOCAL_CONV launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
            audit ( msg ) ;
            MEMTEXT ( msg ) ;
            return 1 ;
            }

         if (type == TYPE_CONV) {   // Must also flatten gradient
            assert ( nhid_launch * nc * n_prior_weights <= max_convgrad_work ) ;
            threads_per_block = (n_prior_weights + warpsize - 1) / warpsize * warpsize ;
            if (threads_per_block > 4 * warpsize)
               threads_per_block = 4 * warpsize ;
            block_launch.x = (n_prior_weights + threads_per_block - 1) / threads_per_block ;
            block_launch.y /= field ;   // Number of slices in launch
            block_launch.z = nc ;
            device_flatten_gradient <<< block_launch , threads_per_block >>> ( ihid_start/field , block_launch.y , ilayer ) ;
            cudaDeviceSynchronize() ;
            error_id = cudaGetLastError () ;
            if (error_id != cudaSuccess) {
               sprintf_s ( msg , 255 , "cuda_hidden_gradient flatten_gradient launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
               audit ( msg ) ;
               MEMTEXT ( msg ) ;
               return 1 ;
               }
            }  // CONV

         ++*n_launches ;
         }  // Launch loop
      }  // LOCAL or CONV

   return 0 ;
}


/* --------------------------------------------------------------------------------

   zero_gradient - Some gradient entries may be undefined (zero, actually)
                   due to lack of connections in a poorly designed model.
                   So before computing the gradient for a batch, we must zero
                   the entire gradient vector.
-------------------------------------------------------------------------------- */ __global__ void device_zero_gradient ( int nc // Number of cases in batch ) { int index, icase ; float *gptr ; index = blockIdx.x * blockDim.x + threadIdx.x ; if (index >= d_n_weights) return ; icase = blockIdx.y ; gptr = d_grad[0] + index ; // Complete gradient starts at [0] gptr[icase*d_n_weights] = 0.0f ; } int cuda_zero_gradient ( int nc , // Number of cases in batch int n_weights // Number of weights ) { int warpsize, threads_per_block ; char msg[256] ; dim3 block_launch ; cudaError_t error_id ; warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future threads_per_block = (n_weights + warpsize - 1) / warpsize * warpsize ; if (threads_per_block > 4 * warpsize) threads_per_block = 4 * warpsize ; block_launch.x = (n_weights + threads_per_block - 1) / threads_per_block ; block_launch.y = nc ; block_launch.z = 1 ; device_zero_gradient <<< block_launch , threads_per_block >>> ( nc ) ; cudaDeviceSynchronize() ; error_id = cudaGetLastError () ; if (error_id != cudaSuccess) { sprintf_s ( msg , 255 , "cuda_zero_gradient launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } return 0 ; } /* -------------------------------------------------------------------------------- fetch_gradient - Retrieve sum across batch of complete gradient The CUDA grad is neither the order of the CUDA weights, nor the HOST grad! Rather, they are grouped by current neuron, (row, col, slice), and with input order as the CUDA inputs (row, column, slice). A fully connected layer has height=width=1; all neurons are depth. 
-------------------------------------------------------------------------------- */ __global__ void device_fetch_gradient ( int nc // Number of cases in batch ) { int index, icase ; float *gptr ; double sum ; index = blockIdx.x * blockDim.x + threadIdx.x ; if (index >= d_n_weights) return ; sum = 0.0 ; gptr = d_grad[0] + index ; // Complete gradient starts at [0] for (icase=0 ; icase<nc ; icase++) // For all cases in this batch sum += gptr[icase*d_n_weights] ; *gptr = sum ; } int cuda_fetch_gradient ( int nc , // Number of cases in batch int n_weights , // Number of weights double **hostgrad , // Gradient sum output here int n_classes , // Number of outputs int n_layers , // Hidden layers; does not include output int *layer_type , // Each entry (input to final) is TYPE_? in CONST.H int img_rows , // Size of input image int img_cols , int img_bands , int *height , // Height of visible field in each layer int *width , // Width of visible field int *depth , // Number of slices in each layer int *nhid , // Number of hidden neurons in each layer int *hwH , // Half-width of filters int *hwV ) { int warpsize, blocks_per_grid, threads_per_block ; int n, n_prior, ilayer, isub ; int idepth, iheight, iwidth, ndepth, nheight, nwidth ; int in_row, in_col, in_slice, in_n_height, in_n_width, in_n_depth ; double *gptr ; float *fptr ; char msg[256] ; cudaError_t error_id ; warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future threads_per_block = (n_weights + warpsize - 1) / warpsize * warpsize ; if (threads_per_block > 4 * warpsize) threads_per_block = 4 * warpsize ; blocks_per_grid = (n_weights + threads_per_block - 1) / threads_per_block ; device_fetch_gradient <<< blocks_per_grid , threads_per_block >>> ( nc ) ; cudaDeviceSynchronize() ; error_id = cudaGetLastError () ; if (error_id != cudaSuccess) { sprintf_s ( msg , 255 , "cuda_fetch_gradient launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 
1 ; } error_id = cudaMemcpy ( fdata , grad , n_weights * sizeof(float) , cudaMemcpyDeviceToHost ) ; if (error_id != cudaSuccess) { sprintf_s ( msg , 255 , "cuda_fetch_gradient copy error %d: %s", error_id, cudaGetErrorString(error_id) ) ; audit ( msg ) ; MEMTEXT ( msg ) ; return 1 ; } /* Reorder */ fptr = fdata ; for (ilayer=0 ; ilayer<=n_layers ; ilayer++) { gptr = hostgrad[ilayer] ; /* Fully connected */ if (ilayer == n_layers || layer_type[ilayer] == TYPE_FC) { if (ilayer == 0) { in_n_height = img_rows ; in_n_width = img_cols ; in_n_depth = img_bands ; } else { in_n_height = height[ilayer-1] ; in_n_width = width[ilayer-1] ; in_n_depth = depth[ilayer-1] ; } n_prior = in_n_height * in_n_width * in_n_depth + 1 ; // Number of weights per neuron, including bias if (ilayer == n_layers) n = n_classes ; // Equals depth else n = nhid[ilayer] ; // Equals depth for (idepth=0 ; idepth<n ; idepth++) { for (in_row=0 ; in_row<in_n_height ; in_row++) { for (in_col=0 ; in_col<in_n_width ; in_col++) { for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) { // Compute location of this neuron's weight vector in host isub = idepth * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For in_slice } // For in_col } // For in_row // Bias isub = idepth * n_prior + n_prior - 1 ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For idepth } /* LOCAL */ else if (layer_type[ilayer] == TYPE_LOCAL) { // For LOCAL layers, neuron layout in current layer is (height, width, depth). 
n = nhid[ilayer] ; ndepth = depth[ilayer] ; nheight = height[ilayer] ; nwidth = width[ilayer] ; in_n_height = 2 * hwV[ilayer] + 1 ; in_n_width = 2 * hwH[ilayer] + 1 ; if (ilayer == 0) in_n_depth = img_bands ; else in_n_depth = depth[ilayer-1] ; n_prior = in_n_height * in_n_width * in_n_depth + 1 ; // Number of weights per neuron, including bias for (iheight=0 ; iheight<nheight ; iheight++) { // nhid = ndepth * nheight * nwidth for (iwidth=0 ; iwidth<nwidth ; iwidth++) { // We must reorder so depth changes fastest for (idepth=0 ; idepth<ndepth ; idepth++) { for (in_row=0 ; in_row<in_n_height ; in_row++) { for (in_col=0 ; in_col<in_n_width ; in_col++) { for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) { // Compute location of this neuron's weight in host isub = (idepth * nheight + iheight) * nwidth + iwidth ; // Neuron in this layer isub = isub * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For in_slice } // For in_col } // For in_row // Bias isub = (idepth * nheight + iheight) * nwidth + iwidth ; // Neuron in this layer isub = isub * n_prior + n_prior - 1 ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For idepth } // For iwidth } // For iheight } /* CONV */ else if (layer_type[ilayer] == TYPE_CONV) { nheight = height[ilayer] ; nwidth = width[ilayer] ; ndepth = depth[ilayer] ; in_n_height = 2 * hwV[ilayer] + 1 ; in_n_width = 2 * hwH[ilayer] + 1 ; if (ilayer == 0) in_n_depth = img_bands ; else in_n_depth = depth[ilayer-1] ; n_prior = in_n_height * in_n_width * in_n_depth + 1 ; // Number of weights per neuron, including bias for (idepth=0 ; idepth<ndepth ; idepth++) { for (in_row=0 ; in_row<in_n_height ; in_row++) { for (in_col=0 ; in_col<in_n_width ; in_col++) { for (in_slice=0 ; in_slice<in_n_depth ; in_slice++) { // Compute location of this neuron's weight vector in host isub = idepth * n_prior + (in_slice * in_n_height + in_row) * in_n_width + in_col ; assert ( isub < 
n_weights ) ; gptr[isub] += *fptr++ ; } // For in_slice } // For in_col } // For in_row //Bias isub = idepth * n_prior + n_prior - 1 ; assert ( isub < n_weights ) ; gptr[isub] += *fptr++ ; } // For idepth } } // For ilayer assert ( fptr == fdata + n_weights ) ; return 0 ; } /* -------------------------------------------------------------------------------- CUDA_CLEANUP - Cleanup after CUDA processing -------------------------------------------------------------------------------- */ void cuda_cleanup ( int n_layers , int *layer_type ) { int i ; double sum ; char msg[256] ; MEMTEXT ( "CUDA cuda_cleanup starting" ) ; if (h_predictors != NULL) { cudaFree ( h_predictors ) ; h_predictors = NULL ; } if (h_class != NULL) { cudaFree ( h_class ) ; h_class = NULL ; } if (activations != NULL) { cudaFree ( activations ) ; activations = NULL ; } if (h_output != NULL) { cudaFree ( h_output ) ; h_output = NULL ; } for (i=0 ; i<n_layers ; i++) { if (h_poolmax_id[i] != NULL) { cudaFree ( h_poolmax_id[i] ) ; h_poolmax_id[i] = NULL ; } } if (weights != NULL) { cudaFree ( weights ) ; weights = NULL ; } if (grad != NULL) { cudaFree ( grad ) ; grad = NULL ; } if (h_convgrad_work != NULL) { cudaFree ( h_convgrad_work ) ; h_convgrad_work = NULL ; } if (h_this_delta != NULL) { cudaFree ( h_this_delta ) ; h_this_delta = NULL ; } if (h_prior_delta != NULL) { cudaFree ( h_prior_delta ) ; h_prior_delta = NULL ; } if (h_ll_out != NULL) { cudaFree ( h_ll_out ) ; h_ll_out = NULL ; } if (reduc_fdata != NULL) { FREE ( reduc_fdata ) ; reduc_fdata = NULL ; } if (fdata != NULL) { FREE ( fdata ) ; fdata = NULL ; } total_memory = 0.0 ; cudaDeviceReset () ; /* Print CUDA timers */ sum = 1.e-20 ; for (i=0 ; i<MAX_LAYERS ; i++) { sum += CudaTimers.act[i] ; sum += CudaTimers.delta[i] ; sum += CudaTimers.grad[i] ; } sum += CudaTimers.weights + CudaTimers.softmax + CudaTimers.ll + CudaTimers.movedelta + CudaTimers.fetchgrad ; cudalog ( "" ) ; cudalog ( "" ) ; cudalog ( "CUDA times in seconds: total, 
(percent), per launch" ) ; cudalog ( "" ) ; sprintf ( msg, " Send weights = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.weights, 100.0 * CudaTimers.weights / sum, 0.001 * CudaTimers.weights / (CudaTimers.ncalls_weights + 1.e-20)) ; cudalog ( msg ) ; for (i=0 ; i<=n_layers ; i++) { if (i == n_layers) cudalog ( " Output layer" ) ; else if (layer_type[i] == TYPE_FC) { sprintf ( msg, " Layer %d is fully connected", i+1 ) ; cudalog ( msg ) ; } else if (layer_type[i] == TYPE_LOCAL) { sprintf ( msg, " Layer %d is locally connected", i+1 ) ; cudalog ( msg ) ; } else if (layer_type[i] == TYPE_CONV) { sprintf ( msg, " Layer %d is convolutional", i+1 ) ; cudalog ( msg ) ; } else if (layer_type[i] == TYPE_POOLAVG) { sprintf ( msg, " Layer %d is pooled average", i+1 ) ; cudalog ( msg ) ; } else if (layer_type[i] == TYPE_POOLMAX) { sprintf ( msg, " Layer %d is pooled max", i+1 ) ; cudalog ( msg ) ; } sprintf ( msg, " act = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.act[i], 100.0 * CudaTimers.act[i] / sum, 0.001 * CudaTimers.act[i] / (CudaTimers.ncalls_act[i] + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " delta = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.delta[i], 100.0 * CudaTimers.delta[i] / sum, 0.001 * CudaTimers.delta[i] / (CudaTimers.ncalls_delta[i] + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " grad = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.grad[i], 100.0 * CudaTimers.grad[i] / sum, 0.001 * CudaTimers.grad[i] / (CudaTimers.ncalls_grad[i] + 1.e-20)) ; cudalog ( msg ) ; assert ( CudaTimers.grad[i] >= 0.0 ) ; assert ( CudaTimers.ncalls_grad[i] >= 0.0 ) ; assert ( (0.001 * CudaTimers.grad[i] / (CudaTimers.ncalls_grad[i] + 1.e-20)) >= 0.0 ) ; } sprintf ( msg, " SoftMax = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.softmax, 100.0 * CudaTimers.softmax / sum, 0.001 * CudaTimers.softmax / (CudaTimers.ncalls_softmax + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " Log 
likelihood = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.ll, 100.0 * CudaTimers.ll / sum, 0.001 * CudaTimers.ll / (CudaTimers.ncalls_ll + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " Move delta = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.movedelta, 100.0 * CudaTimers.movedelta / sum, 0.001 * CudaTimers.movedelta / (CudaTimers.ncalls_movedelta + 1.e-20)) ; cudalog ( msg ) ; sprintf ( msg, " Fetch grad = %8.3lf (%5.1lf percent) %10.6lf per launch", 0.001 * CudaTimers.fetchgrad, 100.0 * CudaTimers.fetchgrad / sum, 0.001 * CudaTimers.fetchgrad / (CudaTimers.ncalls_fetchgrad + 1.e-20)) ; cudalog ( msg ) ; MEMTEXT ( "CUDA cuda_cleanup ending" ) ; }
0f01d0099fbc7e4079b303845010c92d7c2afd99.hip
// !!! This is a file automatically generated by hipify!!! #include "BufferedRenderProcess.test.cuh" #include "../../../Namespaces/Tests/Tests.h" #include "../../../Namespaces/Images/Images.cuh" #include <string> #include <cctype> namespace BufferedRenderProcessTest { void FramerateLogger::start() { renderedFrames = 0; lastRenderedFrames = lastDisplayedFrames = 0; lastTime = clock(); } void FramerateLogger::setBufferedWindow(BufferedWindow *bufferedWindow) { displayWindow = bufferedWindow; } void FramerateLogger::registerIterationCompletionCallback(BufferedRenderProcess *process) { process->setIterationCompletionCallback(iterationCompletionCallback, (void*)this); } void FramerateLogger::iterationCompletionCallback(void *testCase) { FramerateLogger *self = ((FramerateLogger*)testCase); self->renderedFrames++; clock_t curTime = clock(); float deltaTime = (((float)(curTime - self->lastTime)) / CLOCKS_PER_SEC); if (deltaTime >= 1.0f) { uint32_t rendered = self->renderedFrames; uint32_t displayed = ((self->displayWindow == NULL) ? 0 : (uint32_t)self->displayWindow->framesDisplayed()); float fps = ((rendered - self->lastRenderedFrames) / deltaTime); float screenFps = ((displayed - self->lastDisplayedFrames) / deltaTime); self->lastRenderedFrames = rendered; self->lastDisplayedFrames = displayed; self->lastTime = curTime; std::cout << "FPS: " << fps << " (displayed: " << screenFps << ")" << std::endl; } } namespace { static void testBufferedRenderProcess( BufferedRenderer*(*bufferedRendererCreateFunction)(const Renderer::ThreadConfiguration &configuration, void *aux), void *createFnAux, const Renderer::ThreadConfiguration &configuration, BufferedRenderProcess *process) { BufferedRenderer *renderer = bufferedRendererCreateFunction(configuration, createFnAux); bool shouldSynchFromDevice = (!((configuration.numHostThreads() > 0) || (configuration.numActiveDevices() > 1))); BufferedWindow bufferedWindow(shouldSynchFromDevice ? 
BufferedWindow::SYNCH_FRAME_BUFFER_FROM_DEVICE : 0); process->setRenderer(renderer); process->setTargetDisplayWindow(&bufferedWindow); FramerateLogger logger; logger.setBufferedWindow(&bufferedWindow); logger.registerIterationCompletionCallback(process); logger.start(); process->start(); while (!bufferedWindow.windowClosed()) std::this_thread::sleep_for(std::chrono::milliseconds(32)); process->end(); if ((renderer->getFrameBuffer() != NULL) && (renderer->getFrameBuffer()->cpuHandle() != NULL)) { std::cout << "Enter name ending with \".png\" to save the render: "; std::string rawLine; std::getline(std::cin, rawLine); int start = 0; while ((start < rawLine.length()) && std::isspace(rawLine[start])) start++; int end = (int)rawLine.size(); while ((end > 0) && std::isspace(rawLine[end - 1])) end--; std::string line; for (int i = start; i < end; i++) line += rawLine[i]; if ((line.length() > 4) && line.substr(line.length() - 4) == ".png") { std::cout << "Saving to \"" << line << "\"..." << std::endl; bool failed = false; if (shouldSynchFromDevice) { if (hipSetDevice(0) != hipSuccess) failed = true; else if (renderer->getFrameBuffer()->gpuHandle(0) == NULL) failed = true; else if (!renderer->getFrameBuffer()->cpuHandle()->updateHostBlocks( renderer->getFrameBuffer()->gpuHandle(0), 0, renderer->getFrameBuffer()->cpuHandle()->getBlockCount())) failed = true; } if (!failed) { Images::Error error = Images::saveBufferPNG(*renderer->getFrameBuffer()->cpuHandle(), line); if (error != Images::IMAGES_NO_ERROR) std::cout << "Failed to save image. Error code: " << error << std::endl; else std::cout << "Image saved successfuly;" << std::endl; } else std::cout << "Failed to load image from device..." 
<< std::endl; } } delete renderer; } } #define NUM_THREADS_PER_GPU 2 void runTestGauntlet( BufferedRenderer*(*bufferedRendererCreateFunction)(const Renderer::ThreadConfiguration &configuration, void *aux), void *createFnAux, FrameBufferManager *bufferA, FrameBufferManager *bufferB, uint32_t tests) { BufferedRenderProcess bufferedRenderProcess; bufferedRenderProcess.setInfinateTargetIterations(); bufferedRenderProcess.setTargetResolutionToWindowSize(); Renderer::ThreadConfiguration configuration; bufferedRenderProcess.setBuffer(bufferA); if ((tests & TEST_MULTI_ITER_CPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ALL); configuration.configureEveryGPU(Renderer::ThreadConfiguration::NONE); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_CPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if (configuration.numDevices() > 0) { if ((tests & TEST_MULTI_ITER_GPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::NONE); configuration.configureEveryGPU(NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_GPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if ((tests & TEST_MULTI_ITER_CPU_AND_GPU) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ALL_BUT_GPU_THREADS); configuration.configureEveryGPU(NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_CPU_AND_GPU", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } } if ((tests & TEST_MULTI_ITER_1_CPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ONE); configuration.configureEveryGPU(Renderer::ThreadConfiguration::NONE); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_1_CPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if (((tests & TEST_MULTI_ITER_1_GPU_ONLY) != 0) && 
(configuration.numDevices() > 1)) { configuration.configureCPU(Renderer::ThreadConfiguration::NONE); configuration.configureEveryGPU(0); configuration.configureGPU(0, NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_1_GPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } bufferedRenderProcess.setDoubleBuffers(bufferA, bufferB); if ((tests & TEST_SINGLE_ITER_CPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ALL); configuration.configureEveryGPU(Renderer::ThreadConfiguration::NONE); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_CPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if (configuration.numDevices() > 0) { if ((tests & TEST_SINGLE_ITER_GPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::NONE); configuration.configureEveryGPU(NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_GPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if ((tests & TEST_SINGLE_ITER_CPU_AND_GPU) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ALL_BUT_GPU_THREADS); configuration.configureEveryGPU(NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_CPU_AND_GPU", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } } if ((tests & TEST_SINGLE_ITER_1_CPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ONE); configuration.configureEveryGPU(Renderer::ThreadConfiguration::NONE); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_1_CPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if (((tests & TEST_SINGLE_ITER_1_GPU_ONLY) != 0) && (configuration.numDevices() > 1)) { configuration.configureCPU(Renderer::ThreadConfiguration::NONE); configuration.configureEveryGPU(0); 
configuration.configureGPU(0, NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_1_GPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } } }
0f01d0099fbc7e4079b303845010c92d7c2afd99.cu
#include "BufferedRenderProcess.test.cuh" #include "../../../Namespaces/Tests/Tests.h" #include "../../../Namespaces/Images/Images.cuh" #include <string> #include <cctype> namespace BufferedRenderProcessTest { void FramerateLogger::start() { renderedFrames = 0; lastRenderedFrames = lastDisplayedFrames = 0; lastTime = clock(); } void FramerateLogger::setBufferedWindow(BufferedWindow *bufferedWindow) { displayWindow = bufferedWindow; } void FramerateLogger::registerIterationCompletionCallback(BufferedRenderProcess *process) { process->setIterationCompletionCallback(iterationCompletionCallback, (void*)this); } void FramerateLogger::iterationCompletionCallback(void *testCase) { FramerateLogger *self = ((FramerateLogger*)testCase); self->renderedFrames++; clock_t curTime = clock(); float deltaTime = (((float)(curTime - self->lastTime)) / CLOCKS_PER_SEC); if (deltaTime >= 1.0f) { uint32_t rendered = self->renderedFrames; uint32_t displayed = ((self->displayWindow == NULL) ? 0 : (uint32_t)self->displayWindow->framesDisplayed()); float fps = ((rendered - self->lastRenderedFrames) / deltaTime); float screenFps = ((displayed - self->lastDisplayedFrames) / deltaTime); self->lastRenderedFrames = rendered; self->lastDisplayedFrames = displayed; self->lastTime = curTime; std::cout << "FPS: " << fps << " (displayed: " << screenFps << ")" << std::endl; } } namespace { static void testBufferedRenderProcess( BufferedRenderer*(*bufferedRendererCreateFunction)(const Renderer::ThreadConfiguration &configuration, void *aux), void *createFnAux, const Renderer::ThreadConfiguration &configuration, BufferedRenderProcess *process) { BufferedRenderer *renderer = bufferedRendererCreateFunction(configuration, createFnAux); bool shouldSynchFromDevice = (!((configuration.numHostThreads() > 0) || (configuration.numActiveDevices() > 1))); BufferedWindow bufferedWindow(shouldSynchFromDevice ? 
BufferedWindow::SYNCH_FRAME_BUFFER_FROM_DEVICE : 0); process->setRenderer(renderer); process->setTargetDisplayWindow(&bufferedWindow); FramerateLogger logger; logger.setBufferedWindow(&bufferedWindow); logger.registerIterationCompletionCallback(process); logger.start(); process->start(); while (!bufferedWindow.windowClosed()) std::this_thread::sleep_for(std::chrono::milliseconds(32)); process->end(); if ((renderer->getFrameBuffer() != NULL) && (renderer->getFrameBuffer()->cpuHandle() != NULL)) { std::cout << "Enter name ending with \".png\" to save the render: "; std::string rawLine; std::getline(std::cin, rawLine); int start = 0; while ((start < rawLine.length()) && std::isspace(rawLine[start])) start++; int end = (int)rawLine.size(); while ((end > 0) && std::isspace(rawLine[end - 1])) end--; std::string line; for (int i = start; i < end; i++) line += rawLine[i]; if ((line.length() > 4) && line.substr(line.length() - 4) == ".png") { std::cout << "Saving to \"" << line << "\"..." << std::endl; bool failed = false; if (shouldSynchFromDevice) { if (cudaSetDevice(0) != cudaSuccess) failed = true; else if (renderer->getFrameBuffer()->gpuHandle(0) == NULL) failed = true; else if (!renderer->getFrameBuffer()->cpuHandle()->updateHostBlocks( renderer->getFrameBuffer()->gpuHandle(0), 0, renderer->getFrameBuffer()->cpuHandle()->getBlockCount())) failed = true; } if (!failed) { Images::Error error = Images::saveBufferPNG(*renderer->getFrameBuffer()->cpuHandle(), line); if (error != Images::IMAGES_NO_ERROR) std::cout << "Failed to save image. Error code: " << error << std::endl; else std::cout << "Image saved successfuly;" << std::endl; } else std::cout << "Failed to load image from device..." 
<< std::endl; } } delete renderer; } } #define NUM_THREADS_PER_GPU 2 void runTestGauntlet( BufferedRenderer*(*bufferedRendererCreateFunction)(const Renderer::ThreadConfiguration &configuration, void *aux), void *createFnAux, FrameBufferManager *bufferA, FrameBufferManager *bufferB, uint32_t tests) { BufferedRenderProcess bufferedRenderProcess; bufferedRenderProcess.setInfinateTargetIterations(); bufferedRenderProcess.setTargetResolutionToWindowSize(); Renderer::ThreadConfiguration configuration; bufferedRenderProcess.setBuffer(bufferA); if ((tests & TEST_MULTI_ITER_CPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ALL); configuration.configureEveryGPU(Renderer::ThreadConfiguration::NONE); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_CPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if (configuration.numDevices() > 0) { if ((tests & TEST_MULTI_ITER_GPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::NONE); configuration.configureEveryGPU(NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_GPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if ((tests & TEST_MULTI_ITER_CPU_AND_GPU) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ALL_BUT_GPU_THREADS); configuration.configureEveryGPU(NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_CPU_AND_GPU", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } } if ((tests & TEST_MULTI_ITER_1_CPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ONE); configuration.configureEveryGPU(Renderer::ThreadConfiguration::NONE); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_1_CPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if (((tests & TEST_MULTI_ITER_1_GPU_ONLY) != 0) && 
(configuration.numDevices() > 1)) { configuration.configureCPU(Renderer::ThreadConfiguration::NONE); configuration.configureEveryGPU(0); configuration.configureGPU(0, NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_MULTI_ITER_1_GPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } bufferedRenderProcess.setDoubleBuffers(bufferA, bufferB); if ((tests & TEST_SINGLE_ITER_CPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ALL); configuration.configureEveryGPU(Renderer::ThreadConfiguration::NONE); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_CPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if (configuration.numDevices() > 0) { if ((tests & TEST_SINGLE_ITER_GPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::NONE); configuration.configureEveryGPU(NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_GPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if ((tests & TEST_SINGLE_ITER_CPU_AND_GPU) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ALL_BUT_GPU_THREADS); configuration.configureEveryGPU(NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_CPU_AND_GPU", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } } if ((tests & TEST_SINGLE_ITER_1_CPU_ONLY) != 0) { configuration.configureCPU(Renderer::ThreadConfiguration::ONE); configuration.configureEveryGPU(Renderer::ThreadConfiguration::NONE); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_1_CPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } if (((tests & TEST_SINGLE_ITER_1_GPU_ONLY) != 0) && (configuration.numDevices() > 1)) { configuration.configureCPU(Renderer::ThreadConfiguration::NONE); configuration.configureEveryGPU(0); 
configuration.configureGPU(0, NUM_THREADS_PER_GPU); Tests::runTest(testBufferedRenderProcess, "TEST_SINGLE_ITER_1_GPU_ONLY", bufferedRendererCreateFunction, createFnAux, configuration, &bufferedRenderProcess); } } }
b1d5788e8301ea21aeacf78b4fb95016479e79dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include "prototypes.h" #include "oct.h" #include <cutil.h> #include <cudpp.h> #define NPMAX 2097152 #define NOMAX 262144 #define NPBLOCK 64 struct PLIGHT{ int keyp; float xp; float yp; float zp; float mp; }; extern "C" void call_cic_GPU(int levelmax,int levelcoarse,struct OCT **firstoct, struct CPUINFO *cpu); //#define CUERR() printf("\n %s \n",hipGetErrorString(hipGetLastError())) void CPU2GPU(float *gpupt, float *cpupt, int noctet) { hipMemcpy(gpupt,cpupt,noctet,hipMemcpyHostToDevice); } void GPU2CPU(float *cpupt, float *gpupt, int noctet) { hipMemcpy(cpupt,gpupt,noctet,hipMemcpyDeviceToHost); } void GPU2GPU(float *cpupt, float *gpupt, int noctet) { hipMemcpy(cpupt,gpupt,noctet,hipMemcpyDeviceToDevice); } void CPU2GPU_INT(int *gpupt, int *cpupt, int noctet) { hipMemcpy(gpupt,cpupt,noctet,hipMemcpyHostToDevice); } void GPU2CPU_INT(int *cpupt, int *gpupt, int noctet) { hipMemcpy(cpupt,gpupt,noctet,hipMemcpyDeviceToHost); } void CPU2GPU_UINT(unsigned int *gpupt, unsigned int *cpupt, int noctet) { hipMemcpy(gpupt,cpupt,noctet,hipMemcpyHostToDevice); } void GPU2CPU_UINT(unsigned int *cpupt, unsigned int *gpupt, int noctet) { hipMemcpy(cpupt,gpupt,noctet,hipMemcpyDeviceToHost); } struct OCT* cell2oct(struct CELL* cell) { long int adress; struct OCT *poct; adress=(long int) cell; adress=adress-cell->idx*sizeof(struct CELL); poct=(struct OCT*) adress; return poct; } //------------------------------------------------------------------------ void getcellnei(int cindex, int *neip, int *cell) { switch(cindex){ case 0: neip[0]=0;cell[0]=1; neip[1]=6;cell[1]=1; neip[2]=2;cell[2]=2; neip[3]=6;cell[3]=2; neip[4]=4;cell[4]=4; neip[5]=6;cell[5]=4; break; case 1: neip[0]=6;cell[0]=0; neip[1]=1;cell[1]=0; neip[2]=2;cell[2]=3; neip[3]=6;cell[3]=3; neip[4]=4;cell[4]=5; neip[5]=6;cell[5]=5; break; case 2: neip[0]=0;cell[0]=3; 
neip[1]=6;cell[1]=3; neip[2]=6;cell[2]=0; neip[3]=3;cell[3]=0; neip[4]=4;cell[4]=6; neip[5]=6;cell[5]=6; break; case 3: neip[0]=6;cell[0]=2; neip[1]=1;cell[1]=2; neip[2]=6;cell[2]=1; neip[3]=3;cell[3]=1; neip[4]=4;cell[4]=7; neip[5]=6;cell[5]=7; break; case 4: neip[0]=0;cell[0]=5; neip[1]=6;cell[1]=5; neip[2]=2;cell[2]=6; neip[3]=6;cell[3]=6; neip[4]=6;cell[4]=0; neip[5]=5;cell[5]=0; break; case 5: neip[0]=6;cell[0]=4; neip[1]=1;cell[1]=4; neip[2]=2;cell[2]=7; neip[3]=6;cell[3]=7; neip[4]=6;cell[4]=1; neip[5]=5;cell[5]=1; break; case 6: neip[0]=0;cell[0]=7; neip[1]=6;cell[1]=7; neip[2]=6;cell[2]=4; neip[3]=3;cell[3]=4; neip[4]=6;cell[4]=2; neip[5]=5;cell[5]=2; break; case 7: neip[0]=6;cell[0]=6; neip[1]=1;cell[1]=6; neip[2]=6;cell[2]=5; neip[3]=3;cell[3]=5; neip[4]=6;cell[4]=3; neip[5]=5;cell[5]=3; break; } } // ==================================================================== __global__ void carte_mass(float dx, float *xp, float *yp, float *zp, float* md, float *mass, int offx, int offy, int offz) { int thx=threadIdx.x; int bx=blockIdx.x; // Charge les positions depuis la mmoire globale vers la mmoire partage int dataPos = bx*blockDim.x + thx; // position du thread dans data /* float x = xp[dataPos]/dx; */ /* float y = yp[dataPos]/dx; */ /* float z = zp[dataPos]/dx; */ /* int xc=(int)x; */ /* int yc=(int)y; */ /* int zc=(int)z; */ float tx,ty,tz; tx=(1.-offx)+(2.*offx-1.)*xp[dataPos]/dx; ty=(1.-offy)+(2.*offy-1.)*yp[dataPos]/dx; tz=(1.-offz)+(2.*offz-1.)*zp[dataPos]/dx; /* tx=(1.-offx)+(2.*offx-1.)*(x-xc); */ /* ty=(1.-offy)+(2.*offy-1.)*(y-yc); */ /* tz=(1.-offz)+(2.*offz-1.)*(z-zc); */ mass[dataPos]=tx*ty*tz/(dx*dx*dx)*md[dataPos]; // } // ==================================================================== __global__ void carte_flag_previous(int *d_data, unsigned int *d_flag ) { unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; unsigned int flag; int idglob=bx*blockDim.x+tx; uint current =d_data[idglob]; uint previous=d_data[idglob-1]; 
flag=((current!=previous)||(idglob==0)); d_flag[idglob]=flag; } // ==================================================================== __global__ void carte_flag_next(int *d_data, unsigned int *d_flag, int np ) { unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; unsigned int flag; int idglob=bx*blockDim.x+tx; uint current =d_data[idglob]; uint next=d_data[idglob+1]; flag=((current!=next)||(idglob==(np-1))); d_flag[idglob]=flag; } // ================================================================ struct OCT * cic_nei_oct(struct OCT* curoct, int cx, int cy, int cz) { struct OCT* woct; struct OCT* newoct; int ioct; // computing the oct "index" for neighbor cells ioct=(cx==2)+((cy==2)<<1)+((cz==2)<<2); woct=NULL; switch(ioct){ case 0: //------------------------------------- woct=curoct; break; case 1://------------------------------------- woct=curoct->nei[1]->child; break; case 2://------------------------------------- woct=curoct->nei[3]->child; break; case 4://------------------------------------- woct=curoct->nei[5]->child; break; case 3://------------------------------------- if(curoct->nei[1]->child!=NULL){ woct=curoct->nei[1]->child->nei[3]->child; } else if(curoct->nei[3]->child!=NULL){ woct=curoct->nei[3]->child->nei[1]->child; } break; case 5://------------------------------------- if(curoct->nei[1]->child!=NULL){ woct=curoct->nei[1]->child->nei[5]->child; } else if(curoct->nei[5]->child!=NULL){ woct=curoct->nei[5]->child->nei[1]->child; } break; case 6://------------------------------------- if(curoct->nei[3]->child!=NULL){ woct=curoct->nei[3]->child->nei[5]->child; } else if(curoct->nei[5]->child!=NULL){ woct=curoct->nei[5]->child->nei[3]->child; } break; case 7://------------------------------------- if(curoct->nei[1]->child!=NULL){ if(curoct->nei[1]->child->nei[3]->child!=NULL){ woct=curoct->nei[1]->child->nei[3]->child->nei[5]->child; } else if(curoct->nei[1]->child->nei[5]->child!=NULL){ woct=curoct->nei[1]->child->nei[5]->child->nei[3]->child; 
} } if((curoct->nei[3]->child!=NULL)&&(woct==NULL)){ if(curoct->nei[3]->child->nei[1]->child!=NULL){ woct=curoct->nei[3]->child->nei[1]->child->nei[5]->child; } else if(curoct->nei[3]->child->nei[5]->child!=NULL){ woct=curoct->nei[3]->child->nei[5]->child->nei[1]->child; } } if((curoct->nei[5]->child!=NULL)&&(woct==NULL)){ if(curoct->nei[5]->child->nei[1]->child!=NULL){ woct=curoct->nei[5]->child->nei[1]->child->nei[3]->child; } else if(curoct->nei[5]->child->nei[3]->child!=NULL){ woct=curoct->nei[5]->child->nei[3]->child->nei[1]->child; } } break; } return woct; } // ==================================================================== void getparticles(struct OCT *curoct, int *keyp, float *xp, float *yp, float *zp, float *mp, int *ipart, int root, float ix0, float iy0, float iz0, int idxoct,float idxcur, int iicell){ int icell,icell2; struct PART *nexp; struct PART *curp; float x0,y0,z0; float xpc,ypc,zpc; int cx,cy,cz; int i,j,k; float dxcur; struct OCT * woct; int inx,iny,inz; for(icell=0;icell<8;icell++) // looping over cells in oct { if(root){ // we start the recursion dxcur=1./(1<<(curoct->level)); // size of a cell x0=(curoct->x+(icell%2)*dxcur); //corner coordinates y0=(curoct->y+((icell/2)%2)*dxcur); z0=(curoct->z+(icell/4)*dxcur); iicell=icell; } else{ x0=ix0; y0=iy0; z0=iz0; dxcur=idxcur; } // =============== looking for cic neighbors at levels >= current level for(i=0;i<2;i++){ for(j=0;j<2;j++){ for(k=0;k<2;k++){ if((!root)*((i+j+k)!=0)) break; // for higher levels particle the 8 cells should not be explored cx=(icell&1)+i; cy=((icell>>1)&1)+j; cz=(icell>>2)+k; // getting the neighbor oct woct=cic_nei_oct(curoct,cx,cy,cz); // at this stage we have the recipient oct // we recompute the cell index icell2=(cx&1)+((cy&1)<<1)+((cz&1)<<2); // gathering particles if(woct!=NULL){ if(woct->cell[icell2].child!=NULL){ // the cell is refined we go one level further getparticles(woct->cell[icell2].child, keyp,xp,yp,zp,mp,ipart,0,x0,y0,z0,idxoct,dxcur,iicell); } 
else{ nexp=woct->cell[icell2].phead; //sweeping the particles of the current cell */ if(nexp!=NULL){ do{ curp=nexp; nexp=curp->next; xpc=curp->x-dxcur*0.5;xpc=(xpc<0?1.+xpc:xpc); ypc=curp->y-dxcur*0.5;ypc=(ypc<0?1.+ypc:ypc); zpc=curp->z-dxcur*0.5;zpc=(zpc<0?1.+zpc:zpc); // testing the particle (assuming particles are centered) // is the lower left corner inside ? inx=((xpc-x0)>=0.)*((xpc-x0)<dxcur); iny=((ypc-y0)>=0.)*((ypc-y0)<dxcur); inz=((zpc-z0)>=0.)*((zpc-z0)<dxcur); //printf("m=%e inx=%d iny=%d inz=%d xp=%e x0=%e\n",curp->mass,inx,iny,inz,ypc,y0); if((inx*iny*inz)!=1) continue; keyp[*ipart]=idxoct*8+iicell; // computing the key of each particle // we compute the relative position to ensure a consistency between cell and particle xp[*ipart]=xpc-x0; yp[*ipart]=ypc-y0; zp[*ipart]=zpc-z0; mp[*ipart]=curp->mass; (*ipart)=(*ipart)+1; }while(nexp!=NULL); } } } } } } } } // ==================================================================== void call_cic_GPU(int levelmax,int levelcoarse,struct OCT **firstoct, struct CPUINFO *cpu){ int level; struct OCT *nextoct; struct OCT *curoct; struct OCT *woct; int icell; /* struct PART *curp; */ /* struct PART *nexp; */ float dxcur; int ipart,ip; int i,j,k; // ========================= CPU STUFF ===================== int *keyp; //contains the particle keys unsigned int *flag; //contains the particle flags float *xp; //contains the particle x float *yp; //contains the particle y float *zp; //contains the particle z float *mp; //contains the particle mass float *massp2; //contains the particle cumulated mass // int keyo[NOMAX]; // contains the oct key //int keyoloc; struct OCT **keyodict; // dictionnary keyo -> pointer // int ncart; // contains the max dimension along one direction // ========================= GPU STUFF ===================== // dimension configuration for Particle treatment dim3 dimGridPart(NPMAX/NPBLOCK); // np/64 dim3 dimBlockPart(NPBLOCK); // 64 unsigned int *flag_d; int *keyp_d; float *xp_d; float *yp_d; 
float *zp_d; float *m_d; float *mass_d; float *mass2_d; if(cpu->rank==RANK_DISP) printf("==> start CIC on GPU\n"); // alloc 2 keyp=(int*)calloc(NPMAX,sizeof(int)); flag=(unsigned int*)calloc(NPMAX,sizeof(unsigned int)); xp=(float*)calloc(NPMAX,sizeof(float)); yp=(float*)calloc(NPMAX,sizeof(float)); zp=(float*)calloc(NPMAX,sizeof(float)); mp=(float*)calloc(NPMAX,sizeof(float)); massp2=(float*)calloc(NPMAX,sizeof(float)); keyodict=(struct OCT **)calloc(NOMAX,sizeof(struct OCT *)); hipMalloc((void **)&mass_d,sizeof(float)*NPMAX); hipMalloc((void **)&mass2_d,sizeof(float)*NPMAX); hipMalloc((void **)&flag_d,sizeof(unsigned int)*NPMAX); hipMalloc((void **)&keyp_d,sizeof(int)*NPMAX); hipMalloc((void **)&xp_d,sizeof(float)*NPMAX); hipMalloc((void **)&yp_d,sizeof(float)*NPMAX); hipMalloc((void **)&zp_d,sizeof(float)*NPMAX); hipMalloc((void **)&m_d,sizeof(float)*NPMAX); // ========================= GPU STUFF ===================== // ========================== First we clean the density for(level=levelmax;level>=levelcoarse;level--) { nextoct=firstoct[level-1]; if(nextoct==NULL) continue; do // sweeping level { curoct=nextoct; nextoct=curoct->next; for(icell=0;icell<8;icell++) curoct->cell[icell].density=0.; }while(nextoct!=NULL); } // =========================== Second start CIC for(ip=0;ip<NPMAX;ip++) keyp[ip]=-1; // initialize the particle keys for(ip=0;ip<NOMAX;ip++){ keyodict[ip]=NULL; } int idxoct; for(level=levelcoarse;level<=levelmax;level++) { dxcur=1./(1<<level); // size of a cell nextoct=firstoct[level-1]; //ncart=1<<(level-1); // number of octs along one dimension // ======================== sweeping the octs sort the oct keys if(nextoct==NULL) continue; idxoct=0; ipart=0; memset(keyp,0,sizeof(int)*NPMAX); memset(xp,0,sizeof(int)*NPMAX); memset(yp,0,sizeof(int)*NPMAX); memset(zp,0,sizeof(int)*NPMAX); memset(mp,0,sizeof(int)*NPMAX); memset(keyodict,0,sizeof(struct OCT* )*NOMAX); // gathering particles from levels>=current level do { // =============== FIX LARGE OCT 
NUMBERS ===== !!!! curoct=nextoct; nextoct=curoct->next; // we skip octs which do not belong to the current CPU (they will be considered through mpi) if(curoct->cpu!=cpu->rank) continue; keyodict[idxoct]=curoct; // building the dictionnary // gathering particles from all levels >= current level getparticles(curoct, keyp,xp,yp,zp,mp,&ipart,1,0.,0.,0.,idxoct,0.,0); idxoct++; }while(nextoct!=NULL); // ================================== launching the GPU horses ===================== // --------- some config CUDPPConfiguration config; config.algorithm = CUDPP_SEGMENTED_SCAN; config.op = CUDPP_ADD; config.datatype = CUDPP_FLOAT; CUDPPOption direction = CUDPP_OPTION_FORWARD; CUDPPOption inclusivity = CUDPP_OPTION_INCLUSIVE; config.options = direction | inclusivity; CUDPPHandle plan; cudppPlan(&plan, config, ipart, 1, 0); // ---------- sending data CPU2GPU_INT(keyp_d,keyp,sizeof(int)*NPMAX); CPU2GPU(xp_d,xp,sizeof(float)*NPMAX); CPU2GPU(yp_d,yp,sizeof(float)*NPMAX); CPU2GPU(zp_d,zp,sizeof(float)*NPMAX); CPU2GPU( m_d,mp,sizeof(float)*NPMAX); hipMemset(mass_d,0,sizeof(float)*NPMAX); hipMemset(mass2_d,0,sizeof(float)*NPMAX); hipMemset(flag_d,0,sizeof(unsigned int)*NPMAX); // ---------- kernels start // flag segments hipLaunchKernelGGL(( carte_flag_next), dim3(dimGridPart),dim3(dimBlockPart), 0, 0, keyp_d,flag_d,ipart); hipDeviceSynchronize(); GPU2CPU_UINT(flag,flag_d,sizeof(unsigned int)*NPMAX); hipLaunchKernelGGL(( carte_flag_previous), dim3(dimGridPart),dim3(dimBlockPart), 0, 0, keyp_d,flag_d); hipDeviceSynchronize(); // scanning the 8 CIC calculations int cx,cy,cz; for(i=0;i<2;i++) { for(j=0;j<2;j++) { for(k=0;k<2;k++) { // segment scan hipMemset(mass_d,0,sizeof(float)*NPMAX); hipLaunchKernelGGL(( carte_mass), dim3(dimGridPart),dim3(dimBlockPart), 0, 0, dxcur,xp_d,yp_d,zp_d,m_d,mass_d,i,j,k); hipDeviceSynchronize(); //CUERR(); GPU2CPU(massp2,mass_d,sizeof(float)*NPMAX); cudppSegmentedScan(plan, mass2_d, mass_d, flag_d, ipart); hipDeviceSynchronize(); // ------------ 
getting the data back GPU2CPU(massp2,mass2_d,sizeof(float)*NPMAX); // ------------ scatter in the tree int idxoct,icell; for(ip=0;ip<ipart;ip++){ if(flag[ip]==1){ idxoct=keyp[ip]>>3; // oct index icell=keyp[ip]&7; // cell index curoct=keyodict[idxoct]; // the current oct cx=(icell&1)+i; cy=((icell>>1)&1)+j; cz=(icell>>2)+k; // getting the neighbor oct woct=cic_nei_oct(curoct,cx,cy,cz); // at this stage we have the recipitent oct // we recompute the cell index icell=(cx&1)+((cy&1)<<1)+((cz&1)<<2); if(woct!=NULL){ woct->cell[icell].density+=massp2[ip]; } } } } } } // ------------- Done cudppDestroyPlan(plan); } // going to next level hipFree(mass_d); hipFree(mass2_d); hipFree(xp_d); hipFree(yp_d); hipFree(zp_d); hipFree(m_d); hipFree(keyp_d); hipFree(flag_d); free(flag); free(keyp); free(keyodict); free(xp); free(yp); free(zp); free(mp); free(massp2); }
b1d5788e8301ea21aeacf78b4fb95016479e79dc.cu
// =====================================================================
// Cloud-In-Cell (CIC) mass assignment on the GPU for an AMR oct-tree.
// Per refinement level: particles are gathered on the CPU into flat
// buffers keyed by destination (oct,cell), the 8 CIC corner weights are
// computed on the GPU and reduced per key with a CUDPP inclusive
// segmented scan, and the per-segment totals are scattered back into
// the tree density field.
// =====================================================================
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "prototypes.h"
#include "oct.h"
#include <cutil.h>
#include <cudpp.h>

#define NPMAX 2097152 /* capacity (particles) of the per-level staging buffers */
#define NOMAX 262144  /* capacity (octs) of the per-level oct dictionary */
#define NPBLOCK 64    /* CUDA block size for the particle kernels */

// Compact per-particle record (not referenced in this file; kept for
// compatibility -- presumably used elsewhere, TODO confirm).
struct PLIGHT{
  int keyp;
  float xp;
  float yp;
  float zp;
  float mp;
};

extern "C" void call_cic_GPU(int levelmax,int levelcoarse,struct OCT **firstoct, struct CPUINFO *cpu);

//#define CUERR() printf("\n %s \n",cudaGetErrorString(cudaGetLastError()))

// ---- thin cudaMemcpy wrappers; 'noctet' is always a size in BYTES ----
void CPU2GPU(float *gpupt, float *cpupt, int noctet)
{
  cudaMemcpy(gpupt,cpupt,noctet,cudaMemcpyHostToDevice);
}

void GPU2CPU(float *cpupt, float *gpupt, int noctet)
{
  cudaMemcpy(cpupt,gpupt,noctet,cudaMemcpyDeviceToHost);
}

void GPU2GPU(float *cpupt, float *gpupt, int noctet)
{
  cudaMemcpy(cpupt,gpupt,noctet,cudaMemcpyDeviceToDevice);
}

void CPU2GPU_INT(int *gpupt, int *cpupt, int noctet)
{
  cudaMemcpy(gpupt,cpupt,noctet,cudaMemcpyHostToDevice);
}

void GPU2CPU_INT(int *cpupt, int *gpupt, int noctet)
{
  cudaMemcpy(cpupt,gpupt,noctet,cudaMemcpyDeviceToHost);
}

void CPU2GPU_UINT(unsigned int *gpupt, unsigned int *cpupt, int noctet)
{
  cudaMemcpy(gpupt,cpupt,noctet,cudaMemcpyHostToDevice);
}

void GPU2CPU_UINT(unsigned int *cpupt, unsigned int *gpupt, int noctet)
{
  cudaMemcpy(cpupt,gpupt,noctet,cudaMemcpyDeviceToHost);
}

// Recover the OCT that owns a CELL from the cell's own address: cells
// are stored by value in OCT::cell[], so subtracting idx*sizeof(CELL)
// lands on the start of the enclosing oct.
struct OCT* cell2oct(struct CELL* cell)
{
  long int adress;
  struct OCT *poct;
  adress=(long int) cell;
  adress=adress-cell->idx*sizeof(struct CELL);
  poct=(struct OCT*) adress;
  return poct;
}

//------------------------------------------------------------------------
// For cell 'cindex' (0..7) of an oct, fill for each of the 6 face
// directions the oct to look in (neip: 0..5 = external neighbor index,
// 6 apparently means "same oct" -- confirm against callers) and the
// cell index inside that oct.
void getcellnei(int cindex, int *neip, int *cell)
{
  switch(cindex){
  case 0:
    neip[0]=0;cell[0]=1;
    neip[1]=6;cell[1]=1;
    neip[2]=2;cell[2]=2;
    neip[3]=6;cell[3]=2;
    neip[4]=4;cell[4]=4;
    neip[5]=6;cell[5]=4;
    break;
  case 1:
    neip[0]=6;cell[0]=0;
    neip[1]=1;cell[1]=0;
    neip[2]=2;cell[2]=3;
    neip[3]=6;cell[3]=3;
    neip[4]=4;cell[4]=5;
    neip[5]=6;cell[5]=5;
    break;
  case 2:
    neip[0]=0;cell[0]=3;
    neip[1]=6;cell[1]=3;
    neip[2]=6;cell[2]=0;
    neip[3]=3;cell[3]=0;
    neip[4]=4;cell[4]=6;
    neip[5]=6;cell[5]=6;
    break;
  case 3:
    neip[0]=6;cell[0]=2;
    neip[1]=1;cell[1]=2;
    neip[2]=6;cell[2]=1;
    neip[3]=3;cell[3]=1;
    neip[4]=4;cell[4]=7;
    neip[5]=6;cell[5]=7;
    break;
  case 4:
    neip[0]=0;cell[0]=5;
    neip[1]=6;cell[1]=5;
    neip[2]=2;cell[2]=6;
    neip[3]=6;cell[3]=6;
    neip[4]=6;cell[4]=0;
    neip[5]=5;cell[5]=0;
    break;
  case 5:
    neip[0]=6;cell[0]=4;
    neip[1]=1;cell[1]=4;
    neip[2]=2;cell[2]=7;
    neip[3]=6;cell[3]=7;
    neip[4]=6;cell[4]=1;
    neip[5]=5;cell[5]=1;
    break;
  case 6:
    neip[0]=0;cell[0]=7;
    neip[1]=6;cell[1]=7;
    neip[2]=6;cell[2]=4;
    neip[3]=3;cell[3]=4;
    neip[4]=6;cell[4]=2;
    neip[5]=5;cell[5]=2;
    break;
  case 7:
    neip[0]=6;cell[0]=6;
    neip[1]=1;cell[1]=6;
    neip[2]=6;cell[2]=5;
    neip[3]=3;cell[3]=5;
    neip[4]=6;cell[4]=3;
    neip[5]=5;cell[5]=3;
    break;
  }
}

// ====================================================================
// Per-particle CIC weight kernel: one thread per particle slot.
// (offx,offy,offz) in {0,1} selects one of the 8 CIC corners; tx*ty*tz
// is the linear interpolation weight for that corner (positions in
// xp/yp/zp are cell-relative, in [0,dx)). mass[] receives
// weight * particle_mass / cell_volume.
__global__ void carte_mass(float dx, float *xp, float *yp, float *zp, float* md, float *mass, int offx, int offy, int offz)
{
  int thx=threadIdx.x;
  int bx=blockIdx.x;
  int dataPos = bx*blockDim.x + thx; // global particle slot of this thread

  // float literals (1.f, 2.f) avoid the silent double-precision
  // promotion the original 1., 2. literals caused in device code
  float tx,ty,tz;
  tx=(1.f-offx)+(2.f*offx-1.f)*xp[dataPos]/dx;
  ty=(1.f-offy)+(2.f*offy-1.f)*yp[dataPos]/dx;
  tz=(1.f-offz)+(2.f*offz-1.f)*zp[dataPos]/dx;

  mass[dataPos]=tx*ty*tz/(dx*dx*dx)*md[dataPos];
}

// ====================================================================
// Segment-START flags for the segmented scan: flag[i]=1 when particle i
// opens a new (oct,cell) segment, i.e. its key differs from key[i-1].
// FIX: the original unconditionally read d_data[idglob-1], an
// out-of-bounds access for idglob==0; the first element is now flagged
// without touching d_data[-1].
__global__ void carte_flag_previous(int *d_data, unsigned int *d_flag )
{
  unsigned int bx=blockIdx.x;
  unsigned int tx=threadIdx.x;
  int idglob=bx*blockDim.x+tx;
  unsigned int flag;

  if(idglob==0){
    flag=1; // the first particle always starts a segment
  }
  else{
    flag=(d_data[idglob]!=d_data[idglob-1]);
  }
  d_flag[idglob]=flag;
}

// ====================================================================
// Segment-END flags: flag[i]=1 when particle i closes its segment (its
// key differs from key[i+1]); after an INCLUSIVE scan these positions
// hold the segment totals. np is the number of valid particles.
// FIX: the original read d_data[idglob+1] for every thread of the grid,
// including idglob==NPMAX-1 (one element past the allocation); slots at
// or beyond np-1 are now handled without that read. Padding slots
// (idglob>=np) get flag=0; they were garbage-dependent before but are
// never consumed by the host scatter loop either way.
__global__ void carte_flag_next(int *d_data, unsigned int *d_flag, int np )
{
  unsigned int bx=blockIdx.x;
  unsigned int tx=threadIdx.x;
  int idglob=bx*blockDim.x+tx;
  unsigned int flag;

  if(idglob>=(np-1)){
    flag=(idglob==(np-1)); // last valid particle always ends a segment
  }
  else{
    flag=(d_data[idglob]!=d_data[idglob+1]);
  }
  d_flag[idglob]=flag;
}

// ================================================================
// Return the oct containing the CIC neighbor cell (cx,cy,cz) of
// curoct, where each coordinate is in {0,1,2}: 2 means the neighbor
// lies in the adjacent oct along that axis. Diagonal cases try every
// available path through refined intermediate neighbors; returns NULL
// when no path exists. (Unused local 'newoct' removed.)
struct OCT * cic_nei_oct(struct OCT* curoct, int cx, int cy, int cz)
{
  struct OCT* woct;
  int ioct;

  // neighbor-oct "index": one bit per axis crossing an oct boundary
  ioct=(cx==2)+((cy==2)<<1)+((cz==2)<<2);

  woct=NULL;
  switch(ioct){
  case 0: // same oct ----------------------------------
    woct=curoct;
    break;
  case 1: // across nei[1] -----------------------------
    woct=curoct->nei[1]->child;
    break;
  case 2: // across nei[3] -----------------------------
    woct=curoct->nei[3]->child;
    break;
  case 4: // across nei[5] -----------------------------
    woct=curoct->nei[5]->child;
    break;
  case 3: // edge: nei[1] and nei[3] -------------------
    if(curoct->nei[1]->child!=NULL){
      woct=curoct->nei[1]->child->nei[3]->child;
    }
    else if(curoct->nei[3]->child!=NULL){
      woct=curoct->nei[3]->child->nei[1]->child;
    }
    break;
  case 5: // edge: nei[1] and nei[5] -------------------
    if(curoct->nei[1]->child!=NULL){
      woct=curoct->nei[1]->child->nei[5]->child;
    }
    else if(curoct->nei[5]->child!=NULL){
      woct=curoct->nei[5]->child->nei[1]->child;
    }
    break;
  case 6: // edge: nei[3] and nei[5] -------------------
    if(curoct->nei[3]->child!=NULL){
      woct=curoct->nei[3]->child->nei[5]->child;
    }
    else if(curoct->nei[5]->child!=NULL){
      woct=curoct->nei[5]->child->nei[3]->child;
    }
    break;
  case 7: // corner: all three axes --------------------
    if(curoct->nei[1]->child!=NULL){
      if(curoct->nei[1]->child->nei[3]->child!=NULL){
        woct=curoct->nei[1]->child->nei[3]->child->nei[5]->child;
      }
      else if(curoct->nei[1]->child->nei[5]->child!=NULL){
        woct=curoct->nei[1]->child->nei[5]->child->nei[3]->child;
      }
    }
    if((curoct->nei[3]->child!=NULL)&&(woct==NULL)){
      if(curoct->nei[3]->child->nei[1]->child!=NULL){
        woct=curoct->nei[3]->child->nei[1]->child->nei[5]->child;
      }
      else if(curoct->nei[3]->child->nei[5]->child!=NULL){
        woct=curoct->nei[3]->child->nei[5]->child->nei[1]->child;
      }
    }
    if((curoct->nei[5]->child!=NULL)&&(woct==NULL)){
      if(curoct->nei[5]->child->nei[1]->child!=NULL){
        woct=curoct->nei[5]->child->nei[1]->child->nei[3]->child;
      }
      else if(curoct->nei[5]->child->nei[3]->child!=NULL){
        woct=curoct->nei[5]->child->nei[3]->child->nei[1]->child;
      }
    }
    break;
  }
  return woct;
}

// ====================================================================
// Recursively collect, for one coarse (oct,cell) target, every particle
// (stored at this level or deeper) whose CIC cloud corner falls inside
// the target cell. For each accepted particle the routine emits:
//   keyp = idxoct*8 + iicell   (segment key: oct slot + cell index)
//   xp,yp,zp = cloud lower-left corner RELATIVE to the target corner
//              (x0,y0,z0);  mp = particle mass.
// root!=0 on the first call; recursive calls pass root=0 and keep the
// original corner (ix0,iy0,iz0), cell size (idxcur) and cell (iicell).
void getparticles(struct OCT *curoct, int *keyp, float *xp, float *yp, float *zp, float *mp, int *ipart, int root, float ix0, float iy0, float iz0, int idxoct,float idxcur, int iicell){

  int icell,icell2;
  struct PART *nexp;
  struct PART *curp;
  float x0,y0,z0;
  float xpc,ypc,zpc;
  int cx,cy,cz;
  int i,j,k;
  float dxcur;
  struct OCT * woct;
  int inx,iny,inz;

  for(icell=0;icell<8;icell++) // looping over cells in oct
    {
      if(root){
        // start of the recursion: the target cell is (curoct,icell)
        dxcur=1./(1<<(curoct->level)); // size of a cell
        x0=(curoct->x+(icell%2)*dxcur); // corner coordinates
        y0=(curoct->y+((icell/2)%2)*dxcur);
        z0=(curoct->z+(icell/4)*dxcur);
        iicell=icell;
      }
      else{
        // deeper levels keep the original target geometry
        x0=ix0;
        y0=iy0;
        z0=iz0;
        dxcur=idxcur;
      }

      // ====== looking for cic neighbors at levels >= current level
      for(i=0;i<2;i++){
        for(j=0;j<2;j++){
          for(k=0;k<2;k++){
            // below the root level only the (0,0,0) neighbor is explored
            if((!root)*((i+j+k)!=0)) break;

            cx=(icell&1)+i;
            cy=((icell>>1)&1)+j;
            cz=(icell>>2)+k;

            // getting the neighbor oct
            woct=cic_nei_oct(curoct,cx,cy,cz);

            // recompute the cell index inside the recipient oct
            icell2=(cx&1)+((cy&1)<<1)+((cz&1)<<2);

            // gathering particles
            if(woct!=NULL){
              if(woct->cell[icell2].child!=NULL){
                // the cell is refined: go one level further
                getparticles(woct->cell[icell2].child, keyp,xp,yp,zp,mp,ipart,0,x0,y0,z0,idxoct,dxcur,iicell);
              }
              else{
                nexp=woct->cell[icell2].phead; // sweeping the particles of the current cell
                if(nexp!=NULL){
                  do{
                    curp=nexp;
                    nexp=curp->next;

                    // lower-left corner of the particle's CIC cloud,
                    // wrapped for periodic boundaries
                    xpc=curp->x-dxcur*0.5;xpc=(xpc<0?1.+xpc:xpc);
                    ypc=curp->y-dxcur*0.5;ypc=(ypc<0?1.+ypc:ypc);
                    zpc=curp->z-dxcur*0.5;zpc=(zpc<0?1.+zpc:zpc);

                    // keep the particle only if that corner lies in the target cell
                    inx=((xpc-x0)>=0.)*((xpc-x0)<dxcur);
                    iny=((ypc-y0)>=0.)*((ypc-y0)<dxcur);
                    inz=((zpc-z0)>=0.)*((zpc-z0)<dxcur);
                    if((inx*iny*inz)!=1) continue;

                    keyp[*ipart]=idxoct*8+iicell; // segment key of the particle
                    // cell-relative position keeps cell and particle consistent
                    xp[*ipart]=xpc-x0;
                    yp[*ipart]=ypc-y0;
                    zp[*ipart]=zpc-z0;
                    mp[*ipart]=curp->mass;
                    (*ipart)=(*ipart)+1;
                  }while(nexp!=NULL);
                }
              }
            }
          }
        }
      }
    }
}

// ====================================================================
// Entry point: deposit particle mass onto the tree density field with
// CIC, one refinement level at a time, using the GPU for the weight
// computation and the per-(oct,cell) segmented reduction.
void call_cic_GPU(int levelmax,int levelcoarse,struct OCT **firstoct, struct CPUINFO *cpu){

  int level;
  struct OCT *nextoct;
  struct OCT *curoct;
  struct OCT *woct;
  int icell;
  float dxcur;
  int ipart,ip;
  int i,j,k;

  // ========================= CPU STUFF =====================
  int *keyp;             // particle keys (idxoct*8+icell)
  unsigned int *flag;    // host copy of the segment-END flags
  float *xp;             // particle x (cell-relative)
  float *yp;             // particle y
  float *zp;             // particle z
  float *mp;             // particle mass
  float *massp2;         // scanned (cumulated) masses copied back
  struct OCT **keyodict; // dictionary: oct slot index -> oct pointer

  // ========================= GPU STUFF =====================
  // launch configuration for the particle kernels (whole NPMAX buffer)
  dim3 dimGridPart(NPMAX/NPBLOCK);
  dim3 dimBlockPart(NPBLOCK);

  unsigned int *flag_d;
  int *keyp_d;
  float *xp_d;
  float *yp_d;
  float *zp_d;
  float *m_d;
  float *mass_d;
  float *mass2_d;

  if(cpu->rank==RANK_DISP) printf("==> start CIC on GPU\n");

  // host allocations (zero-initialised)
  keyp=(int*)calloc(NPMAX,sizeof(int));
  flag=(unsigned int*)calloc(NPMAX,sizeof(unsigned int));
  xp=(float*)calloc(NPMAX,sizeof(float));
  yp=(float*)calloc(NPMAX,sizeof(float));
  zp=(float*)calloc(NPMAX,sizeof(float));
  mp=(float*)calloc(NPMAX,sizeof(float));
  massp2=(float*)calloc(NPMAX,sizeof(float));
  keyodict=(struct OCT **)calloc(NOMAX,sizeof(struct OCT *));

  // device allocations
  cudaMalloc((void **)&mass_d,sizeof(float)*NPMAX);
  cudaMalloc((void **)&mass2_d,sizeof(float)*NPMAX);
  cudaMalloc((void **)&flag_d,sizeof(unsigned int)*NPMAX);
  cudaMalloc((void **)&keyp_d,sizeof(int)*NPMAX);
  cudaMalloc((void **)&xp_d,sizeof(float)*NPMAX);
  cudaMalloc((void **)&yp_d,sizeof(float)*NPMAX);
  cudaMalloc((void **)&zp_d,sizeof(float)*NPMAX);
  cudaMalloc((void **)&m_d,sizeof(float)*NPMAX);

  // ========================== First we clean the density
  for(level=levelmax;level>=levelcoarse;level--)
    {
      nextoct=firstoct[level-1];
      if(nextoct==NULL) continue;
      do // sweeping level
        {
          curoct=nextoct;
          nextoct=curoct->next;
          for(icell=0;icell<8;icell++) curoct->cell[icell].density=0.;
        }while(nextoct!=NULL);
    }

  // =========================== Second start CIC
  for(ip=0;ip<NPMAX;ip++) keyp[ip]=-1; // initialize the particle keys
  for(ip=0;ip<NOMAX;ip++) keyodict[ip]=NULL;

  int idxoct;
  for(level=levelcoarse;level<=levelmax;level++)
    {
      dxcur=1./(1<<level); // size of a cell at this level
      nextoct=firstoct[level-1];
      if(nextoct==NULL) continue;

      idxoct=0;
      ipart=0;

      // reset the per-level staging buffers.
      // FIX: the float buffers were cleared with sizeof(int); use the
      // element type so the clears stay correct whatever the platform.
      memset(keyp,0,sizeof(int)*NPMAX);
      memset(xp,0,sizeof(float)*NPMAX);
      memset(yp,0,sizeof(float)*NPMAX);
      memset(zp,0,sizeof(float)*NPMAX);
      memset(mp,0,sizeof(float)*NPMAX);
      memset(keyodict,0,sizeof(struct OCT *)*NOMAX);

      // -------- gather particles from levels >= current level
      // NOTE(review): idxoct is not bounds-checked against NOMAX
      // ("FIX LARGE OCT NUMBERS" in the original) -- kept as-is.
      do
        {
          curoct=nextoct;
          nextoct=curoct->next;

          // skip octs of other MPI ranks (handled through mpi)
          if(curoct->cpu!=cpu->rank) continue;

          keyodict[idxoct]=curoct; // building the dictionary
          // gathering particles from all levels >= current level
          getparticles(curoct, keyp,xp,yp,zp,mp,&ipart,1,0.,0.,0.,idxoct,0.,0);
          idxoct++;
        }while(nextoct!=NULL);

      // ============== launching the GPU horses ==============

      // CUDPP inclusive forward segmented sum over ipart elements
      CUDPPConfiguration config;
      config.algorithm = CUDPP_SEGMENTED_SCAN;
      config.op = CUDPP_ADD;
      config.datatype = CUDPP_FLOAT;
      CUDPPOption direction = CUDPP_OPTION_FORWARD;
      CUDPPOption inclusivity = CUDPP_OPTION_INCLUSIVE;
      config.options = direction | inclusivity;
      CUDPPHandle plan;
      cudppPlan(&plan, config, ipart, 1, 0);

      // ---------- sending data
      CPU2GPU_INT(keyp_d,keyp,sizeof(int)*NPMAX);
      CPU2GPU(xp_d,xp,sizeof(float)*NPMAX);
      CPU2GPU(yp_d,yp,sizeof(float)*NPMAX);
      CPU2GPU(zp_d,zp,sizeof(float)*NPMAX);
      CPU2GPU( m_d,mp,sizeof(float)*NPMAX);
      cudaMemset(mass_d,0,sizeof(float)*NPMAX);
      cudaMemset(mass2_d,0,sizeof(float)*NPMAX);
      cudaMemset(flag_d,0,sizeof(unsigned int)*NPMAX);

      // ---------- kernels start
      // segment-END flags go to the host: after the inclusive scan the
      // flagged positions hold the per-(oct,cell) totals
      carte_flag_next<<<dimGridPart,dimBlockPart>>>(keyp_d,flag_d,ipart);
      cudaThreadSynchronize();
      GPU2CPU_UINT(flag,flag_d,sizeof(unsigned int)*NPMAX);

      // segment-START flags stay on the device for cudpp
      carte_flag_previous<<<dimGridPart,dimBlockPart>>>(keyp_d,flag_d);
      cudaThreadSynchronize();

      // scanning the 8 CIC corner contributions
      int cx,cy,cz;
      for(i=0;i<2;i++)
        {
          for(j=0;j<2;j++)
            {
              for(k=0;k<2;k++)
                {
                  // per-particle weights, then per-segment sums
                  // (the original's debug copy of mass_d to the host
                  //  before the scan was dead work and is removed)
                  cudaMemset(mass_d,0,sizeof(float)*NPMAX);
                  carte_mass<<<dimGridPart,dimBlockPart>>>(dxcur,xp_d,yp_d,zp_d,m_d,mass_d,i,j,k);
                  cudaThreadSynchronize();
                  //CUERR();

                  cudppSegmentedScan(plan, mass2_d, mass_d, flag_d, ipart);
                  cudaThreadSynchronize();

                  // ------------ getting the data back
                  GPU2CPU(massp2,mass2_d,sizeof(float)*NPMAX);

                  // ------------ scatter in the tree
                  int oidx,ic; // renamed from idxoct/icell: no shadowing
                  for(ip=0;ip<ipart;ip++){
                    if(flag[ip]==1){ // segment end: massp2[ip] is its total
                      oidx=keyp[ip]>>3; // oct slot
                      ic=keyp[ip]&7;    // cell index

                      curoct=keyodict[oidx];

                      cx=(ic&1)+i;
                      cy=((ic>>1)&1)+j;
                      cz=(ic>>2)+k;

                      // recipient oct and cell for this CIC corner
                      woct=cic_nei_oct(curoct,cx,cy,cz);
                      ic=(cx&1)+((cy&1)<<1)+((cz&1)<<2);

                      if(woct!=NULL){
                        woct->cell[ic].density+=massp2[ip];
                      }
                    }
                  }
                }
            }
        }

      // ------------- Done
      cudppDestroyPlan(plan);
    } // going to next level

  cudaFree(mass_d);
  cudaFree(mass2_d);
  cudaFree(xp_d);
  cudaFree(yp_d);
  cudaFree(zp_d);
  cudaFree(m_d);
  cudaFree(keyp_d);
  cudaFree(flag_d);

  free(flag);
  free(keyp);
  free(keyodict);
  free(xp);
  free(yp);
  free(zp);
  free(mp);
  free(massp2);
}
dmgecsrmv.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.5.4) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver

       @date October 2020

       @generated from sparse/blas/zmgecsrmv.cu, normal z -> d, Thu Oct 8 23:05:47 2020
*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 512

// CSR SpMV kernel for several right-hand-side vectors at once.
// One thread per matrix row; the dynamic shared array keeps one partial
// sum per (thread, vector) pair, i.e. num_vecs * blockDim.x doubles.
__global__ void
dmgecsrmv_kernel(
    int num_rows,
    int num_cols,
    int num_vecs,
    double alpha,
    double * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    double * dx,
    double beta,
    double * dy)
{
    extern __shared__ double partial[];
    const int row = blockIdx.x * blockDim.x + threadIdx.x;

    if ( row >= num_rows )
        return;

    // clear the accumulator of every vector
    for ( int v = 0; v < num_vecs; v++ )
        partial[ threadIdx.x + v * blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0);

    // walk the nonzeros of this row once, updating all vectors
    const int row_begin = drowptr[ row ];
    const int row_end   = drowptr[ row + 1 ];
    for ( int nz = row_begin; nz < row_end; nz++ ) {
        const int    col = dcolind[ nz ];
        const double a   = dval[ nz ];
        for ( int v = 0; v < num_vecs; v++ )
            partial[ threadIdx.x + v * blockDim.x ] += a * dx[ col + v * num_cols ];
    }

    // y = alpha * (A x) + beta * y, one entry per vector
    for ( int v = 0; v < num_vecs; v++ )
        dy[ row + v * num_cols ] =
            alpha * partial[ threadIdx.x + v * blockDim.x ]
            + beta * dy[ row + v * num_cols ];
}


/**
    Purpose
    -------

    This routine computes Y = alpha * A * X + beta * Y for X and Y sets
    of num_vecs vectors on the GPU. The input format for A is CSR.

    Arguments
    ---------

    @param[in]  transA    magma_trans_t   transposition parameter for A
    @param[in]  m         magma_int_t     number of rows in A
    @param[in]  n         magma_int_t     number of columns in A
    @param[in]  num_vecs  magma_int_t     number of vectors
    @param[in]  alpha     double          scalar multiplier
    @param[in]  dval      magmaDouble_ptr values of A in CSR
    @param[in]  drowptr   magmaIndex_ptr  row pointer of A in CSR
    @param[in]  dcolind   magmaIndex_ptr  column indices of A in CSR
    @param[in]  dx        magmaDouble_ptr input vector x
    @param[in]  beta      double          scalar multiplier
    @param[out] dy        magmaDouble_ptr input/output vector y
    @param[in]  queue     magma_queue_t   queue to execute in

    @ingroup magmasparse_dblas
    ********************************************************************/

extern "C" magma_int_t
magma_dmgecsrmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    double alpha,
    magmaDouble_ptr dval,
    magmaIndex_ptr drowptr,
    magmaIndex_ptr dcolind,
    magmaDouble_ptr dx,
    double beta,
    magmaDouble_ptr dy,
    magma_queue_t queue )
{
    dim3 block_grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1 );
    magma_int_t nthreads = BLOCK_SIZE;
    // one double accumulator per (thread, vector) pair
    unsigned int shmem_bytes = num_vecs * BLOCK_SIZE * sizeof( double );
    hipLaunchKernelGGL(( dmgecsrmv_kernel), dim3(block_grid), dim3(nthreads), shmem_bytes, queue->cuda_stream(),
        m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);

    return MAGMA_SUCCESS;
}
dmgecsrmv.cu
/*
    -- MAGMA (version 2.5.4) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver

       @date October 2020

       @generated from sparse/blas/zmgecsrmv.cu, normal z -> d, Thu Oct 8 23:05:47 2020
*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 512

// CSR SpMV kernel for several right-hand-side vectors at once.
// One thread per matrix row; the dynamic shared array 'dot' holds one
// partial sum per (thread, vector) pair, i.e. num_vecs * blockDim.x
// doubles (allocated at launch time via the shared-memory argument).
__global__ void
dmgecsrmv_kernel(
    int num_rows,
    int num_cols,
    int num_vecs,
    double alpha,
    double * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    double * dx,
    double beta,
    double * dy)
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;
    extern __shared__ double dot[];

    if( row<num_rows ){
        // zero the accumulators of all num_vecs vectors
        for( int i=0; i<num_vecs; i++ )
            dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0);
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        // walk the nonzeros of this row once, updating every vector
        for( j=start; j<end; j++ ){
            int col = dcolind [ j ];
            double val = dval[ j ];
            for( int i=0; i<num_vecs; i++ )
                dot[ threadIdx.x + i*blockDim.x ] += val * dx[ col + i*num_cols ];
        }
        // y = alpha * (A x) + beta * y, one entry per vector
        for( int i=0; i<num_vecs; i++ )
            dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ] + beta * dy[ row + i*num_cols ];
    }
}


/**
    Purpose
    -------

    This routine computes Y = alpha *  A *  X + beta * Y for X and Y sets of
    num_vec vectors on the GPU. Input format is CSR.

    Arguments
    ---------

    @param[in]
    transA      magma_trans_t
                transposition parameter for A

    @param[in]
    m           magma_int_t
                number of rows in A

    @param[in]
    n           magma_int_t
                number of columns in A

    @param[in]
    num_vecs    magma_int_t
                number of vectors

    @param[in]
    alpha       double
                scalar multiplier

    @param[in]
    dval        magmaDouble_ptr
                array containing values of A in CSR

    @param[in]
    drowptr     magmaIndex_ptr
                rowpointer of A in CSR

    @param[in]
    dcolind     magmaIndex_ptr
                columnindices of A in CSR

    @param[in]
    dx          magmaDouble_ptr
                input vector x

    @param[in]
    beta        double
                scalar multiplier

    @param[out]
    dy          magmaDouble_ptr
                input/output vector y

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dblas
    ********************************************************************/

extern "C" magma_int_t
magma_dmgecsrmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    double alpha,
    magmaDouble_ptr dval,
    magmaIndex_ptr drowptr,
    magmaIndex_ptr dcolind,
    magmaDouble_ptr dx,
    double beta,
    magmaDouble_ptr dy,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1);
    magma_int_t threads = BLOCK_SIZE;
    // dynamic shared memory: one double per (thread, vector) pair
    unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( double ); // num_vecs vectors
    dmgecsrmv_kernel<<< grid, threads, MEM_SIZE, queue->cuda_stream()>>>
            (m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);

    return MAGMA_SUCCESS;
}
48310ff3c04191be7b8b7fa40598867157304367.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <float.h> #include <stdint.h> #include <limits> #include "BufferCompaction.h" #include "ExtensionFunctions.hpp" #include "GpuRtConstants.h" #include "HyperLogLogRank.h" extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) { return blockIdx.x * blockDim.x + threadIdx.x; } extern "C" __device__ int32_t group_buff_idx_impl() { return pos_start_impl(NULL); } extern "C" __device__ int32_t pos_step_impl() { return blockDim.x * gridDim.x; } extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) { return threadIdx.x % warp_sz; } extern "C" __device__ const int64_t* init_shared_mem_nop( const int64_t* groups_buffer, const int32_t groups_buffer_size) { return groups_buffer; } extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) { } extern "C" __device__ const int64_t* init_shared_mem(const int64_t* groups_buffer, const int32_t groups_buffer_size) { extern __shared__ int64_t fast_bins[]; if (threadIdx.x == 0) { memcpy(fast_bins, groups_buffer, groups_buffer_size); } __syncthreads(); return fast_bins; } /** * Dynamically allocates shared memory per block. * The amount of shared memory allocated is defined at kernel launch time. * Returns a pointer to the beginning of allocated shared memory */ extern "C" __device__ int64_t* alloc_shared_mem_dynamic() { extern __shared__ int64_t groups_buffer_smem[]; return groups_buffer_smem; } /** * Set the allocated shared memory elements to be equal to the 'identity_element'. * groups_buffer_size: number of 64-bit elements in shared memory per thread-block * NOTE: groups_buffer_size is in units of 64-bit elements. 
*/ extern "C" __device__ void set_shared_mem_to_identity( int64_t* groups_buffer_smem, const int32_t groups_buffer_size, const int64_t identity_element = 0) { #pragma unroll for (int i = threadIdx.x; i < groups_buffer_size; i += blockDim.x) { groups_buffer_smem[i] = identity_element; } __syncthreads(); } /** * Initialize dynamic shared memory: * 1. Allocates dynamic shared memory * 2. Set every allocated element to be equal to the 'identity element', by default zero. */ extern "C" __device__ const int64_t* init_shared_mem_dynamic( const int64_t* groups_buffer, const int32_t groups_buffer_size) { int64_t* groups_buffer_smem = alloc_shared_mem_dynamic(); set_shared_mem_to_identity(groups_buffer_smem, groups_buffer_size); return groups_buffer_smem; } extern "C" __device__ void write_back(int64_t* dest, int64_t* src, const int32_t sz) { __syncthreads(); if (threadIdx.x == 0) { memcpy(dest, src, sz); } } extern "C" __device__ void write_back_smem_nop(int64_t* dest, int64_t* src, const int32_t sz) {} extern "C" __device__ void agg_from_smem_to_gmem_nop(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) {} /** * Aggregate the result stored into shared memory back into global memory. * It also writes back the stored binId, if any, back into global memory. 
* Memory layout assumption: each 64-bit shared memory unit of data is as follows: * [0..31: the stored bin ID, to be written back][32..63: the count result, to be * aggregated] */ extern "C" __device__ void agg_from_smem_to_gmem_binId_count(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < num_elements; i += blockDim.x) { int32_t bin_id = *reinterpret_cast<int32_t*>(smem_src + i); int32_t count_result = *(reinterpret_cast<int32_t*>(smem_src + i) + 1); if (count_result) { // non-zero count atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1, static_cast<int32_t>(count_result)); // writing back the binId, only if count_result is non-zero *reinterpret_cast<unsigned int*>(gmem_dest + i) = static_cast<int32_t>(bin_id); } } } /** * Aggregate the result stored into shared memory back into global memory. * It also writes back the stored binId, if any, back into global memory. * Memory layout assumption: each 64-bit shared memory unit of data is as follows: * [0..31: the count result, to be aggregated][32..63: the stored bin ID, to be written * back] */ extern "C" __device__ void agg_from_smem_to_gmem_count_binId(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < num_elements; i += blockDim.x) { int32_t count_result = *reinterpret_cast<int32_t*>(smem_src + i); int32_t bin_id = *(reinterpret_cast<int32_t*>(smem_src + i) + 1); if (count_result) { // non-zero count atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i), static_cast<int32_t>(count_result)); // writing back the binId, only if count_result is non-zero *(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1) = static_cast<int32_t>(bin_id); } } } #define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu #include "GpuInitGroups.cu" #undef init_group_by_buffer_gpu_impl // Dynamic watchdog: monitoring up to 64 SMs. E.g. 
GP100 config may have 60: // 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors // TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA __device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel // TODO(Saman): make this cycle budget something constant in codegen level __device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel __device__ int32_t dw_abort = 0; // TBD: set from host (async) __inline__ __device__ uint32_t get_smid(void) { uint32_t ret; asm("mov.u32 %0, %%smid;" : "=r"(ret)); return ret; } /* * The main objective of this funciton is to return true, if any of the following two * scnearios happen: * 1. receives a host request for aborting the kernel execution * 2. kernel execution takes longer clock cycles than it was initially allowed * The assumption is that all (or none) threads within a block return true for the * watchdog, and the first thread within each block compares the recorded clock cycles for * its occupying SM with the allowed budget. It also assumess that all threads entering * this function are active (no critical edge exposure) * NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global * memory scope. */ extern "C" __device__ bool dynamic_watchdog() { // check for dynamic watchdog, if triggered all threads return true if (dw_cycle_budget == 0LL) { return false; // Uninitialized watchdog can't check time } if (dw_abort == 1) { return true; // Received host request to abort } uint32_t smid = get_smid(); if (smid >= 128) { return false; } __shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start __shared__ volatile bool dw_should_terminate; // all threads within a block should return together if // watchdog criteria is met // thread 0 either initializes or read the initial clock cycle, the result is stored // into shared memory. 
Since all threads wihtin a block shares the same SM, there's no // point in using more threads here. if (threadIdx.x == 0) { dw_block_cycle_start = 0LL; int64_t cycle_count = static_cast<int64_t>(clock64()); // Make sure the block hasn't switched SMs if (smid == get_smid()) { dw_block_cycle_start = static_cast<int64_t>( atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]), 0ULL, static_cast<unsigned long long>(cycle_count))); } int64_t cycles = cycle_count - dw_block_cycle_start; if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) && (cycles > dw_cycle_budget)) { // Check if we're out of time on this particular SM dw_should_terminate = true; } else { dw_should_terminate = false; } } __syncthreads(); return dw_should_terminate; } template <typename T = unsigned long long> inline __device__ T get_empty_key() { return EMPTY_KEY_64; } template <> inline __device__ unsigned int get_empty_key() { return EMPTY_KEY_32; } template <typename T> inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const T* key, const uint32_t key_count, const uint32_t row_size_quad) { const T empty_key = get_empty_key<T>(); uint32_t off = h * row_size_quad; auto row_ptr = reinterpret_cast<T*>(groups_buffer + off); { const T old = atomicCAS(row_ptr, empty_key, *key); if (empty_key == old && key_count > 1) { for (size_t i = 1; i <= key_count - 1; ++i) { atomicExch(row_ptr + i, key[i]); } } } if (key_count > 1) { while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) { // spin until the winning thread has finished writing the entire key and the init // value } } bool match = true; for (uint32_t i = 0; i < key_count; ++i) { if (row_ptr[i] != key[i]) { match = false; break; } } if (match) { auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count); return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8)); } return NULL; } extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t 
h, const int64_t* key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t* init_vals) { switch (key_width) { case 4: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned int*>(key), key_count, row_size_quad); case 8: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned long long*>(key), key_count, row_size_quad); default: return NULL; } } template <typename T> __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const T* key, const uint32_t key_count) { uint32_t off = h; { const uint64_t old = atomicCAS(reinterpret_cast<T*>(groups_buffer + off), get_empty_key<T>(), *key); if (old == get_empty_key<T>()) { for (size_t i = 0; i < key_count; ++i) { groups_buffer[off] = key[i]; off += entry_count; } return h; } } __syncthreads(); off = h; for (size_t i = 0; i < key_count; ++i) { if (groups_buffer[off] != key[i]) { return -1; } off += entry_count; } return h; } extern "C" __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width) { switch (key_width) { case 4: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned int*>(key), key_count); case 8: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned long long*>(key), key_count); default: return -1; } } extern "C" __device__ int64_t* get_matching_group_value_columnar( int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_qw_count, const size_t entry_count) { uint32_t off = h; { const uint64_t old = atomicCAS( reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key); if (EMPTY_KEY_64 == old) { for (size_t i = 0; i < key_qw_count; ++i) { 
groups_buffer[off] = key[i]; off += entry_count; } return &groups_buffer[off]; } } __syncthreads(); off = h; for (size_t i = 0; i < key_qw_count; ++i) { if (groups_buffer[off] != key[i]) { return NULL; } off += entry_count; } return &groups_buffer[off]; } #include "GroupByRuntime.cpp" #include "JoinHashTableQueryRuntime.cpp" #include "MurmurHash.cpp" #include "TopKRuntime.cpp" __device__ int64_t atomicMax64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed)); } while (assumed != old); return old; } __device__ int64_t atomicMin64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed)); } while (assumed != old); return old; } // As of 20160418, CUDA 8.0EA only defines `atomicAdd(double*, double)` for compute // capability >= 6.0. 
#if TORCH_HIP_VERSION < 8000 || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600) __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __device__ double atomicMax(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(max(val, __longlong_as_double(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } __device__ float atomicMax(float* address, float val) { int* address_as_int = (int*)address; int old = *address_as_int, assumed; do { assumed = old; old = atomicCAS( address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __int_as_float(old); } __device__ double atomicMin(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } __device__ double atomicMin(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed)))); } 
while (assumed != old); return __int_as_float(old); } extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) { return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL)); } extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) { return atomicAdd(agg, 1UL); } extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) { return agg_count_shared(agg, val); } extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) { return agg_count_int32_shared(agg, val); } extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) { return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val); } extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) { return atomicAdd(agg, val); } extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) { atomicAdd(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) { atomicAdd(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) { atomicMax64(agg, val); } extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) { atomicMax(agg, val); } extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) { atomicMax(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) { atomicMax(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) { atomicMin64(agg, val); } extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) { atomicMin(agg, val); } extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) { atomicMin(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_min_float_shared(int32_t* agg, 
const float val) { atomicMin(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) { *agg = val; } #define DEF_AGG_ID_INT_SHARED(n) \ extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \ const int##n##_t val) { \ *agg = val; \ } DEF_AGG_ID_INT_SHARED(32) DEF_AGG_ID_INT_SHARED(16) DEF_AGG_ID_INT_SHARED(8) #undef DEF_AGG_ID_INT_SHARED extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) { *agg = *(reinterpret_cast<const int64_t*>(&val)); } extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) { *agg = *(reinterpret_cast<const int64_t*>(val)); } extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) { *agg = __float_as_int(val); } #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return 0; \ } #define DATA_T int64_t #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count) #undef DATA_T #undef ADDR_T #define DATA_T int32_t #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_int32) #undef DATA_T #undef ADDR_T // Initial value for nullable column is INT32_MIN extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { agg_max_int32_shared(agg, val); } } __device__ int32_t atomicMin32SkipVal(int32_t* address, int32_t val, const int32_t skip_val) { int32_t old = atomicExch(address, INT_MAX); return atomicMin(address, old == skip_val ? 
val : min(old, val)); } extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { atomicMin32SkipVal(agg, val, skip_val); } } __device__ int32_t atomicSum32SkipVal(int32_t* address, const int32_t val, const int32_t skip_val) { unsigned int* address_as_int = (unsigned int*)address; int32_t old = atomicExch(address_as_int, 0); int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { const int32_t old = atomicSum32SkipVal(agg, val, skip_val); return old; } return 0; } __device__ int64_t atomicSum64SkipVal(int64_t* address, const int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; int64_t old = atomicExch(address_as_ull, 0); int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { return atomicSum64SkipVal(agg, val, skip_val); } return 0; } __device__ int64_t atomicMin64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? 
val : min((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMin64SkipVal(agg, val, skip_val); } } __device__ int64_t atomicMax64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? val : max((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMax64SkipVal(agg, val, skip_val); } } #undef DEF_SKIP_AGG #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return *agg; \ } #define DATA_T double #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count_double) #undef ADDR_T #undef DATA_T #define DATA_T float #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_float) #undef ADDR_T #undef DATA_T // Initial value for nullable column is FLOAT_MIN extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX); atomicMax(reinterpret_cast<float*>(agg), __float_as_int(old) == __float_as_int(skip_val) ? val : fmaxf(old, val)); } } __device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) { float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX); return atomicMin( reinterpret_cast<float*>(address), __float_as_int(old) == __float_as_int(skip_val) ? 
val : fminf(old, val)); } extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicMinFltSkipVal(agg, val, skip_val); } } __device__ void atomicSumFltSkipVal(float* address, const float val, const float skip_val) { float old = atomicExch(address, 0.f); atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val); } } __device__ void atomicSumDblSkipVal(double* address, const double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.))); atomicAdd( address, __double_as_longlong(old) == __double_as_longlong(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (__double_as_longlong(val) != __double_as_longlong(skip_val)) { atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMinDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *reinterpret_cast<const unsigned long long*>(&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? 
*reinterpret_cast<unsigned long long*>(&val) : __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMaxDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *((unsigned long long int*)&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? *((unsigned long long int*)&val) : __double_as_longlong(max(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMaxDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } #undef DEF_SKIP_AGG extern "C" __device__ bool slotEmptyKeyCAS(int64_t* slot, int64_t new_val, int64_t init_val) { auto slot_address = reinterpret_cast<unsigned long long int*>(slot); const auto empty_key = static_cast<unsigned long long int*>(static_cast<void*>(&init_val)); const auto new_val_cast = static_cast<unsigned long long int*>(static_cast<void*>(&new_val)); const auto old_val = atomicCAS(slot_address, *empty_key, *new_val_cast); if (old_val == *empty_key) { return true; } else { return false; } } #include "../Utils/ChunkIter.cpp" #include "DateTruncate.cpp" #include "ExtractFromTime.cpp" #define EXECUTE_INCLUDE #include "ArrayOps.cpp" #include "DateAdd.cpp" #include "StringFunctions.cpp" #undef EXECUTE_INCLUDE #include "../Utils/Regexp.cpp" #include "../Utils/StringLike.cpp" extern "C" __device__ uint64_t 
string_decode(int8_t* chunk_iter_, int64_t pos) { // TODO(alex): de-dup, the x64 version is basically identical ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_); VarlenDatum vd; bool is_end; ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end); return vd.is_null ? 0 : (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) | (static_cast<uint64_t>(vd.length) << 48); } extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap, const uint32_t bitmap_bytes, const uint8_t* key_bytes, const uint32_t key_len) { const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8); const uint32_t word_idx = bit_pos / 32; const uint32_t bit_idx = bit_pos % 32; atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx); } extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg, const int64_t val, const int64_t min_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { const uint64_t bitmap_idx = val - min_val; const uint32_t byte_idx = bitmap_idx >> 3; const uint32_t word_idx = byte_idx >> 2; const uint32_t byte_word_idx = byte_idx & 3; const int64_t host_addr = *agg; uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr + (threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes); switch (byte_word_idx) { case 0: atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7)); break; case 1: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8)); break; case 2: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16)); break; case 3: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24)); break; default: break; } } extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu( int64_t* agg, const int64_t val, const int64_t min_val, const int64_t skip_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { if (val != skip_val) { agg_count_distinct_bitmap_gpu( agg, 
val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes); } } extern "C" __device__ void agg_approximate_count_distinct_gpu( int64_t* agg, const int64_t key, const uint32_t b, const int64_t base_dev_addr, const int64_t base_host_addr) { const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0); const uint32_t index = hash >> (64 - b); const int32_t rank = get_rank(hash << b, 64 - b); const int64_t host_addr = *agg; int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr); atomicMax(&M[index], rank); } extern "C" __device__ void force_sync() { __threadfence_block(); } extern "C" __device__ void sync_warp() { #if (TORCH_HIP_VERSION >= 9000) __syncwarp(); #endif } /** * Protected warp synchornization to make sure all (or none) threads within a warp go * through a synchronization barrier. thread_pos: the current thread position to be used * for a memory access row_count: maximum number of rows to be processed The function * performs warp sync iff all 32 threads within that warp will process valid data NOTE: it * currently assumes that warp size is 32. */ extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) { #if (TORCH_HIP_VERSION >= 9000) // only syncing if NOT within the same warp as those threads experiencing the critical // edge if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) { __syncwarp(); } #endif }
48310ff3c04191be7b8b7fa40598867157304367.cu
#include <cuda.h> #include <float.h> #include <stdint.h> #include <limits> #include "BufferCompaction.h" #include "ExtensionFunctions.hpp" #include "GpuRtConstants.h" #include "HyperLogLogRank.h" extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) { return blockIdx.x * blockDim.x + threadIdx.x; } extern "C" __device__ int32_t group_buff_idx_impl() { return pos_start_impl(NULL); } extern "C" __device__ int32_t pos_step_impl() { return blockDim.x * gridDim.x; } extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) { return threadIdx.x % warp_sz; } extern "C" __device__ const int64_t* init_shared_mem_nop( const int64_t* groups_buffer, const int32_t groups_buffer_size) { return groups_buffer; } extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) { } extern "C" __device__ const int64_t* init_shared_mem(const int64_t* groups_buffer, const int32_t groups_buffer_size) { extern __shared__ int64_t fast_bins[]; if (threadIdx.x == 0) { memcpy(fast_bins, groups_buffer, groups_buffer_size); } __syncthreads(); return fast_bins; } /** * Dynamically allocates shared memory per block. * The amount of shared memory allocated is defined at kernel launch time. * Returns a pointer to the beginning of allocated shared memory */ extern "C" __device__ int64_t* alloc_shared_mem_dynamic() { extern __shared__ int64_t groups_buffer_smem[]; return groups_buffer_smem; } /** * Set the allocated shared memory elements to be equal to the 'identity_element'. * groups_buffer_size: number of 64-bit elements in shared memory per thread-block * NOTE: groups_buffer_size is in units of 64-bit elements. 
*/ extern "C" __device__ void set_shared_mem_to_identity( int64_t* groups_buffer_smem, const int32_t groups_buffer_size, const int64_t identity_element = 0) { #pragma unroll for (int i = threadIdx.x; i < groups_buffer_size; i += blockDim.x) { groups_buffer_smem[i] = identity_element; } __syncthreads(); } /** * Initialize dynamic shared memory: * 1. Allocates dynamic shared memory * 2. Set every allocated element to be equal to the 'identity element', by default zero. */ extern "C" __device__ const int64_t* init_shared_mem_dynamic( const int64_t* groups_buffer, const int32_t groups_buffer_size) { int64_t* groups_buffer_smem = alloc_shared_mem_dynamic(); set_shared_mem_to_identity(groups_buffer_smem, groups_buffer_size); return groups_buffer_smem; } extern "C" __device__ void write_back(int64_t* dest, int64_t* src, const int32_t sz) { __syncthreads(); if (threadIdx.x == 0) { memcpy(dest, src, sz); } } extern "C" __device__ void write_back_smem_nop(int64_t* dest, int64_t* src, const int32_t sz) {} extern "C" __device__ void agg_from_smem_to_gmem_nop(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) {} /** * Aggregate the result stored into shared memory back into global memory. * It also writes back the stored binId, if any, back into global memory. 
* Memory layout assumption: each 64-bit shared memory unit of data is as follows: * [0..31: the stored bin ID, to be written back][32..63: the count result, to be * aggregated] */ extern "C" __device__ void agg_from_smem_to_gmem_binId_count(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < num_elements; i += blockDim.x) { int32_t bin_id = *reinterpret_cast<int32_t*>(smem_src + i); int32_t count_result = *(reinterpret_cast<int32_t*>(smem_src + i) + 1); if (count_result) { // non-zero count atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1, static_cast<int32_t>(count_result)); // writing back the binId, only if count_result is non-zero *reinterpret_cast<unsigned int*>(gmem_dest + i) = static_cast<int32_t>(bin_id); } } } /** * Aggregate the result stored into shared memory back into global memory. * It also writes back the stored binId, if any, back into global memory. * Memory layout assumption: each 64-bit shared memory unit of data is as follows: * [0..31: the count result, to be aggregated][32..63: the stored bin ID, to be written * back] */ extern "C" __device__ void agg_from_smem_to_gmem_count_binId(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < num_elements; i += blockDim.x) { int32_t count_result = *reinterpret_cast<int32_t*>(smem_src + i); int32_t bin_id = *(reinterpret_cast<int32_t*>(smem_src + i) + 1); if (count_result) { // non-zero count atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i), static_cast<int32_t>(count_result)); // writing back the binId, only if count_result is non-zero *(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1) = static_cast<int32_t>(bin_id); } } } #define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu #include "GpuInitGroups.cu" #undef init_group_by_buffer_gpu_impl // Dynamic watchdog: monitoring up to 64 SMs. E.g. 
GP100 config may have 60: // 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors // TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA __device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel // TODO(Saman): make this cycle budget something constant in codegen level __device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel __device__ int32_t dw_abort = 0; // TBD: set from host (async) __inline__ __device__ uint32_t get_smid(void) { uint32_t ret; asm("mov.u32 %0, %%smid;" : "=r"(ret)); return ret; } /* * The main objective of this funciton is to return true, if any of the following two * scnearios happen: * 1. receives a host request for aborting the kernel execution * 2. kernel execution takes longer clock cycles than it was initially allowed * The assumption is that all (or none) threads within a block return true for the * watchdog, and the first thread within each block compares the recorded clock cycles for * its occupying SM with the allowed budget. It also assumess that all threads entering * this function are active (no critical edge exposure) * NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global * memory scope. */ extern "C" __device__ bool dynamic_watchdog() { // check for dynamic watchdog, if triggered all threads return true if (dw_cycle_budget == 0LL) { return false; // Uninitialized watchdog can't check time } if (dw_abort == 1) { return true; // Received host request to abort } uint32_t smid = get_smid(); if (smid >= 128) { return false; } __shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start __shared__ volatile bool dw_should_terminate; // all threads within a block should return together if // watchdog criteria is met // thread 0 either initializes or read the initial clock cycle, the result is stored // into shared memory. 
Since all threads wihtin a block shares the same SM, there's no // point in using more threads here. if (threadIdx.x == 0) { dw_block_cycle_start = 0LL; int64_t cycle_count = static_cast<int64_t>(clock64()); // Make sure the block hasn't switched SMs if (smid == get_smid()) { dw_block_cycle_start = static_cast<int64_t>( atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]), 0ULL, static_cast<unsigned long long>(cycle_count))); } int64_t cycles = cycle_count - dw_block_cycle_start; if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) && (cycles > dw_cycle_budget)) { // Check if we're out of time on this particular SM dw_should_terminate = true; } else { dw_should_terminate = false; } } __syncthreads(); return dw_should_terminate; } template <typename T = unsigned long long> inline __device__ T get_empty_key() { return EMPTY_KEY_64; } template <> inline __device__ unsigned int get_empty_key() { return EMPTY_KEY_32; } template <typename T> inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const T* key, const uint32_t key_count, const uint32_t row_size_quad) { const T empty_key = get_empty_key<T>(); uint32_t off = h * row_size_quad; auto row_ptr = reinterpret_cast<T*>(groups_buffer + off); { const T old = atomicCAS(row_ptr, empty_key, *key); if (empty_key == old && key_count > 1) { for (size_t i = 1; i <= key_count - 1; ++i) { atomicExch(row_ptr + i, key[i]); } } } if (key_count > 1) { while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) { // spin until the winning thread has finished writing the entire key and the init // value } } bool match = true; for (uint32_t i = 0; i < key_count; ++i) { if (row_ptr[i] != key[i]) { match = false; break; } } if (match) { auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count); return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8)); } return NULL; } extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t 
h, const int64_t* key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t* init_vals) { switch (key_width) { case 4: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned int*>(key), key_count, row_size_quad); case 8: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned long long*>(key), key_count, row_size_quad); default: return NULL; } } template <typename T> __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const T* key, const uint32_t key_count) { uint32_t off = h; { const uint64_t old = atomicCAS(reinterpret_cast<T*>(groups_buffer + off), get_empty_key<T>(), *key); if (old == get_empty_key<T>()) { for (size_t i = 0; i < key_count; ++i) { groups_buffer[off] = key[i]; off += entry_count; } return h; } } __syncthreads(); off = h; for (size_t i = 0; i < key_count; ++i) { if (groups_buffer[off] != key[i]) { return -1; } off += entry_count; } return h; } extern "C" __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width) { switch (key_width) { case 4: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned int*>(key), key_count); case 8: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned long long*>(key), key_count); default: return -1; } } extern "C" __device__ int64_t* get_matching_group_value_columnar( int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_qw_count, const size_t entry_count) { uint32_t off = h; { const uint64_t old = atomicCAS( reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key); if (EMPTY_KEY_64 == old) { for (size_t i = 0; i < key_qw_count; ++i) { 
groups_buffer[off] = key[i]; off += entry_count; } return &groups_buffer[off]; } } __syncthreads(); off = h; for (size_t i = 0; i < key_qw_count; ++i) { if (groups_buffer[off] != key[i]) { return NULL; } off += entry_count; } return &groups_buffer[off]; } #include "GroupByRuntime.cpp" #include "JoinHashTableQueryRuntime.cpp" #include "MurmurHash.cpp" #include "TopKRuntime.cpp" __device__ int64_t atomicMax64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed)); } while (assumed != old); return old; } __device__ int64_t atomicMin64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed)); } while (assumed != old); return old; } // As of 20160418, CUDA 8.0EA only defines `atomicAdd(double*, double)` for compute // capability >= 6.0. 
#if CUDA_VERSION < 8000 || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600) __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __device__ double atomicMax(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(max(val, __longlong_as_double(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } __device__ float atomicMax(float* address, float val) { int* address_as_int = (int*)address; int old = *address_as_int, assumed; do { assumed = old; old = atomicCAS( address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __int_as_float(old); } __device__ double atomicMin(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } __device__ double atomicMin(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed)))); } while 
(assumed != old); return __int_as_float(old); } extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) { return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL)); } extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) { return atomicAdd(agg, 1UL); } extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) { return agg_count_shared(agg, val); } extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) { return agg_count_int32_shared(agg, val); } extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) { return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val); } extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) { return atomicAdd(agg, val); } extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) { atomicAdd(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) { atomicAdd(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) { atomicMax64(agg, val); } extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) { atomicMax(agg, val); } extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) { atomicMax(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) { atomicMax(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) { atomicMin64(agg, val); } extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) { atomicMin(agg, val); } extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) { atomicMin(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_min_float_shared(int32_t* agg, const 
float val) { atomicMin(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) { *agg = val; } #define DEF_AGG_ID_INT_SHARED(n) \ extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \ const int##n##_t val) { \ *agg = val; \ } DEF_AGG_ID_INT_SHARED(32) DEF_AGG_ID_INT_SHARED(16) DEF_AGG_ID_INT_SHARED(8) #undef DEF_AGG_ID_INT_SHARED extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) { *agg = *(reinterpret_cast<const int64_t*>(&val)); } extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) { *agg = *(reinterpret_cast<const int64_t*>(val)); } extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) { *agg = __float_as_int(val); } #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return 0; \ } #define DATA_T int64_t #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count) #undef DATA_T #undef ADDR_T #define DATA_T int32_t #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_int32) #undef DATA_T #undef ADDR_T // Initial value for nullable column is INT32_MIN extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { agg_max_int32_shared(agg, val); } } __device__ int32_t atomicMin32SkipVal(int32_t* address, int32_t val, const int32_t skip_val) { int32_t old = atomicExch(address, INT_MAX); return atomicMin(address, old == skip_val ? 
val : min(old, val)); } extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { atomicMin32SkipVal(agg, val, skip_val); } } __device__ int32_t atomicSum32SkipVal(int32_t* address, const int32_t val, const int32_t skip_val) { unsigned int* address_as_int = (unsigned int*)address; int32_t old = atomicExch(address_as_int, 0); int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { const int32_t old = atomicSum32SkipVal(agg, val, skip_val); return old; } return 0; } __device__ int64_t atomicSum64SkipVal(int64_t* address, const int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; int64_t old = atomicExch(address_as_ull, 0); int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { return atomicSum64SkipVal(agg, val, skip_val); } return 0; } __device__ int64_t atomicMin64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? 
val : min((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMin64SkipVal(agg, val, skip_val); } } __device__ int64_t atomicMax64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? val : max((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMax64SkipVal(agg, val, skip_val); } } #undef DEF_SKIP_AGG #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return *agg; \ } #define DATA_T double #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count_double) #undef ADDR_T #undef DATA_T #define DATA_T float #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_float) #undef ADDR_T #undef DATA_T // Initial value for nullable column is FLOAT_MIN extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX); atomicMax(reinterpret_cast<float*>(agg), __float_as_int(old) == __float_as_int(skip_val) ? val : fmaxf(old, val)); } } __device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) { float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX); return atomicMin( reinterpret_cast<float*>(address), __float_as_int(old) == __float_as_int(skip_val) ? 
val : fminf(old, val)); } extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicMinFltSkipVal(agg, val, skip_val); } } __device__ void atomicSumFltSkipVal(float* address, const float val, const float skip_val) { float old = atomicExch(address, 0.f); atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val); } } __device__ void atomicSumDblSkipVal(double* address, const double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.))); atomicAdd( address, __double_as_longlong(old) == __double_as_longlong(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (__double_as_longlong(val) != __double_as_longlong(skip_val)) { atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMinDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *reinterpret_cast<const unsigned long long*>(&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? 
*reinterpret_cast<unsigned long long*>(&val) : __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMaxDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *((unsigned long long int*)&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? *((unsigned long long int*)&val) : __double_as_longlong(max(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMaxDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } #undef DEF_SKIP_AGG extern "C" __device__ bool slotEmptyKeyCAS(int64_t* slot, int64_t new_val, int64_t init_val) { auto slot_address = reinterpret_cast<unsigned long long int*>(slot); const auto empty_key = static_cast<unsigned long long int*>(static_cast<void*>(&init_val)); const auto new_val_cast = static_cast<unsigned long long int*>(static_cast<void*>(&new_val)); const auto old_val = atomicCAS(slot_address, *empty_key, *new_val_cast); if (old_val == *empty_key) { return true; } else { return false; } } #include "../Utils/ChunkIter.cpp" #include "DateTruncate.cpp" #include "ExtractFromTime.cpp" #define EXECUTE_INCLUDE #include "ArrayOps.cpp" #include "DateAdd.cpp" #include "StringFunctions.cpp" #undef EXECUTE_INCLUDE #include "../Utils/Regexp.cpp" #include "../Utils/StringLike.cpp" extern "C" __device__ uint64_t 
string_decode(int8_t* chunk_iter_, int64_t pos) { // TODO(alex): de-dup, the x64 version is basically identical ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_); VarlenDatum vd; bool is_end; ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end); return vd.is_null ? 0 : (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) | (static_cast<uint64_t>(vd.length) << 48); } extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap, const uint32_t bitmap_bytes, const uint8_t* key_bytes, const uint32_t key_len) { const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8); const uint32_t word_idx = bit_pos / 32; const uint32_t bit_idx = bit_pos % 32; atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx); } extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg, const int64_t val, const int64_t min_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { const uint64_t bitmap_idx = val - min_val; const uint32_t byte_idx = bitmap_idx >> 3; const uint32_t word_idx = byte_idx >> 2; const uint32_t byte_word_idx = byte_idx & 3; const int64_t host_addr = *agg; uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr + (threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes); switch (byte_word_idx) { case 0: atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7)); break; case 1: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8)); break; case 2: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16)); break; case 3: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24)); break; default: break; } } extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu( int64_t* agg, const int64_t val, const int64_t min_val, const int64_t skip_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { if (val != skip_val) { agg_count_distinct_bitmap_gpu( agg, 
val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes); } } extern "C" __device__ void agg_approximate_count_distinct_gpu( int64_t* agg, const int64_t key, const uint32_t b, const int64_t base_dev_addr, const int64_t base_host_addr) { const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0); const uint32_t index = hash >> (64 - b); const int32_t rank = get_rank(hash << b, 64 - b); const int64_t host_addr = *agg; int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr); atomicMax(&M[index], rank); } extern "C" __device__ void force_sync() { __threadfence_block(); } extern "C" __device__ void sync_warp() { #if (CUDA_VERSION >= 9000) __syncwarp(); #endif } /** * Protected warp synchornization to make sure all (or none) threads within a warp go * through a synchronization barrier. thread_pos: the current thread position to be used * for a memory access row_count: maximum number of rows to be processed The function * performs warp sync iff all 32 threads within that warp will process valid data NOTE: it * currently assumes that warp size is 32. */ extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) { #if (CUDA_VERSION >= 9000) // only syncing if NOT within the same warp as those threads experiencing the critical // edge if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) { __syncwarp(); } #endif }
147156b20d10015d6611455c0e00072014885555.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/layers/cluster_centroid_dist_layer.hpp" #include <vector> namespace caffe{ //template <typename Dtype> //__global__ void eucliean_vec_forward(const int n, const int num, const int num_cluster, // const int feat_dim, const Dtype* bottom_data, const Dtype* centroid_data, // Dtype* top_data) //{ // CUDA_KERNEL_LOOP(index, n) { // int k_idx = n % num_cluster; // int n_idx = n / num_cluster; // const Dtype* bottom_ptr = bottom_data + n_idx*feat_dim; // const Dtype* centroid_ptr = centroid_data + k_idx*feat_dim; // Dtype* top_ptr = top_data + n; // top_ptr[0] = 0; // for (int i = 0; i < feat_dim; ++i) // { // top_ptr[0] += 0.5*(bottom_ptr[i] - centroid_ptr[i])*(bottom_ptr[i] - centroid_ptr[i]); // } // } //} template <typename Dtype> __global__ void set_diag_zero(const int n, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { int idx = index*n + index; y[idx] = 0; } } template <typename Dtype> __global__ void delete_diag(const int n, const int sqrtN, Dtype* x, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { //int idx = index*n + index; //y[idx] = 0; int dimx = index % sqrtN; int dimy = index / sqrtN; if (dimx != dimy) { if (dimx > dimy) { int idx = dimy*(sqrtN - 1) + dimx - 1; y[idx] = x[index]; } else{ int idx = dimy*(sqrtN - 1) + dimx; y[idx] = x[index]; } } else { x[index] = 0; } } } template <typename Dtype> __global__ void expand_diag(const int n, const int sqrtN, Dtype* x, const Dtype* y) { CUDA_KERNEL_LOOP(index, n) { //int idx = index*n + index; //y[idx] = 0; int dimx = index % sqrtN; int dimy = index / sqrtN; if (dimx != dimy) { if (dimx > dimy) { int idx = dimy*(sqrtN - 1) + dimx - 1; x[index] = y[idx]; } else{ int idx = dimy*(sqrtN - 1) + dimx; x[index] = y[idx]; } } else{ x[index] = 0; } } } template <typename Dtype> void ClusterCentroidDistLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //caffe_gpu_mul(bottom[0]->count(), 
bottom[0]->gpu_data(), bottom[0]->gpu_data(), bottom_cache_.mutable_gpu_data()); //caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // bottom[0]->num(), // 1, // centroid_dim_, // (Dtype)1.0, // bottom_cache_.gpu_data(), // ones_.gpu_data(), // (Dtype)0.0, // bottom_cache_.mutable_gpu_diff() // ); //caffe_gpu_powx(bottom[0]->num(), bottom_cache_.gpu_diff(), (Dtype)0.5, bottom_cache_.mutable_gpu_diff()); //caffe_gpu_gemm( // CblasNoTrans, CblasNoTrans, // bottom[0]->num(), // centroid_dim_, // 1, // (Dtype)1.0, // bottom_cache_.gpu_diff(), // ones_.gpu_data(), // (Dtype)0.0, // bottom_cache_.mutable_gpu_data() // ); //caffe_gpu_div(bottom[0]->count(), bottom[0]->gpu_data(), bottom_cache_.gpu_data(), bottom_cache_.mutable_gpu_diff()); //if (use_T_) // caffe_gpu_scal(bottom[0]->count(), (Dtype)T, bottom_cache_.mutable_gpu_diff()); //const Dtype* bottom_data = bottom_cache_.gpu_diff(); //= bottom[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = is_self_dist_ ? top_cache_.mutable_gpu_data() : top[0]->mutable_gpu_data(); const Dtype* centroid_data = compute_dist_ ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data(); const int count_blobs_ = compute_dist_ ? bottom[1]->count() : this->blobs_[0]->count(); const int top_count = is_self_dist_ ? 
top_cache_.count() : top[0]->count(); if (!compute_dist_) { if (is_sample_base_cls) { caffe_copy(bottom[0]->count(), bottom_data, this->blobs_[0]->mutable_gpu_data()); for (int i = 0; i < top[1]->count(); ++i) { top[1]->mutable_cpu_data()[i] = i; } } else { if (!initialized_) { if (init_count_ >= this->blobs_[0]->count()) { initialized_ = true; LOG(INFO) << "intial centroid complete."; } else { int count = min(bottom[0]->count(), this->blobs_[0]->count() - init_count_); caffe_copy(count, bottom_data, this->blobs_[0]->mutable_gpu_data() + init_count_); caffe_rng_gaussian<Dtype>(top[0]->count(), Dtype(0), Dtype(1), top[0]->mutable_cpu_data()); init_count_ += count; LOG(INFO) << init_count_; return; } } } } // square of data. caffe_gpu_mul(bottom[0]->count(), bottom_data, bottom_data, square_feat_.mutable_gpu_data()); // sum along centroid_dim_ caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, num_samp_, //bottom[0]->num(), 1, centroid_dim_, (Dtype)0.5, // / centroid_dim_, square_feat_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, column_.mutable_gpu_data()); // span along num_cluster_ dim caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, //bottom[0]->num(), num_samp_, num_cluster_, 1, (Dtype)1.0, column_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, top_data ); // dot product of centroid and feat caffe_gpu_gemm( CblasNoTrans, CblasTrans, //bottom[0]->num(), num_samp_, num_cluster_, centroid_dim_, (Dtype)-1.0, // / centroid_dim_, bottom_data, centroid_data, (Dtype)1.0, top_data); //square of centroid. 
caffe_gpu_mul( count_blobs_, centroid_data, centroid_data, square_cluster_.mutable_gpu_data()); //sum along centroid_dim_ caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, num_cluster_, 1, centroid_dim_, (Dtype)1.0, // / centroid_dim_, square_cluster_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, column_.mutable_gpu_data()); //span along feat num caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, //bottom[0]->num(), num_samp_, num_cluster_, 1, (Dtype)0.5, ones_.gpu_data(), column_.gpu_data(), (Dtype)1.0, top_data);//cache_cluster_.mutable_gpu_data()); //caffe_gpu_powx(top[0]->count(), top_data, (Dtype)0.5, top_data); //caffe_gpu_scal(top[0]->count(), (Dtype)scale, top_data); if (!use_square_) caffe_gpu_powx(top_count, top_data, (Dtype)0.5, top_data); caffe_gpu_scal(top_count, (Dtype)scale, top_data); //if (compute_dist_ && bottom[0]->data() == bottom[1]->data()) if (is_self_dist_) { /*set_diag_zero<Dtype> << <CAFFE_GET_BLOCKS(top[0]->num()), CAFFE_CUDA_NUM_THREADS >> >(top[0]->num(), top_data);*/ //delete_diag<Dtype> << <CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS >> >(top_count, top[0]->num(), top_data, top[0]->mutable_gpu_data()); set_diag_zero<Dtype> << <CAFFE_GET_BLOCKS(top[0]->num()), CAFFE_CUDA_NUM_THREADS >> >(top[0]->num(), top_data); caffe_copy(top[0]->count(), top_data, top[0]->mutable_gpu_data()); } } template <typename Dtype> void ClusterCentroidDistLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* centroid_diff = compute_dist_ ? 
bottom[1]->mutable_gpu_diff() : this->blobs_[0]->mutable_gpu_diff(); if (!compute_dist_) { if (is_sample_base_cls) { } else{ if (!initialized_) { caffe_gpu_set(bottom[0]->count(), (Dtype)0, bottom_diff); caffe_gpu_set(this->blobs_[0]->count(), (Dtype)0, centroid_diff); return; } } } //const Dtype* top_diff = temp_diff_.gpu_data(); //caffe_gpu_div(top[0]->count(), top[0]->gpu_diff(), top[0]->gpu_data(), top_cache_.mutable_gpu_data()); //caffe_gpu_scal(top[0]->count(), (Dtype)(scale / 2.0), top_cache_.mutable_gpu_data()); //if (compute_dist_ && bottom[0]->data() == bottom[1]->data()) //{ // set_diag_zero<Dtype> << <CAFFE_GET_BLOCKS(top_cache_.num()), CAFFE_CUDA_NUM_THREADS >> >(top_cache_.num(), top_cache_.mutable_gpu_data()); //} const int top_count = is_self_dist_ ? top_cache_.count() : top[0]->count(); if (is_self_dist_) { //expand_diag<Dtype> << <CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS >> >(top_count, top[0]->num(), top_cache_.mutable_gpu_diff(), top[0]->gpu_diff()); caffe_copy(top[0]->count(), top[0]->gpu_diff(), top_cache_.mutable_gpu_diff()); if (!use_square_) { caffe_gpu_div(top_count, top_cache_.gpu_diff(), top_cache_.gpu_data(), top_cache_.mutable_gpu_diff()); caffe_gpu_scal(top_count, (Dtype)(scale / 2.0), top_cache_.mutable_gpu_diff()); } set_diag_zero<Dtype> << <CAFFE_GET_BLOCKS(top_cache_.num()), CAFFE_CUDA_NUM_THREADS >> >(top_cache_.num(), top_cache_.mutable_gpu_diff()); } else{ if (!use_square_) { caffe_gpu_div(top[0]->count(), top[0]->gpu_diff(), top[0]->gpu_data(), top_cache_.mutable_gpu_data()); caffe_gpu_scal(top[0]->count(), (Dtype)(scale / 2.0), top_cache_.mutable_gpu_data()); } else caffe_copy(top[0]->count(), top[0]->gpu_diff(), top_cache_.mutable_gpu_data()); } const Dtype* top_diff = is_self_dist_ ? top_cache_.gpu_diff() : top_cache_.gpu_data(); //top[0]->gpu_diff(); //const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* centroid_data = compute_dist_ ? 
bottom[1]->gpu_data() : this->blobs_[0]->gpu_data(); const Dtype* top_data = is_self_dist_ ? top_cache_.gpu_data() : top[0]->gpu_data(); const int count_blobs_ = compute_dist_ ? bottom[1]->count() : this->blobs_[0]->count(); //const Dtype* bottom_data = bottom_cache_.gpu_diff(); //= bottom[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); //**************propagate diff to centroid*************** // dot top_diff with feat data. if ((compute_dist_ && propagate_down[1]) || (!compute_dist_ && this->param_propagate_down_[0])){ caffe_gpu_gemm( CblasTrans, CblasNoTrans, num_cluster_, centroid_dim_, top[0]->num(), (Dtype)scale,// / centroid_dim_, top_diff, //bottom[0]->gpu_data(), bottom_data, (Dtype)0.0, square_cluster_.mutable_gpu_data() ); // sum top_diff along num caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, 1, num_cluster_, top[0]->num(), (Dtype)scale, // / centroid_dim_, ones_.gpu_data(), top_diff, (Dtype)0.0, column_.mutable_gpu_data() ); // expand top_diff along centroid_dim_. 
caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, num_cluster_, centroid_dim_, 1, (Dtype)1.0, column_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, //centroid_diff square_cluster_.mutable_gpu_diff() ); // multipy with centroid data //caffe_gpu_mul(this->blobs_[0]->count(), centroid_diff, centroid_data, centroid_diff); caffe_gpu_mul(count_blobs_, square_cluster_.mutable_gpu_diff(), centroid_data, square_cluster_.mutable_gpu_diff()); // sum all diff caffe_gpu_sub(count_blobs_, square_cluster_.mutable_gpu_diff(), square_cluster_.gpu_data(), square_cluster_.mutable_gpu_diff()); caffe_gpu_add(count_blobs_, square_cluster_.mutable_gpu_diff(), centroid_diff, centroid_diff); } if (propagate_down[0]) { //**************propagate diff to feat data************** // dot top_diff with centroid data caffe_gpu_gemm( CblasNoTrans, //CblasTrans, CblasNoTrans, top[0]->num(), centroid_dim_, num_cluster_, (Dtype)scale, // / centroid_dim_, top_diff, centroid_data, (Dtype)0.0, square_feat_.mutable_gpu_data() ); // sum top_diff along num_cluster_ caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, top[0]->num(), 1, num_cluster_, (Dtype)scale,// / centroid_dim_, top_diff, ones_.gpu_data(), (Dtype)0.0, column_.mutable_gpu_data() ); //expand top_diff along centroid_dim_. caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, top[0]->num(), centroid_dim_, 1, (Dtype)1.0, column_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, bottom_diff ); //multipy with centroid data. 
caffe_gpu_mul(bottom[0]->count(), bottom_diff, bottom_data/*bottom[0]->gpu_data()*/, bottom_diff); //sum all diff caffe_gpu_sub(bottom[0]->count(), bottom_diff, square_feat_.gpu_data(), bottom_diff); } //Dtype scalar_factor = (top[0]->asum_diff() / top[0]->count()) / (bottom[0]->asum_diff() / bottom[0]->count());//top_cache_.asum_data(); //LOG(INFO) << "scale:" << scalar_factor << "," << bottom[0]->cpu_diff()[0] << "," << top[0]->cpu_diff()[0] << "," << top[0]->cpu_data()[0]; //caffe_gpu_scal(top[0]->count(), (Dtype)scalar_factor, bottom[0]->mutable_gpu_diff()); //caffe_gpu_div(bottom[0]->count(), bottom_diff, bottom_cache_.gpu_data(), bottom_diff); //caffe_gpu_mul(bottom[0]->count(), bottom_diff, bottom_cache_.gpu_diff(), bottom_cache_.mutable_gpu_data()); //caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // bottom[0]->num(), // 1, // centroid_dim_, // (Dtype)1.0, // bottom_cache_.gpu_data(), // ones_.gpu_data(), // (Dtype)0.0, // column_.mutable_gpu_data() // ); //caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // bottom[0]->num(), // centroid_dim_, // 1, // (Dtype)1.0, // column_.gpu_data(), // ones_.gpu_data(), // (Dtype)0.0, // bottom_cache_.mutable_gpu_data() // ); //caffe_gpu_mul(bottom[0]->count(), bottom_cache_.gpu_diff(), bottom_cache_.gpu_data(), bottom_cache_.mutable_gpu_data()); //if (use_T_) //{ // caffe_gpu_scal(bottom[0]->count(), (Dtype)(1.0 / T), bottom_cache_.mutable_gpu_data()); // caffe_gpu_scal(bottom[0]->count(), (Dtype)( T), bottom_diff); //} //caffe_gpu_sub(bottom[0]->count(), bottom_diff, bottom_cache_.gpu_data(), bottom_diff); ////////debug //for (int i = 0; i < 20; ++i) // LOG(INFO) <<"#"<<i<<": " << this->blobs_[0]->cpu_data()[i] << "," << this->blobs_[0]->cpu_diff()[i] << "," // << bottom_cache_.cpu_diff()[i] <<"," <<bottom[0]->cpu_data()[i] << "," << bottom[0]->cpu_diff()[i] << "," << top[0]->cpu_data()[i] << "," << top[0]->cpu_diff()[i]; //caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // top[0]->num(), // centroid_dim_, // 
num_cluster_, // (Dtype)1.0*scale, // top_diff, // this->blobs_[1]->gpu_data(), // (Dtype)0.0, // square_feat_.mutable_gpu_data()); ////multipy feat data //caffe_gpu_mul(bottom[0]->count(), // square_feat_.gpu_data(), // bottom[0]->gpu_data(), // square_feat_.mutable_gpu_data()); //caffe_gpu_mul( // this->blobs_[0]->count(), // centroid_data, // this->blobs_[1]->gpu_data(), // square_cluster_.mutable_gpu_data()); ////dot diff of feat //caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // top[0]->num(), // centroid_dim_, num_cluster_, // (Dtype)-1.0*scale, // top_diff, // square_cluster_.gpu_data(), // (Dtype)0.0, bottom_diff); ////dot diff of centroid //problem? //caffe_gpu_gemm(CblasTrans, // CblasNoTrans, // num_cluster_, // centroid_dim_, // top[0]->num(), // (Dtype)-1.0*scale, // top_diff, top_data, // (Dtype)0.0, // centroid_diff); //caffe_gpu_mul(this->blobs_[0]->count(), // centroid_diff, // this->blobs_[1]->gpu_data(), // centroid_diff); ////sum diff along feat num //caffe_gpu_gemm(CblasNoTrans, // CblasNoTrans, // 1, // num_cluster_, // top[0]->num(), // (Dtype)1.0*scale, // ones_.gpu_data(), // top_diff,(Dtype)0.0, // column_.mutable_gpu_data()); ////span diff along centroid dim //caffe_gpu_gemm(CblasNoTrans, // CblasNoTrans, // num_cluster_, // centroid_dim_, // 1, // (Dtype)1.0, // column_.gpu_data(), // ones_.gpu_data(), // (Dtype)0.0, // square_cluster_.mutable_gpu_data()); ////multipy centroid data //caffe_gpu_mul( // this->blobs_[0]->count(), // square_cluster_.gpu_data(), // this->blobs_[0]->gpu_data(), // square_cluster_.mutable_gpu_data()); ////multiply std normalizar. //caffe_gpu_mul( // this->blobs_[0]->count(), // square_cluster_.gpu_data(), // this->blobs_[1]->gpu_data(), // square_cluster_.mutable_gpu_data()); ////sum all diff. 
//caffe_gpu_add( // bottom[0]->count(), // bottom_diff, // square_feat_.gpu_data(), // bottom_diff); //caffe_gpu_add( // this->blobs_[0]->count(), // centroid_diff, // square_cluster_.gpu_data(), // centroid_diff); } INSTANTIATE_LAYER_GPU_FUNCS(ClusterCentroidDistLayer); }
147156b20d10015d6611455c0e00072014885555.cu
#include "caffe/layers/cluster_centroid_dist_layer.hpp" #include <vector> namespace caffe{ //template <typename Dtype> //__global__ void eucliean_vec_forward(const int n, const int num, const int num_cluster, // const int feat_dim, const Dtype* bottom_data, const Dtype* centroid_data, // Dtype* top_data) //{ // CUDA_KERNEL_LOOP(index, n) { // int k_idx = n % num_cluster; // int n_idx = n / num_cluster; // const Dtype* bottom_ptr = bottom_data + n_idx*feat_dim; // const Dtype* centroid_ptr = centroid_data + k_idx*feat_dim; // Dtype* top_ptr = top_data + n; // top_ptr[0] = 0; // for (int i = 0; i < feat_dim; ++i) // { // top_ptr[0] += 0.5*(bottom_ptr[i] - centroid_ptr[i])*(bottom_ptr[i] - centroid_ptr[i]); // } // } //} template <typename Dtype> __global__ void set_diag_zero(const int n, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { int idx = index*n + index; y[idx] = 0; } } template <typename Dtype> __global__ void delete_diag(const int n, const int sqrtN, Dtype* x, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { //int idx = index*n + index; //y[idx] = 0; int dimx = index % sqrtN; int dimy = index / sqrtN; if (dimx != dimy) { if (dimx > dimy) { int idx = dimy*(sqrtN - 1) + dimx - 1; y[idx] = x[index]; } else{ int idx = dimy*(sqrtN - 1) + dimx; y[idx] = x[index]; } } else { x[index] = 0; } } } template <typename Dtype> __global__ void expand_diag(const int n, const int sqrtN, Dtype* x, const Dtype* y) { CUDA_KERNEL_LOOP(index, n) { //int idx = index*n + index; //y[idx] = 0; int dimx = index % sqrtN; int dimy = index / sqrtN; if (dimx != dimy) { if (dimx > dimy) { int idx = dimy*(sqrtN - 1) + dimx - 1; x[index] = y[idx]; } else{ int idx = dimy*(sqrtN - 1) + dimx; x[index] = y[idx]; } } else{ x[index] = 0; } } } template <typename Dtype> void ClusterCentroidDistLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //caffe_gpu_mul(bottom[0]->count(), bottom[0]->gpu_data(), bottom[0]->gpu_data(), bottom_cache_.mutable_gpu_data()); 
//caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // bottom[0]->num(), // 1, // centroid_dim_, // (Dtype)1.0, // bottom_cache_.gpu_data(), // ones_.gpu_data(), // (Dtype)0.0, // bottom_cache_.mutable_gpu_diff() // ); //caffe_gpu_powx(bottom[0]->num(), bottom_cache_.gpu_diff(), (Dtype)0.5, bottom_cache_.mutable_gpu_diff()); //caffe_gpu_gemm( // CblasNoTrans, CblasNoTrans, // bottom[0]->num(), // centroid_dim_, // 1, // (Dtype)1.0, // bottom_cache_.gpu_diff(), // ones_.gpu_data(), // (Dtype)0.0, // bottom_cache_.mutable_gpu_data() // ); //caffe_gpu_div(bottom[0]->count(), bottom[0]->gpu_data(), bottom_cache_.gpu_data(), bottom_cache_.mutable_gpu_diff()); //if (use_T_) // caffe_gpu_scal(bottom[0]->count(), (Dtype)T, bottom_cache_.mutable_gpu_diff()); //const Dtype* bottom_data = bottom_cache_.gpu_diff(); //= bottom[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = is_self_dist_ ? top_cache_.mutable_gpu_data() : top[0]->mutable_gpu_data(); const Dtype* centroid_data = compute_dist_ ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data(); const int count_blobs_ = compute_dist_ ? bottom[1]->count() : this->blobs_[0]->count(); const int top_count = is_self_dist_ ? top_cache_.count() : top[0]->count(); if (!compute_dist_) { if (is_sample_base_cls) { caffe_copy(bottom[0]->count(), bottom_data, this->blobs_[0]->mutable_gpu_data()); for (int i = 0; i < top[1]->count(); ++i) { top[1]->mutable_cpu_data()[i] = i; } } else { if (!initialized_) { if (init_count_ >= this->blobs_[0]->count()) { initialized_ = true; LOG(INFO) << "intial centroid complete."; } else { int count = min(bottom[0]->count(), this->blobs_[0]->count() - init_count_); caffe_copy(count, bottom_data, this->blobs_[0]->mutable_gpu_data() + init_count_); caffe_rng_gaussian<Dtype>(top[0]->count(), Dtype(0), Dtype(1), top[0]->mutable_cpu_data()); init_count_ += count; LOG(INFO) << init_count_; return; } } } } // square of data. 
caffe_gpu_mul(bottom[0]->count(), bottom_data, bottom_data, square_feat_.mutable_gpu_data()); // sum along centroid_dim_ caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, num_samp_, //bottom[0]->num(), 1, centroid_dim_, (Dtype)0.5, // / centroid_dim_, square_feat_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, column_.mutable_gpu_data()); // span along num_cluster_ dim caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, //bottom[0]->num(), num_samp_, num_cluster_, 1, (Dtype)1.0, column_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, top_data ); // dot product of centroid and feat caffe_gpu_gemm( CblasNoTrans, CblasTrans, //bottom[0]->num(), num_samp_, num_cluster_, centroid_dim_, (Dtype)-1.0, // / centroid_dim_, bottom_data, centroid_data, (Dtype)1.0, top_data); //square of centroid. caffe_gpu_mul( count_blobs_, centroid_data, centroid_data, square_cluster_.mutable_gpu_data()); //sum along centroid_dim_ caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, num_cluster_, 1, centroid_dim_, (Dtype)1.0, // / centroid_dim_, square_cluster_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, column_.mutable_gpu_data()); //span along feat num caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, //bottom[0]->num(), num_samp_, num_cluster_, 1, (Dtype)0.5, ones_.gpu_data(), column_.gpu_data(), (Dtype)1.0, top_data);//cache_cluster_.mutable_gpu_data()); //caffe_gpu_powx(top[0]->count(), top_data, (Dtype)0.5, top_data); //caffe_gpu_scal(top[0]->count(), (Dtype)scale, top_data); if (!use_square_) caffe_gpu_powx(top_count, top_data, (Dtype)0.5, top_data); caffe_gpu_scal(top_count, (Dtype)scale, top_data); //if (compute_dist_ && bottom[0]->data() == bottom[1]->data()) if (is_self_dist_) { /*set_diag_zero<Dtype> << <CAFFE_GET_BLOCKS(top[0]->num()), CAFFE_CUDA_NUM_THREADS >> >(top[0]->num(), top_data);*/ //delete_diag<Dtype> << <CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS >> >(top_count, top[0]->num(), top_data, top[0]->mutable_gpu_data()); set_diag_zero<Dtype> << <CAFFE_GET_BLOCKS(top[0]->num()), CAFFE_CUDA_NUM_THREADS >> 
>(top[0]->num(), top_data); caffe_copy(top[0]->count(), top_data, top[0]->mutable_gpu_data()); } } template <typename Dtype> void ClusterCentroidDistLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* centroid_diff = compute_dist_ ? bottom[1]->mutable_gpu_diff() : this->blobs_[0]->mutable_gpu_diff(); if (!compute_dist_) { if (is_sample_base_cls) { } else{ if (!initialized_) { caffe_gpu_set(bottom[0]->count(), (Dtype)0, bottom_diff); caffe_gpu_set(this->blobs_[0]->count(), (Dtype)0, centroid_diff); return; } } } //const Dtype* top_diff = temp_diff_.gpu_data(); //caffe_gpu_div(top[0]->count(), top[0]->gpu_diff(), top[0]->gpu_data(), top_cache_.mutable_gpu_data()); //caffe_gpu_scal(top[0]->count(), (Dtype)(scale / 2.0), top_cache_.mutable_gpu_data()); //if (compute_dist_ && bottom[0]->data() == bottom[1]->data()) //{ // set_diag_zero<Dtype> << <CAFFE_GET_BLOCKS(top_cache_.num()), CAFFE_CUDA_NUM_THREADS >> >(top_cache_.num(), top_cache_.mutable_gpu_data()); //} const int top_count = is_self_dist_ ? 
top_cache_.count() : top[0]->count(); if (is_self_dist_) { //expand_diag<Dtype> << <CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS >> >(top_count, top[0]->num(), top_cache_.mutable_gpu_diff(), top[0]->gpu_diff()); caffe_copy(top[0]->count(), top[0]->gpu_diff(), top_cache_.mutable_gpu_diff()); if (!use_square_) { caffe_gpu_div(top_count, top_cache_.gpu_diff(), top_cache_.gpu_data(), top_cache_.mutable_gpu_diff()); caffe_gpu_scal(top_count, (Dtype)(scale / 2.0), top_cache_.mutable_gpu_diff()); } set_diag_zero<Dtype> << <CAFFE_GET_BLOCKS(top_cache_.num()), CAFFE_CUDA_NUM_THREADS >> >(top_cache_.num(), top_cache_.mutable_gpu_diff()); } else{ if (!use_square_) { caffe_gpu_div(top[0]->count(), top[0]->gpu_diff(), top[0]->gpu_data(), top_cache_.mutable_gpu_data()); caffe_gpu_scal(top[0]->count(), (Dtype)(scale / 2.0), top_cache_.mutable_gpu_data()); } else caffe_copy(top[0]->count(), top[0]->gpu_diff(), top_cache_.mutable_gpu_data()); } const Dtype* top_diff = is_self_dist_ ? top_cache_.gpu_diff() : top_cache_.gpu_data(); //top[0]->gpu_diff(); //const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* centroid_data = compute_dist_ ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data(); const Dtype* top_data = is_self_dist_ ? top_cache_.gpu_data() : top[0]->gpu_data(); const int count_blobs_ = compute_dist_ ? bottom[1]->count() : this->blobs_[0]->count(); //const Dtype* bottom_data = bottom_cache_.gpu_diff(); //= bottom[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); //**************propagate diff to centroid*************** // dot top_diff with feat data. 
if ((compute_dist_ && propagate_down[1]) || (!compute_dist_ && this->param_propagate_down_[0])){ caffe_gpu_gemm( CblasTrans, CblasNoTrans, num_cluster_, centroid_dim_, top[0]->num(), (Dtype)scale,// / centroid_dim_, top_diff, //bottom[0]->gpu_data(), bottom_data, (Dtype)0.0, square_cluster_.mutable_gpu_data() ); // sum top_diff along num caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, 1, num_cluster_, top[0]->num(), (Dtype)scale, // / centroid_dim_, ones_.gpu_data(), top_diff, (Dtype)0.0, column_.mutable_gpu_data() ); // expand top_diff along centroid_dim_. caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, num_cluster_, centroid_dim_, 1, (Dtype)1.0, column_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, //centroid_diff square_cluster_.mutable_gpu_diff() ); // multipy with centroid data //caffe_gpu_mul(this->blobs_[0]->count(), centroid_diff, centroid_data, centroid_diff); caffe_gpu_mul(count_blobs_, square_cluster_.mutable_gpu_diff(), centroid_data, square_cluster_.mutable_gpu_diff()); // sum all diff caffe_gpu_sub(count_blobs_, square_cluster_.mutable_gpu_diff(), square_cluster_.gpu_data(), square_cluster_.mutable_gpu_diff()); caffe_gpu_add(count_blobs_, square_cluster_.mutable_gpu_diff(), centroid_diff, centroid_diff); } if (propagate_down[0]) { //**************propagate diff to feat data************** // dot top_diff with centroid data caffe_gpu_gemm( CblasNoTrans, //CblasTrans, CblasNoTrans, top[0]->num(), centroid_dim_, num_cluster_, (Dtype)scale, // / centroid_dim_, top_diff, centroid_data, (Dtype)0.0, square_feat_.mutable_gpu_data() ); // sum top_diff along num_cluster_ caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, top[0]->num(), 1, num_cluster_, (Dtype)scale,// / centroid_dim_, top_diff, ones_.gpu_data(), (Dtype)0.0, column_.mutable_gpu_data() ); //expand top_diff along centroid_dim_. caffe_gpu_gemm( CblasNoTrans, CblasNoTrans, top[0]->num(), centroid_dim_, 1, (Dtype)1.0, column_.gpu_data(), ones_.gpu_data(), (Dtype)0.0, bottom_diff ); //multipy with centroid data. 
caffe_gpu_mul(bottom[0]->count(), bottom_diff, bottom_data/*bottom[0]->gpu_data()*/, bottom_diff); //sum all diff caffe_gpu_sub(bottom[0]->count(), bottom_diff, square_feat_.gpu_data(), bottom_diff); } //Dtype scalar_factor = (top[0]->asum_diff() / top[0]->count()) / (bottom[0]->asum_diff() / bottom[0]->count());//top_cache_.asum_data(); //LOG(INFO) << "scale:" << scalar_factor << "," << bottom[0]->cpu_diff()[0] << "," << top[0]->cpu_diff()[0] << "," << top[0]->cpu_data()[0]; //caffe_gpu_scal(top[0]->count(), (Dtype)scalar_factor, bottom[0]->mutable_gpu_diff()); //caffe_gpu_div(bottom[0]->count(), bottom_diff, bottom_cache_.gpu_data(), bottom_diff); //caffe_gpu_mul(bottom[0]->count(), bottom_diff, bottom_cache_.gpu_diff(), bottom_cache_.mutable_gpu_data()); //caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // bottom[0]->num(), // 1, // centroid_dim_, // (Dtype)1.0, // bottom_cache_.gpu_data(), // ones_.gpu_data(), // (Dtype)0.0, // column_.mutable_gpu_data() // ); //caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // bottom[0]->num(), // centroid_dim_, // 1, // (Dtype)1.0, // column_.gpu_data(), // ones_.gpu_data(), // (Dtype)0.0, // bottom_cache_.mutable_gpu_data() // ); //caffe_gpu_mul(bottom[0]->count(), bottom_cache_.gpu_diff(), bottom_cache_.gpu_data(), bottom_cache_.mutable_gpu_data()); //if (use_T_) //{ // caffe_gpu_scal(bottom[0]->count(), (Dtype)(1.0 / T), bottom_cache_.mutable_gpu_data()); // caffe_gpu_scal(bottom[0]->count(), (Dtype)( T), bottom_diff); //} //caffe_gpu_sub(bottom[0]->count(), bottom_diff, bottom_cache_.gpu_data(), bottom_diff); ////////debug //for (int i = 0; i < 20; ++i) // LOG(INFO) <<"#"<<i<<": " << this->blobs_[0]->cpu_data()[i] << "," << this->blobs_[0]->cpu_diff()[i] << "," // << bottom_cache_.cpu_diff()[i] <<"," <<bottom[0]->cpu_data()[i] << "," << bottom[0]->cpu_diff()[i] << "," << top[0]->cpu_data()[i] << "," << top[0]->cpu_diff()[i]; //caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // top[0]->num(), // centroid_dim_, // 
num_cluster_, // (Dtype)1.0*scale, // top_diff, // this->blobs_[1]->gpu_data(), // (Dtype)0.0, // square_feat_.mutable_gpu_data()); ////multipy feat data //caffe_gpu_mul(bottom[0]->count(), // square_feat_.gpu_data(), // bottom[0]->gpu_data(), // square_feat_.mutable_gpu_data()); //caffe_gpu_mul( // this->blobs_[0]->count(), // centroid_data, // this->blobs_[1]->gpu_data(), // square_cluster_.mutable_gpu_data()); ////dot diff of feat //caffe_gpu_gemm( // CblasNoTrans, // CblasNoTrans, // top[0]->num(), // centroid_dim_, num_cluster_, // (Dtype)-1.0*scale, // top_diff, // square_cluster_.gpu_data(), // (Dtype)0.0, bottom_diff); ////dot diff of centroid //problem? //caffe_gpu_gemm(CblasTrans, // CblasNoTrans, // num_cluster_, // centroid_dim_, // top[0]->num(), // (Dtype)-1.0*scale, // top_diff, top_data, // (Dtype)0.0, // centroid_diff); //caffe_gpu_mul(this->blobs_[0]->count(), // centroid_diff, // this->blobs_[1]->gpu_data(), // centroid_diff); ////sum diff along feat num //caffe_gpu_gemm(CblasNoTrans, // CblasNoTrans, // 1, // num_cluster_, // top[0]->num(), // (Dtype)1.0*scale, // ones_.gpu_data(), // top_diff,(Dtype)0.0, // column_.mutable_gpu_data()); ////span diff along centroid dim //caffe_gpu_gemm(CblasNoTrans, // CblasNoTrans, // num_cluster_, // centroid_dim_, // 1, // (Dtype)1.0, // column_.gpu_data(), // ones_.gpu_data(), // (Dtype)0.0, // square_cluster_.mutable_gpu_data()); ////multipy centroid data //caffe_gpu_mul( // this->blobs_[0]->count(), // square_cluster_.gpu_data(), // this->blobs_[0]->gpu_data(), // square_cluster_.mutable_gpu_data()); ////multiply std normalizar. //caffe_gpu_mul( // this->blobs_[0]->count(), // square_cluster_.gpu_data(), // this->blobs_[1]->gpu_data(), // square_cluster_.mutable_gpu_data()); ////sum all diff. 
//caffe_gpu_add( // bottom[0]->count(), // bottom_diff, // square_feat_.gpu_data(), // bottom_diff); //caffe_gpu_add( // this->blobs_[0]->count(), // centroid_diff, // square_cluster_.gpu_data(), // centroid_diff); } INSTANTIATE_LAYER_GPU_FUNCS(ClusterCentroidDistLayer); }
ca1534a3456f731a8bb4ad9db7be4e67e78c2025.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Tingxing Dong @author Azzam Haidar */ #include "magma_internal.h" #include "magma_templates.h" #define PRECISION_c #include "gemv_template_kernel_batched_hip.cuh" #include "gemv_config/gemvn_param.h" #include "gemv_config/gemvt_param.h" #define version(s,v) s ## _V_ ## v /** Purpose ------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] dA_array Array of pointers, dimension (batchCount). Each is a COMPLEX array A of DIMENSION ( ldda, n ) on the GPU @param[in] ldda INTEGER LDDA specifies the leading dimension of A. @param[in] dx_array Array of pointers, dimension (batchCount). Each is a COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX On entry, ALPHA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy_array Array of pointers, dimension (batchCount). 
Each is a COMPLEX array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_sblas2 ********************************************************************/ extern "C" void magmablas_cgemv_batched( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_ptr dA_array[], magma_int_t ldda, magmaFloatComplex_ptr dx_array[], magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex_ptr dy_array[], magma_int_t incy, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( trans == MagmaNoTrans ) { if (max(m, n) <= 96) { // small size if (m < n) { // Fat matrix if ( m <= 16) { gemvn_template_batched<magmaFloatComplex, version(N, 70)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if ( m <= 32) { gemvn_template_batched<magmaFloatComplex, version(N, 100)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if ( m <= 64) { gemvn_template_batched<magmaFloatComplex, version(N, 117)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvn_template_batched<magmaFloatComplex, version(N, 131)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } else { // Tall or square matrix if ( n <= 32) { gemvn_template_batched<magmaFloatComplex, version(N, 129)> ( m, n, alpha, dA_array, ldda, 
dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvn_template_batched<magmaFloatComplex, version(N, 131)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } } else { // big size if (m < n) { // Fat matrix if (m <= 8) { gemvn_template_batched<magmaFloatComplex, version(N, 36)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if (m <= 16) { gemvn_template_batched<magmaFloatComplex, version(N, 70)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if (m <= 32) { gemvn_template_batched<magmaFloatComplex, version(N, 100)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if (m <= 32) { gemvn_template_batched<magmaFloatComplex, version(N, 116)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvn_template_batched<magmaFloatComplex, version(N, 133)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } else { // Tall or square matrix if (m <= 256) { gemvn_template_batched<magmaFloatComplex, version(N, 137)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvn_template_batched<magmaFloatComplex, version(N, 140)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } }// big size } else { if (max(m, n) <= 96) // small size { if (n <= 8) { gemvc_template_batched<magmaFloatComplex, version(T, 42)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvc_template_batched<magmaFloatComplex, version(T, 46)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } else // big size { if (m <= n) // Fat or square matrix { if (m <= 64) { gemvc_template_batched<magmaFloatComplex, version(T, 47)> ( trans, 
m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvc_template_batched<magmaFloatComplex, version(T, 90)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } else// (m > n) Tall matrix { if (n <= 8) { gemvc_template_batched<magmaFloatComplex, version(T, 130)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvc_template_batched<magmaFloatComplex, version(T, 90)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } } } } ///////////////////////////////////////////////////////////////////////////////////////////////////
ca1534a3456f731a8bb4ad9db7be4e67e78c2025.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Tingxing Dong @author Azzam Haidar */ #include "magma_internal.h" #include "magma_templates.h" #define PRECISION_c #include "gemv_template_kernel_batched.cuh" #include "gemv_config/gemvn_param.h" #include "gemv_config/gemvt_param.h" #define version(s,v) s ## _V_ ## v /** Purpose ------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] dA_array Array of pointers, dimension (batchCount). Each is a COMPLEX array A of DIMENSION ( ldda, n ) on the GPU @param[in] ldda INTEGER LDDA specifies the leading dimension of A. @param[in] dx_array Array of pointers, dimension (batchCount). Each is a COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX On entry, ALPHA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy_array Array of pointers, dimension (batchCount). Each is a COMPLEX array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. 
@param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_sblas2 ********************************************************************/ extern "C" void magmablas_cgemv_batched( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_ptr dA_array[], magma_int_t ldda, magmaFloatComplex_ptr dx_array[], magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex_ptr dy_array[], magma_int_t incy, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( trans == MagmaNoTrans ) { if (max(m, n) <= 96) { // small size if (m < n) { // Fat matrix if ( m <= 16) { gemvn_template_batched<magmaFloatComplex, version(N, 70)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if ( m <= 32) { gemvn_template_batched<magmaFloatComplex, version(N, 100)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if ( m <= 64) { gemvn_template_batched<magmaFloatComplex, version(N, 117)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvn_template_batched<magmaFloatComplex, version(N, 131)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } else { // Tall or square matrix if ( n <= 32) { gemvn_template_batched<magmaFloatComplex, version(N, 129)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvn_template_batched<magmaFloatComplex, version(N, 131)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, 
batchCount, queue ); } } } else { // big size if (m < n) { // Fat matrix if (m <= 8) { gemvn_template_batched<magmaFloatComplex, version(N, 36)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if (m <= 16) { gemvn_template_batched<magmaFloatComplex, version(N, 70)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if (m <= 32) { gemvn_template_batched<magmaFloatComplex, version(N, 100)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else if (m <= 32) { gemvn_template_batched<magmaFloatComplex, version(N, 116)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvn_template_batched<magmaFloatComplex, version(N, 133)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } else { // Tall or square matrix if (m <= 256) { gemvn_template_batched<magmaFloatComplex, version(N, 137)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvn_template_batched<magmaFloatComplex, version(N, 140)> ( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } }// big size } else { if (max(m, n) <= 96) // small size { if (n <= 8) { gemvc_template_batched<magmaFloatComplex, version(T, 42)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvc_template_batched<magmaFloatComplex, version(T, 46)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } else // big size { if (m <= n) // Fat or square matrix { if (m <= 64) { gemvc_template_batched<magmaFloatComplex, version(T, 47)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvc_template_batched<magmaFloatComplex, version(T, 90)> ( trans, m, n, alpha, dA_array, ldda, 
dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } else// (m > n) Tall matrix { if (n <= 8) { gemvc_template_batched<magmaFloatComplex, version(T, 130)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } else { gemvc_template_batched<magmaFloatComplex, version(T, 90)> ( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue ); } } } } } ///////////////////////////////////////////////////////////////////////////////////////////////////
24f1df17385cdd570e50ee1f0236b2695e9fc68f.hip
// !!! This is a file automatically generated by hipify!!! #include "Search.cuh" void generateQueryMask(char *query, QueryMask *queryMask) { //Add in transpose of query size_t queryLength = std::strlen(query); size_t maskCount = (queryLength - 1) / MASK_SIZE + 1; MASK_TYPE* matchMask; hipError_t err = hipMallocManaged(&matchMask, 4 * sizeof(MASK_TYPE) * maskCount); if (err != hipSuccess) printf("On Malloc Query Mask: %s\n", hipGetErrorString(err)); size_t start_index = (maskCount * MASK_SIZE) - queryLength;//location of first active byte for (int i = start_index; i < queryLength + start_index; i++) { int maskX = i / MASK_SIZE; //The current char being worked on matchMask[0 * maskCount + maskX] <<= 1; matchMask[1 * maskCount + maskX] <<= 1; matchMask[2 * maskCount + maskX] <<= 1; matchMask[3 * maskCount + maskX] <<= 1; char c = tolower(query[i - start_index]); switch (c) { case 'a': matchMask[0 * maskCount + maskX] |= 1; break; case 'c': matchMask[1 * maskCount + maskX] |= 1; break; case 'g': matchMask[2 * maskCount + maskX] |= 1; break; case 't': matchMask[3 * maskCount + maskX] |= 1; break; default: printf("Unrecognized query character: %c\n", c); } } queryMask->maskCount = maskCount; queryMask->queryLength = queryLength; queryMask->matchMask = matchMask; }
24f1df17385cdd570e50ee1f0236b2695e9fc68f.cu
#include "Search.cuh" void generateQueryMask(char *query, QueryMask *queryMask) { //Add in transpose of query size_t queryLength = std::strlen(query); size_t maskCount = (queryLength - 1) / MASK_SIZE + 1; MASK_TYPE* matchMask; cudaError_t err = cudaMallocManaged(&matchMask, 4 * sizeof(MASK_TYPE) * maskCount); if (err != cudaSuccess) printf("On Malloc Query Mask: %s\n", cudaGetErrorString(err)); size_t start_index = (maskCount * MASK_SIZE) - queryLength;//location of first active byte for (int i = start_index; i < queryLength + start_index; i++) { int maskX = i / MASK_SIZE; //The current char being worked on matchMask[0 * maskCount + maskX] <<= 1; matchMask[1 * maskCount + maskX] <<= 1; matchMask[2 * maskCount + maskX] <<= 1; matchMask[3 * maskCount + maskX] <<= 1; char c = tolower(query[i - start_index]); switch (c) { case 'a': matchMask[0 * maskCount + maskX] |= 1; break; case 'c': matchMask[1 * maskCount + maskX] |= 1; break; case 'g': matchMask[2 * maskCount + maskX] |= 1; break; case 't': matchMask[3 * maskCount + maskX] |= 1; break; default: printf("Unrecognized query character: %c\n", c); } } queryMask->maskCount = maskCount; queryMask->queryLength = queryLength; queryMask->matchMask = matchMask; }
caab2eef727e88acee89fb9cf37f441be00f8622.hip
// !!! This is a file automatically generated by hipify!!! // MIT License // Copyright (c) 2018 SqrtPapere and Luca Angioloni // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include <iostream> #include <hip/hip_runtime.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #include <sys/time.h> #include "input_parser.h" using namespace std; int print = 0; void print_help(){ std::cout << "usage: cuda -p <input image path> [-t <number of threads (int) (default:platform dependent)>] [-json]" << std::endl << std::endl; std::cout << "To see this menu again: cuda -h" << std::endl; } unsigned long * integralImage(uint8_t*x, int n, int m){ unsigned long * out = (unsigned long *)malloc(n*m*sizeof(unsigned long)); for (int i = 0; i < n; ++i) { for (int j = 0; j < m; ++j) { unsigned long val = x[i*m + j]; if (i>=1) { val += out[(i-1)*m + j]; if (j>=1) { val += out[i*m + j - 1] - out[(i-1)*m + j - 1]; } } else { if (j>=1) { val += out[i*m + j -1]; } } out[i*m + j] = val; } } return out; } __global__ void sum_rows(unsigned long *a, unsigned long *b, int rowsTotal, int colsTotal, int n_thread) { // Thread Ids equal to block Ids because the each blocks contains one thread only. //int col = blockIdx.x; int row = blockIdx.x; int size_per_thread = rowsTotal/n_thread; int start = row*size_per_thread; int end = start + size_per_thread; if (row >=rowsTotal){ return; } if (row==n_thread-1) { start = (n_thread-1)*size_per_thread; end = rowsTotal; } for (int k = start; k < end; ++k) { for (int j = 0; j < colsTotal; ++j) { if (j >=1) { b[k*colsTotal + j] = a[k*colsTotal + j] + b[k*colsTotal + j - 1]; } else { b[k*colsTotal + j] = a[k*colsTotal + j]; } } } } __global__ void sum_columns(unsigned long *a, unsigned long *b, int rowsTotal, int colsTotal, int n_thread) { // Thread Ids equal to block Ids because the each blocks contains one thread only. 
int col = blockIdx.x; //int row = blockIdx.y; int size_per_thread = colsTotal/n_thread; int start = col*size_per_thread; int end = start + size_per_thread; if (col >=colsTotal){ return; } if (col==n_thread-1) { start = (n_thread-1)*size_per_thread; end = colsTotal; } for (int k = start; k < end; ++k) { for (int i = 0; i < rowsTotal; ++i) { if (i >=1) { b[i*colsTotal + k] = a[i*colsTotal + k] + b[(i-1)*colsTotal + k]; } else { b[i*colsTotal + k] = a[i*colsTotal + k]; } } } } int main(int argc, char **argv) { InputParser input(argc, argv); if(input.cmdOptionExists("-h")){ print_help(); return 0; } if(input.cmdOptionExists("-p")){ std::string in_file = input.getCmdOption("-p"); if (in_file == "") { std::cout << "No input file!\n\n"; print_help(); return 2; } bool json = input.cmdOptionExists("-json"); int width, height, bpp; uint8_t* matrix_a = stbi_load(in_file.c_str(), &width, &height, &bpp, 1); int total_e = width*height; int widthstep = total_e*sizeof(unsigned long); unsigned long * a = (unsigned long *)malloc(widthstep); for (int i = 0; i < width *height; ++i) { a[i] = (unsigned long)matrix_a[i]; } if (print==1) { cout << "Input"<<endl; for(int r=0;r<height;r++) { for(int c=0; c<width;c++) { cout << a[r*width+c]<<" "; } cout << endl; } } if(!json){ std::cout << "w: " << width << " h: " << height << " b: " << bpp << std::endl; std::cout << "Calculating Integral Image..." 
<< std::endl; } unsigned long * matrix_b= (unsigned long *)malloc(widthstep); unsigned long * matrix_t= (unsigned long *)malloc(widthstep); for(int r=0;r<height;r++) { for(int c=0; c<width;c++) { matrix_b[r*width+c]=0; matrix_t[r*width+c]=0; } } if(!json){ std::cout << "Copied image" << std::endl; } unsigned long * d_matrix_a, * d_matrix_b, * d_matrix_t; hipMalloc(&d_matrix_a,widthstep); hipMalloc(&d_matrix_b,widthstep); hipMalloc(&d_matrix_t,widthstep); hipMemcpy(d_matrix_a,a,widthstep,hipMemcpyHostToDevice); hipMemcpy(d_matrix_b,matrix_b,widthstep,hipMemcpyHostToDevice); hipMemcpy(d_matrix_t,matrix_t,widthstep,hipMemcpyHostToDevice); if(!json){ std::cout << "starting cuda" << std::endl; } struct timeval start, end; gettimeofday(&start, NULL); int num_thread_h = 0; int num_thread_w = 0; if(input.cmdOptionExists("-t")){ std::string num_threads_string = input.getCmdOption("-t"); if (num_threads_string != ""){ num_thread_h = atoi(num_threads_string.c_str()); num_thread_w = num_thread_h; } else { num_thread_h = height; num_thread_w = width; } } else { num_thread_h = height; num_thread_w = width; } hipLaunchKernelGGL(( sum_rows), dim3(num_thread_h),dim3(1), 0, 0, d_matrix_a, d_matrix_t,height,width, num_thread_h); hipLaunchKernelGGL(( sum_columns), dim3(num_thread_w),dim3(1), 0, 0, d_matrix_t, d_matrix_b,height,width, num_thread_w); hipDeviceSynchronize(); gettimeofday(&end, NULL); double time_tot = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6; if(!json){ std::cout << "Total parallel time: " << time_tot <<std::endl; } hipMemcpy(matrix_b,d_matrix_b,widthstep,hipMemcpyDeviceToHost); hipMemcpy(matrix_t,d_matrix_t,widthstep,hipMemcpyDeviceToHost); if (print==1) { cout << "Cuda Output"<<endl; for(int r=0;r<height;r++) { for(int c=0; c<width;c++) { cout << matrix_b[r*width+c]<<" "; } cout << endl; } } if(!json){ std::cout << "starting serial" << std::endl; } gettimeofday(&start, NULL); unsigned long* integral_image = integralImage(matrix_a, 
height, width); gettimeofday(&end, NULL); double time_tot_serial = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6; if(!json){ std::cout << "Total serial time: " << time_tot_serial <<std::endl; std::cout << "finish serial" << std::endl; } int count =0; for (int i = 0; i < width*height; ++i) { if (integral_image[i]!=matrix_b[i]) { //std::cout<<"errore"; count++; } } if(!json){ std::cout<<"Errors "; std::cout<<count; std::cout<<" over "; std::cout<<width*height<<std::endl; } if (json) { std::cout << "{\"time\": " << time_tot << ", \"width\": " << width << ", \"height\": " << height << ", \"errors\": " << count << ", \"time_serial\": " << time_tot_serial << "}" << std::endl; } hipFree(d_matrix_a); hipFree(d_matrix_b); free(a); free(matrix_b); stbi_image_free(matrix_a); return 0; } else { // no valid arguments std::cout << "No input file!\n\n"; print_help(); return 1; } }
caab2eef727e88acee89fb9cf37f441be00f8622.cu
// MIT License // Copyright (c) 2018 SqrtPapere and Luca Angioloni // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include <iostream> #include <cuda_runtime.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #include <sys/time.h> #include "input_parser.h" using namespace std; int print = 0; void print_help(){ std::cout << "usage: cuda -p <input image path> [-t <number of threads (int) (default:platform dependent)>] [-json]" << std::endl << std::endl; std::cout << "To see this menu again: cuda -h" << std::endl; } unsigned long * integralImage(uint8_t*x, int n, int m){ unsigned long * out = (unsigned long *)malloc(n*m*sizeof(unsigned long)); for (int i = 0; i < n; ++i) { for (int j = 0; j < m; ++j) { unsigned long val = x[i*m + j]; if (i>=1) { val += out[(i-1)*m + j]; if (j>=1) { val += out[i*m + j - 1] - out[(i-1)*m + j - 1]; } } else { if (j>=1) { val += out[i*m + j -1]; } } out[i*m + j] = val; } } return out; } __global__ void sum_rows(unsigned long *a, unsigned long *b, int rowsTotal, int colsTotal, int n_thread) { // Thread Ids equal to block Ids because the each blocks contains one thread only. //int col = blockIdx.x; int row = blockIdx.x; int size_per_thread = rowsTotal/n_thread; int start = row*size_per_thread; int end = start + size_per_thread; if (row >=rowsTotal){ return; } if (row==n_thread-1) { start = (n_thread-1)*size_per_thread; end = rowsTotal; } for (int k = start; k < end; ++k) { for (int j = 0; j < colsTotal; ++j) { if (j >=1) { b[k*colsTotal + j] = a[k*colsTotal + j] + b[k*colsTotal + j - 1]; } else { b[k*colsTotal + j] = a[k*colsTotal + j]; } } } } __global__ void sum_columns(unsigned long *a, unsigned long *b, int rowsTotal, int colsTotal, int n_thread) { // Thread Ids equal to block Ids because the each blocks contains one thread only. 
int col = blockIdx.x; //int row = blockIdx.y; int size_per_thread = colsTotal/n_thread; int start = col*size_per_thread; int end = start + size_per_thread; if (col >=colsTotal){ return; } if (col==n_thread-1) { start = (n_thread-1)*size_per_thread; end = colsTotal; } for (int k = start; k < end; ++k) { for (int i = 0; i < rowsTotal; ++i) { if (i >=1) { b[i*colsTotal + k] = a[i*colsTotal + k] + b[(i-1)*colsTotal + k]; } else { b[i*colsTotal + k] = a[i*colsTotal + k]; } } } } int main(int argc, char **argv) { InputParser input(argc, argv); if(input.cmdOptionExists("-h")){ print_help(); return 0; } if(input.cmdOptionExists("-p")){ std::string in_file = input.getCmdOption("-p"); if (in_file == "") { std::cout << "No input file!\n\n"; print_help(); return 2; } bool json = input.cmdOptionExists("-json"); int width, height, bpp; uint8_t* matrix_a = stbi_load(in_file.c_str(), &width, &height, &bpp, 1); int total_e = width*height; int widthstep = total_e*sizeof(unsigned long); unsigned long * a = (unsigned long *)malloc(widthstep); for (int i = 0; i < width *height; ++i) { a[i] = (unsigned long)matrix_a[i]; } if (print==1) { cout << "Input"<<endl; for(int r=0;r<height;r++) { for(int c=0; c<width;c++) { cout << a[r*width+c]<<" "; } cout << endl; } } if(!json){ std::cout << "w: " << width << " h: " << height << " b: " << bpp << std::endl; std::cout << "Calculating Integral Image..." 
<< std::endl; } unsigned long * matrix_b= (unsigned long *)malloc(widthstep); unsigned long * matrix_t= (unsigned long *)malloc(widthstep); for(int r=0;r<height;r++) { for(int c=0; c<width;c++) { matrix_b[r*width+c]=0; matrix_t[r*width+c]=0; } } if(!json){ std::cout << "Copied image" << std::endl; } unsigned long * d_matrix_a, * d_matrix_b, * d_matrix_t; cudaMalloc(&d_matrix_a,widthstep); cudaMalloc(&d_matrix_b,widthstep); cudaMalloc(&d_matrix_t,widthstep); cudaMemcpy(d_matrix_a,a,widthstep,cudaMemcpyHostToDevice); cudaMemcpy(d_matrix_b,matrix_b,widthstep,cudaMemcpyHostToDevice); cudaMemcpy(d_matrix_t,matrix_t,widthstep,cudaMemcpyHostToDevice); if(!json){ std::cout << "starting cuda" << std::endl; } struct timeval start, end; gettimeofday(&start, NULL); int num_thread_h = 0; int num_thread_w = 0; if(input.cmdOptionExists("-t")){ std::string num_threads_string = input.getCmdOption("-t"); if (num_threads_string != ""){ num_thread_h = atoi(num_threads_string.c_str()); num_thread_w = num_thread_h; } else { num_thread_h = height; num_thread_w = width; } } else { num_thread_h = height; num_thread_w = width; } sum_rows<<<num_thread_h,1>>>(d_matrix_a, d_matrix_t,height,width, num_thread_h); sum_columns<<<num_thread_w,1>>>(d_matrix_t, d_matrix_b,height,width, num_thread_w); cudaThreadSynchronize(); gettimeofday(&end, NULL); double time_tot = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6; if(!json){ std::cout << "Total parallel time: " << time_tot <<std::endl; } cudaMemcpy(matrix_b,d_matrix_b,widthstep,cudaMemcpyDeviceToHost); cudaMemcpy(matrix_t,d_matrix_t,widthstep,cudaMemcpyDeviceToHost); if (print==1) { cout << "Cuda Output"<<endl; for(int r=0;r<height;r++) { for(int c=0; c<width;c++) { cout << matrix_b[r*width+c]<<" "; } cout << endl; } } if(!json){ std::cout << "starting serial" << std::endl; } gettimeofday(&start, NULL); unsigned long* integral_image = integralImage(matrix_a, height, width); gettimeofday(&end, NULL); double 
time_tot_serial = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6; if(!json){ std::cout << "Total serial time: " << time_tot_serial <<std::endl; std::cout << "finish serial" << std::endl; } int count =0; for (int i = 0; i < width*height; ++i) { if (integral_image[i]!=matrix_b[i]) { //std::cout<<"errore"; count++; } } if(!json){ std::cout<<"Errors "; std::cout<<count; std::cout<<" over "; std::cout<<width*height<<std::endl; } if (json) { std::cout << "{\"time\": " << time_tot << ", \"width\": " << width << ", \"height\": " << height << ", \"errors\": " << count << ", \"time_serial\": " << time_tot_serial << "}" << std::endl; } cudaFree(d_matrix_a); cudaFree(d_matrix_b); free(a); free(matrix_b); stbi_image_free(matrix_a); return 0; } else { // no valid arguments std::cout << "No input file!\n\n"; print_help(); return 1; } }
e98f3d8d2ef14cffb4ae5c071ef2905291b35500.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* 2013 * Maciej Szeptuch * II UWr */ #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <string.h> #include <errno.h> #define WORD_MAXLEN 16 #define THREADS 64 #define __CUDA__ #define __CPU__ __device__ __host__ inline int MIN(const int a, const int b) { return a<b?a:b; } __device__ __host__ int LevenshteinDistance(const char *const A, const char *const B); char *loadDictionary(const char *const file, int &words, int &size); void printHead(void); #ifdef __CUDA__ __global__ void LevenshteinCUDA(const char *const dictionary, const int words, const char *const pattern, int *result); #endif // __CUDA__ #ifdef __CPU__ int LevenshteinCPU(const char *const dictionary, const int words, const char *const pattern); #endif // __CPU__ int main(const int argc, const char *const* argv) { if(argc < 3) { fprintf(stderr, "usage: %s dictionary words...\nError: not enough arguments\n", argv[0]); return 1; } int dictionarySize = 0, words = 0; char *dictionary = loadDictionary(argv[1], words, dictionarySize); if(!dictionary) { fprintf(stderr, "usage: %s dictionary words...\nError: loading dictionary: %s\n", argv[0], strerror(errno)); return 2; } #ifdef __CUDA__ // GPU INIT char *cudaDictionary = NULL, *cudaPattern = NULL; int *cudaResult = NULL; hipMalloc(&cudaDictionary, dictionarySize * sizeof(char)); hipMalloc(&cudaPattern, WORD_MAXLEN * sizeof(char)); hipMalloc(&cudaResult, 2 * sizeof(int)); hipMemcpy(cudaDictionary, dictionary, dictionarySize * sizeof(char), hipMemcpyHostToDevice); #endif // __CUDA__ printHead(); for(int a = 2; a < argc; ++ a) { int result[2] = {1 << 30, 1 << 30}; char pattern[WORD_MAXLEN + 2] = {}; memcpy(pattern, argv[a], strlen(argv[a]) * sizeof(char)); printf(" %-16s | ", argv[a]); #ifdef __CUDA__ { // GPU TEST hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start, NULL); hipMemcpy(cudaPattern, pattern, WORD_MAXLEN * 
sizeof(char), hipMemcpyHostToDevice); hipMemcpy(cudaResult, result, 2 * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( LevenshteinCUDA), dim3((words + THREADS - 1) / THREADS), dim3(THREADS), 0, 0, cudaDictionary, words, cudaPattern, cudaResult); hipMemcpy(result, cudaResult, 2 * sizeof(int), hipMemcpyDeviceToHost); hipEventRecord(end, NULL); hipEventSynchronize(end); float gputotal = 0; hipEventElapsedTime(&gputotal, start, end); printf("%-16s [%11.6f] | ", &dictionary[result[0] * WORD_MAXLEN], gputotal); } #endif // __CUDA__ #ifdef __CPU__ { // CPU TEST timeval start, end; gettimeofday(&start, NULL); result[0] = LevenshteinCPU(dictionary, words, pattern); gettimeofday(&end, NULL); float cputotal = (end.tv_sec - start.tv_sec) * 1000.0f + (end.tv_usec - start.tv_usec) / 1000.0f; printf("%-16s [%11.6f] | ", &dictionary[result[0] * WORD_MAXLEN], cputotal); } #endif // __CPU__ printf("%d\n", LevenshteinDistance(pattern, &dictionary[result[0] * WORD_MAXLEN])); } #ifdef __CUDA__ hipFree(cudaDictionary); #endif // __CUDA__ free(dictionary); return 0; } char *loadDictionary(const char *const file, int &words, int &size) { FILE *handle = fopen(file, "rb"); if(!handle) return NULL; char *dictionary = NULL, *current = NULL; char buffer[64] = {}; words = 0; while(fgets(buffer, 64, handle)) ++ words; fseek(handle, 0, SEEK_SET); size = words * WORD_MAXLEN; current = dictionary = new char[size]; memset(dictionary, 0, size * sizeof(char)); while(fgets(current, WORD_MAXLEN + 8, handle)) { current[strlen(current) - 1] = 0; // remove \n current[strlen(current) - 1] = 0; // remove \r current += WORD_MAXLEN; } fclose(handle); return dictionary; } #ifdef __CPU__ int LevenshteinCPU(const char *const dictionary, const int words, const char *const pattern) { const char *word = dictionary; int best = 1 << 30, r = 0; for(int w = 0; w < words; ++ w, word += WORD_MAXLEN) { int dist = LevenshteinDistance(pattern, word); if(dist < best) { best = dist; r = w; } } return r; } #endif // 
__CPU__ __device__ __host__ inline int LevenshteinDistance(const char *const A, const char *const B) { int temp[2][WORD_MAXLEN + 2] = {}; int t = 1; for(int a = 0; a <= WORD_MAXLEN; ++ a) temp[0][a] = a; for(int a = 1; a <= WORD_MAXLEN; ++ a, t ^= 1) { temp[t][0] = a; for(int b = 1; b <= WORD_MAXLEN; ++ b) temp[t][b] = MIN(temp[t^1][b] + 1, MIN(temp[t][b-1] + 1, temp[t^1][b-1] + (A[a-1] != B[b-1]))); } return temp[t^1][WORD_MAXLEN]; } void printHead(void) { printf(" word | "); #ifdef __CUDA__ printf(" gpu | "); #endif // __CUDA__ #ifdef __CPU__ printf(" cpu | "); #endif // __CPU__ printf("distance\n"); printf("------------------|-"); #ifdef __CUDA__ printf("-------------------------------|-"); #endif // __CUDA__ #ifdef __CPU__ printf("-------------------------------|-"); #endif // __CPU__ printf("---------\n"); } #ifdef __CUDA__ __global__ void LevenshteinCUDA(const char *dictionary, const int words, const char *pattern, int *result) { int start = blockIdx.x * THREADS + threadIdx.x; int act = LevenshteinDistance(pattern, &dictionary[WORD_MAXLEN * start]); if(atomicMin(&result[1], act) > act) result[0] = start; } #endif // __CUDA__
e98f3d8d2ef14cffb4ae5c071ef2905291b35500.cu
/* 2013 * Maciej Szeptuch * II UWr */ #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <string.h> #include <errno.h> #define WORD_MAXLEN 16 #define THREADS 64 #define __CUDA__ #define __CPU__ __device__ __host__ inline int MIN(const int a, const int b) { return a<b?a:b; } __device__ __host__ int LevenshteinDistance(const char *const A, const char *const B); char *loadDictionary(const char *const file, int &words, int &size); void printHead(void); #ifdef __CUDA__ __global__ void LevenshteinCUDA(const char *const dictionary, const int words, const char *const pattern, int *result); #endif // __CUDA__ #ifdef __CPU__ int LevenshteinCPU(const char *const dictionary, const int words, const char *const pattern); #endif // __CPU__ int main(const int argc, const char *const* argv) { if(argc < 3) { fprintf(stderr, "usage: %s dictionary words...\nError: not enough arguments\n", argv[0]); return 1; } int dictionarySize = 0, words = 0; char *dictionary = loadDictionary(argv[1], words, dictionarySize); if(!dictionary) { fprintf(stderr, "usage: %s dictionary words...\nError: loading dictionary: %s\n", argv[0], strerror(errno)); return 2; } #ifdef __CUDA__ // GPU INIT char *cudaDictionary = NULL, *cudaPattern = NULL; int *cudaResult = NULL; cudaMalloc(&cudaDictionary, dictionarySize * sizeof(char)); cudaMalloc(&cudaPattern, WORD_MAXLEN * sizeof(char)); cudaMalloc(&cudaResult, 2 * sizeof(int)); cudaMemcpy(cudaDictionary, dictionary, dictionarySize * sizeof(char), cudaMemcpyHostToDevice); #endif // __CUDA__ printHead(); for(int a = 2; a < argc; ++ a) { int result[2] = {1 << 30, 1 << 30}; char pattern[WORD_MAXLEN + 2] = {}; memcpy(pattern, argv[a], strlen(argv[a]) * sizeof(char)); printf(" %-16s | ", argv[a]); #ifdef __CUDA__ { // GPU TEST cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, NULL); cudaMemcpy(cudaPattern, pattern, WORD_MAXLEN * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(cudaResult, result, 2 * 
sizeof(int), cudaMemcpyHostToDevice); LevenshteinCUDA<<<(words + THREADS - 1) / THREADS, THREADS>>> (cudaDictionary, words, cudaPattern, cudaResult); cudaMemcpy(result, cudaResult, 2 * sizeof(int), cudaMemcpyDeviceToHost); cudaEventRecord(end, NULL); cudaEventSynchronize(end); float gputotal = 0; cudaEventElapsedTime(&gputotal, start, end); printf("%-16s [%11.6f] | ", &dictionary[result[0] * WORD_MAXLEN], gputotal); } #endif // __CUDA__ #ifdef __CPU__ { // CPU TEST timeval start, end; gettimeofday(&start, NULL); result[0] = LevenshteinCPU(dictionary, words, pattern); gettimeofday(&end, NULL); float cputotal = (end.tv_sec - start.tv_sec) * 1000.0f + (end.tv_usec - start.tv_usec) / 1000.0f; printf("%-16s [%11.6f] | ", &dictionary[result[0] * WORD_MAXLEN], cputotal); } #endif // __CPU__ printf("%d\n", LevenshteinDistance(pattern, &dictionary[result[0] * WORD_MAXLEN])); } #ifdef __CUDA__ cudaFree(cudaDictionary); #endif // __CUDA__ free(dictionary); return 0; } char *loadDictionary(const char *const file, int &words, int &size) { FILE *handle = fopen(file, "rb"); if(!handle) return NULL; char *dictionary = NULL, *current = NULL; char buffer[64] = {}; words = 0; while(fgets(buffer, 64, handle)) ++ words; fseek(handle, 0, SEEK_SET); size = words * WORD_MAXLEN; current = dictionary = new char[size]; memset(dictionary, 0, size * sizeof(char)); while(fgets(current, WORD_MAXLEN + 8, handle)) { current[strlen(current) - 1] = 0; // remove \n current[strlen(current) - 1] = 0; // remove \r current += WORD_MAXLEN; } fclose(handle); return dictionary; } #ifdef __CPU__ int LevenshteinCPU(const char *const dictionary, const int words, const char *const pattern) { const char *word = dictionary; int best = 1 << 30, r = 0; for(int w = 0; w < words; ++ w, word += WORD_MAXLEN) { int dist = LevenshteinDistance(pattern, word); if(dist < best) { best = dist; r = w; } } return r; } #endif // __CPU__ __device__ __host__ inline int LevenshteinDistance(const char *const A, const char *const B) 
{ int temp[2][WORD_MAXLEN + 2] = {}; int t = 1; for(int a = 0; a <= WORD_MAXLEN; ++ a) temp[0][a] = a; for(int a = 1; a <= WORD_MAXLEN; ++ a, t ^= 1) { temp[t][0] = a; for(int b = 1; b <= WORD_MAXLEN; ++ b) temp[t][b] = MIN(temp[t^1][b] + 1, MIN(temp[t][b-1] + 1, temp[t^1][b-1] + (A[a-1] != B[b-1]))); } return temp[t^1][WORD_MAXLEN]; } void printHead(void) { printf(" word | "); #ifdef __CUDA__ printf(" gpu | "); #endif // __CUDA__ #ifdef __CPU__ printf(" cpu | "); #endif // __CPU__ printf("distance\n"); printf("------------------|-"); #ifdef __CUDA__ printf("-------------------------------|-"); #endif // __CUDA__ #ifdef __CPU__ printf("-------------------------------|-"); #endif // __CPU__ printf("---------\n"); } #ifdef __CUDA__ __global__ void LevenshteinCUDA(const char *dictionary, const int words, const char *pattern, int *result) { int start = blockIdx.x * THREADS + threadIdx.x; int act = LevenshteinDistance(pattern, &dictionary[WORD_MAXLEN * start]); if(atomicMin(&result[1], act) > act) result[0] = start; } #endif // __CUDA__
16b03c65ffa9db65e146002498ce9c6e7546e02f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * GridTools * * Copyright (c) 2014-2019, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include "implementation.cpp" #include <iostream> namespace gpu_array { template <class T> struct my_array { using data_t = T; T *data; int sizes[3]; int strides[3]; __device__ const T &operator()(int i, int j, int k) const { assert(i < sizes[0] && j < sizes[1] && k < sizes[2] && "out of bounds"); return data[i * strides[0] + j * strides[1] + k * strides[2]]; } __device__ T &operator()(int i, int j, int k) { assert(i < sizes[0] && j < sizes[1] && k < sizes[2] && "out of bounds"); return data[i * strides[0] + j * strides[1] + k * strides[2]]; } }; template <typename T> my_array<T> bindgen_make_fortran_array_view(bindgen_fortran_array_descriptor *descriptor, my_array<T> *) { if (descriptor->rank != 3) { throw std::runtime_error("only 3-dimensional arrays are supported"); } hipPointerAttribute_t attributes; auto ret = hipPointerGetAttributes(&attributes, descriptor->data); if (ret != hipSuccess || attributes.memoryType != hipMemoryTypeDevice) { throw std::runtime_error("no gpu pointer"); } return my_array<T>{static_cast<T *>(descriptor->data), {descriptor->dims[0], descriptor->dims[1], descriptor->dims[2]}, {1, descriptor->dims[0], descriptor->dims[0] * descriptor->dims[1]}}; } template <typename T> bindgen_fortran_array_descriptor get_fortran_view_meta(my_array<T> *) { bindgen_fortran_array_descriptor descriptor; descriptor.type = cpp_bindgen::fortran_array_element_kind<T>::value; descriptor.rank = 3; descriptor.is_acc_present = true; return descriptor; } static_assert(cpp_bindgen::is_fortran_array_bindable<my_array<double>>::value, ""); static_assert(cpp_bindgen::is_fortran_array_wrappable<my_array<double>>::value, ""); } // namespace gpu_array namespace { __global__ void 
fill_array_kernel(gpu_array::my_array<double> a) { for (size_t i = 0; i < a.sizes[2]; ++i) { a(threadIdx.x, blockIdx.x, i) = threadIdx.x * 10000 + blockIdx.x * 100 + i; } } void fill_gpu_array_impl(gpu_array::my_array<double> a) {hipLaunchKernelGGL(( fill_array_kernel), dim3(a.sizes[1]), dim3(a.sizes[0]), 0, 0, a); } BINDGEN_EXPORT_BINDING_WRAPPED_1(fill_gpu_array, fill_gpu_array_impl); } // namespace
16b03c65ffa9db65e146002498ce9c6e7546e02f.cu
/* * GridTools * * Copyright (c) 2014-2019, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include "implementation.cpp" #include <iostream> namespace gpu_array { template <class T> struct my_array { using data_t = T; T *data; int sizes[3]; int strides[3]; __device__ const T &operator()(int i, int j, int k) const { assert(i < sizes[0] && j < sizes[1] && k < sizes[2] && "out of bounds"); return data[i * strides[0] + j * strides[1] + k * strides[2]]; } __device__ T &operator()(int i, int j, int k) { assert(i < sizes[0] && j < sizes[1] && k < sizes[2] && "out of bounds"); return data[i * strides[0] + j * strides[1] + k * strides[2]]; } }; template <typename T> my_array<T> bindgen_make_fortran_array_view(bindgen_fortran_array_descriptor *descriptor, my_array<T> *) { if (descriptor->rank != 3) { throw std::runtime_error("only 3-dimensional arrays are supported"); } cudaPointerAttributes attributes; auto ret = cudaPointerGetAttributes(&attributes, descriptor->data); if (ret != cudaSuccess || attributes.memoryType != cudaMemoryTypeDevice) { throw std::runtime_error("no gpu pointer"); } return my_array<T>{static_cast<T *>(descriptor->data), {descriptor->dims[0], descriptor->dims[1], descriptor->dims[2]}, {1, descriptor->dims[0], descriptor->dims[0] * descriptor->dims[1]}}; } template <typename T> bindgen_fortran_array_descriptor get_fortran_view_meta(my_array<T> *) { bindgen_fortran_array_descriptor descriptor; descriptor.type = cpp_bindgen::fortran_array_element_kind<T>::value; descriptor.rank = 3; descriptor.is_acc_present = true; return descriptor; } static_assert(cpp_bindgen::is_fortran_array_bindable<my_array<double>>::value, ""); static_assert(cpp_bindgen::is_fortran_array_wrappable<my_array<double>>::value, ""); } // namespace gpu_array namespace { __global__ void fill_array_kernel(gpu_array::my_array<double> a) { for (size_t i = 0; i < a.sizes[2]; ++i) { a(threadIdx.x, 
blockIdx.x, i) = threadIdx.x * 10000 + blockIdx.x * 100 + i; } } void fill_gpu_array_impl(gpu_array::my_array<double> a) { fill_array_kernel<<<a.sizes[1], a.sizes[0]>>>(a); } BINDGEN_EXPORT_BINDING_WRAPPED_1(fill_gpu_array, fill_gpu_array_impl); } // namespace
ee87bbea801ff1a755370b73e2130a91eefbf1ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <ATen/ATen.h> #include <float.h> template <typename scalar_t> __device__ void WarpReduce( volatile scalar_t* min_dists, volatile long* min_idxs, const size_t tid) { // s = 32 if (min_dists[tid] > min_dists[tid + 32]) { min_idxs[tid] = min_idxs[tid + 32]; min_dists[tid] = min_dists[tid + 32]; } // s = 16 if (min_dists[tid] > min_dists[tid + 16]) { min_idxs[tid] = min_idxs[tid + 16]; min_dists[tid] = min_dists[tid + 16]; } // s = 8 if (min_dists[tid] > min_dists[tid + 8]) { min_idxs[tid] = min_idxs[tid + 8]; min_dists[tid] = min_dists[tid + 8]; } // s = 4 if (min_dists[tid] > min_dists[tid + 4]) { min_idxs[tid] = min_idxs[tid + 4]; min_dists[tid] = min_dists[tid + 4]; } // s = 2 if (min_dists[tid] > min_dists[tid + 2]) { min_idxs[tid] = min_idxs[tid + 2]; min_dists[tid] = min_dists[tid + 2]; } // s = 1 if (min_dists[tid] > min_dists[tid + 1]) { min_idxs[tid] = min_idxs[tid + 1]; min_dists[tid] = min_dists[tid + 1]; } } // CUDA kernel to compute nearest neighbors between two batches of pointclouds // where each point is of dimension D. // // Args: // points1: First set of points, of shape (N, P1, D). // points2: Second set of points, of shape (N, P2, D). // idx: Output memory buffer of shape (N, P1). // N: Batch size. // P1: Number of points in points1. // P2: Number of points in points2. // D_2: Size of the shared buffer; this is D rounded up so that memory access // is aligned. // template <typename scalar_t> __global__ void NearestNeighborKernel( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, long* __restrict__ idx, const size_t N, const size_t P1, const size_t P2, const size_t D, const size_t D_2) { // Each block will compute one element of the output idx[n, i]. 
Within the // block we will use threads to compute the distances between points1[n, i] // and points2[n, j] for all 0 <= j < P2, then use a block reduction to // take an argmin of the distances. // Shared buffers for the threads in the block. CUDA only allows declaration // of a single shared buffer, so it needs to be manually sliced and cast to // build several logical shared buffers of different types. extern __shared__ char shared_buf[]; scalar_t* x = (scalar_t*)shared_buf; // scalar_t[DD] scalar_t* min_dists = &x[D_2]; // scalar_t[NUM_THREADS] long* min_idxs = (long*)&min_dists[blockDim.x]; // long[NUM_THREADS] const size_t n = blockIdx.y; // index of batch element. const size_t i = blockIdx.x; // index of point within batch element. const size_t tid = threadIdx.x; // Thread 0 copies points1[n, i, :] into x. if (tid == 0) { for (size_t d = 0; d < D; d++) { x[d] = points1[n * (P1 * D) + i * D + d]; } } __syncthreads(); // Compute the distances between points1[n, i] and points2[n, j] for // all 0 <= j < P2. Here each thread will reduce over P2 / blockDim.x // in serial, and store its result to shared memory scalar_t min_dist = FLT_MAX; size_t min_idx = 0; for (size_t j = tid; j < P2; j += blockDim.x) { scalar_t dist = 0; for (size_t d = 0; d < D; d++) { scalar_t x_d = x[d]; scalar_t y_d = points2[n * (P2 * D) + j * D + d]; scalar_t diff = x_d - y_d; dist += diff * diff; } min_dist = (j == tid) ? dist : min_dist; min_idx = (dist <= min_dist) ? j : min_idx; min_dist = (dist <= min_dist) ? dist : min_dist; } min_dists[tid] = min_dist; min_idxs[tid] = min_idx; __syncthreads(); // Perform reduction in shared memory. for (int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) { if (min_dists[tid] > min_dists[tid + s]) { min_dists[tid] = min_dists[tid + s]; min_idxs[tid] = min_idxs[tid + s]; } } __syncthreads(); } // Unroll the last 6 iterations of the loop since they will happen // synchronized within a single warp. 
if (tid < 32) WarpReduce<scalar_t>(min_dists, min_idxs, tid); // Finally thread 0 writes the result to the output buffer. if (tid == 0) { idx[n * P1 + i] = min_idxs[0]; } } // CUDA kernel to compute nearest neighbors between two sets of 3-dimensional // pointclouds. This is a specialization of the nearest_neighbor_kernel // to the case D=3. // // Args: // points1: First set of pointclouds, of shape (N, P1, 3). // points2: Second set of pointclouds, of shape (N, P2, 3). // idx: Output memory buffer of shape (N, P1). // N: Batch size. // P1: Number of points in points1. // P2: Number of points in points2. // template <typename scalar_t> __global__ void NearestNeighborKernelD3( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, long* __restrict__ idx, const size_t N, const size_t P1, const size_t P2) { // Single shared memory buffer which is split and cast to different types. extern __shared__ char shared_buf[]; scalar_t* min_dists = (scalar_t*)shared_buf; // scalar_t[NUM_THREADS] long* min_idxs = (long*)&min_dists[blockDim.x]; // long[NUM_THREADS] const size_t D = 3; const size_t n = blockIdx.y; // index of batch element. const size_t i = blockIdx.x; // index of point within batch element. const size_t tid = threadIdx.x; // Retrieve the coordinates of points1[n, i] from global memory; these // will be stored in registers for fast access. const scalar_t x = points1[n * (P1 * D) + i * D + 0]; const scalar_t y = points1[n * (P1 * D) + i * D + 1]; const scalar_t z = points1[n * (P1 * D) + i * D + 2]; // Compute distances between points1[n, i] and all points2[n, j] // for 0 <= j < P2 scalar_t min_dist = FLT_MAX; size_t min_idx = 0; // Distance computation for points in p2 spread across threads in the block. 
for (size_t j = tid; j < P2; j += blockDim.x) { scalar_t dx = x - points2[n * (P2 * D) + j * D + 0]; scalar_t dy = y - points2[n * (P2 * D) + j * D + 1]; scalar_t dz = z - points2[n * (P2 * D) + j * D + 2]; scalar_t dist = dx * dx + dy * dy + dz * dz; min_dist = (j == tid) ? dist : min_dist; min_idx = (dist <= min_dist) ? j : min_idx; min_dist = (dist <= min_dist) ? dist : min_dist; } min_dists[tid] = min_dist; min_idxs[tid] = min_idx; // Synchronize local threads writing to the shared memory buffer. __syncthreads(); // Perform reduction in shared memory. for (int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) { if (min_dists[tid] > min_dists[tid + s]) { min_dists[tid] = min_dists[tid + s]; min_idxs[tid] = min_idxs[tid + s]; } } // Synchronize local threads so that min_dists is correct. __syncthreads(); } // Unroll the last 6 iterations of the loop since they will happen // synchronized within a single warp. if (tid < 32) WarpReduce<scalar_t>(min_dists, min_idxs, tid); // Finally thread 0 writes the result to the output buffer. if (tid == 0) { idx[n * P1 + i] = min_idxs[0]; } } at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2) { const auto N = p1.size(0); const auto P1 = p1.size(1); const auto P2 = p2.size(1); const auto D = p1.size(2); AT_ASSERTM(p2.size(2) == D, "Point sets must have same last dimension."); auto idx = at::empty({N, P1}, p1.options().dtype(at::kLong)); // On P100 with pointclouds of size (16, 5000, 3), 128 threads per block // gives best results. const int threads = 128; const dim3 blocks(P1, N); if (D == 3) { // Use the specialized kernel for D=3. 
AT_DISPATCH_FLOATING_TYPES(p1.type(), "nearest_neighbor_v3_cuda", ([&] { size_t shared_size = threads * sizeof(size_t) + threads * sizeof(long); hipLaunchKernelGGL(( NearestNeighborKernelD3<scalar_t>) , dim3(blocks), dim3(threads), shared_size, 0, p1.data_ptr<scalar_t>(), p2.data_ptr<scalar_t>(), idx.data_ptr<long>(), N, P1, P2); })); } else { // Use the general kernel for all other D. AT_DISPATCH_FLOATING_TYPES( p1.type(), "nearest_neighbor_v3_cuda", ([&] { // To avoid misaligned memory access, the size of shared buffers // need to be rounded to the next even size. size_t D_2 = D + (D % 2); size_t shared_size = (D_2 + threads) * sizeof(size_t); shared_size += threads * sizeof(long); hipLaunchKernelGGL(( NearestNeighborKernel<scalar_t>), dim3(blocks), dim3(threads), shared_size, 0, p1.data_ptr<scalar_t>(), p2.data_ptr<scalar_t>(), idx.data_ptr<long>(), N, P1, P2, D, D_2); })); } return idx; }
ee87bbea801ff1a755370b73e2130a91eefbf1ff.cu
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <ATen/ATen.h> #include <float.h> template <typename scalar_t> __device__ void WarpReduce( volatile scalar_t* min_dists, volatile long* min_idxs, const size_t tid) { // s = 32 if (min_dists[tid] > min_dists[tid + 32]) { min_idxs[tid] = min_idxs[tid + 32]; min_dists[tid] = min_dists[tid + 32]; } // s = 16 if (min_dists[tid] > min_dists[tid + 16]) { min_idxs[tid] = min_idxs[tid + 16]; min_dists[tid] = min_dists[tid + 16]; } // s = 8 if (min_dists[tid] > min_dists[tid + 8]) { min_idxs[tid] = min_idxs[tid + 8]; min_dists[tid] = min_dists[tid + 8]; } // s = 4 if (min_dists[tid] > min_dists[tid + 4]) { min_idxs[tid] = min_idxs[tid + 4]; min_dists[tid] = min_dists[tid + 4]; } // s = 2 if (min_dists[tid] > min_dists[tid + 2]) { min_idxs[tid] = min_idxs[tid + 2]; min_dists[tid] = min_dists[tid + 2]; } // s = 1 if (min_dists[tid] > min_dists[tid + 1]) { min_idxs[tid] = min_idxs[tid + 1]; min_dists[tid] = min_dists[tid + 1]; } } // CUDA kernel to compute nearest neighbors between two batches of pointclouds // where each point is of dimension D. // // Args: // points1: First set of points, of shape (N, P1, D). // points2: Second set of points, of shape (N, P2, D). // idx: Output memory buffer of shape (N, P1). // N: Batch size. // P1: Number of points in points1. // P2: Number of points in points2. // D_2: Size of the shared buffer; this is D rounded up so that memory access // is aligned. // template <typename scalar_t> __global__ void NearestNeighborKernel( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, long* __restrict__ idx, const size_t N, const size_t P1, const size_t P2, const size_t D, const size_t D_2) { // Each block will compute one element of the output idx[n, i]. Within the // block we will use threads to compute the distances between points1[n, i] // and points2[n, j] for all 0 <= j < P2, then use a block reduction to // take an argmin of the distances. 
// Shared buffers for the threads in the block. CUDA only allows declaration // of a single shared buffer, so it needs to be manually sliced and cast to // build several logical shared buffers of different types. extern __shared__ char shared_buf[]; scalar_t* x = (scalar_t*)shared_buf; // scalar_t[DD] scalar_t* min_dists = &x[D_2]; // scalar_t[NUM_THREADS] long* min_idxs = (long*)&min_dists[blockDim.x]; // long[NUM_THREADS] const size_t n = blockIdx.y; // index of batch element. const size_t i = blockIdx.x; // index of point within batch element. const size_t tid = threadIdx.x; // Thread 0 copies points1[n, i, :] into x. if (tid == 0) { for (size_t d = 0; d < D; d++) { x[d] = points1[n * (P1 * D) + i * D + d]; } } __syncthreads(); // Compute the distances between points1[n, i] and points2[n, j] for // all 0 <= j < P2. Here each thread will reduce over P2 / blockDim.x // in serial, and store its result to shared memory scalar_t min_dist = FLT_MAX; size_t min_idx = 0; for (size_t j = tid; j < P2; j += blockDim.x) { scalar_t dist = 0; for (size_t d = 0; d < D; d++) { scalar_t x_d = x[d]; scalar_t y_d = points2[n * (P2 * D) + j * D + d]; scalar_t diff = x_d - y_d; dist += diff * diff; } min_dist = (j == tid) ? dist : min_dist; min_idx = (dist <= min_dist) ? j : min_idx; min_dist = (dist <= min_dist) ? dist : min_dist; } min_dists[tid] = min_dist; min_idxs[tid] = min_idx; __syncthreads(); // Perform reduction in shared memory. for (int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) { if (min_dists[tid] > min_dists[tid + s]) { min_dists[tid] = min_dists[tid + s]; min_idxs[tid] = min_idxs[tid + s]; } } __syncthreads(); } // Unroll the last 6 iterations of the loop since they will happen // synchronized within a single warp. if (tid < 32) WarpReduce<scalar_t>(min_dists, min_idxs, tid); // Finally thread 0 writes the result to the output buffer. 
if (tid == 0) { idx[n * P1 + i] = min_idxs[0]; } } // CUDA kernel to compute nearest neighbors between two sets of 3-dimensional // pointclouds. This is a specialization of the nearest_neighbor_kernel // to the case D=3. // // Args: // points1: First set of pointclouds, of shape (N, P1, 3). // points2: Second set of pointclouds, of shape (N, P2, 3). // idx: Output memory buffer of shape (N, P1). // N: Batch size. // P1: Number of points in points1. // P2: Number of points in points2. // template <typename scalar_t> __global__ void NearestNeighborKernelD3( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, long* __restrict__ idx, const size_t N, const size_t P1, const size_t P2) { // Single shared memory buffer which is split and cast to different types. extern __shared__ char shared_buf[]; scalar_t* min_dists = (scalar_t*)shared_buf; // scalar_t[NUM_THREADS] long* min_idxs = (long*)&min_dists[blockDim.x]; // long[NUM_THREADS] const size_t D = 3; const size_t n = blockIdx.y; // index of batch element. const size_t i = blockIdx.x; // index of point within batch element. const size_t tid = threadIdx.x; // Retrieve the coordinates of points1[n, i] from global memory; these // will be stored in registers for fast access. const scalar_t x = points1[n * (P1 * D) + i * D + 0]; const scalar_t y = points1[n * (P1 * D) + i * D + 1]; const scalar_t z = points1[n * (P1 * D) + i * D + 2]; // Compute distances between points1[n, i] and all points2[n, j] // for 0 <= j < P2 scalar_t min_dist = FLT_MAX; size_t min_idx = 0; // Distance computation for points in p2 spread across threads in the block. for (size_t j = tid; j < P2; j += blockDim.x) { scalar_t dx = x - points2[n * (P2 * D) + j * D + 0]; scalar_t dy = y - points2[n * (P2 * D) + j * D + 1]; scalar_t dz = z - points2[n * (P2 * D) + j * D + 2]; scalar_t dist = dx * dx + dy * dy + dz * dz; min_dist = (j == tid) ? dist : min_dist; min_idx = (dist <= min_dist) ? 
j : min_idx; min_dist = (dist <= min_dist) ? dist : min_dist; } min_dists[tid] = min_dist; min_idxs[tid] = min_idx; // Synchronize local threads writing to the shared memory buffer. __syncthreads(); // Perform reduction in shared memory. for (int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) { if (min_dists[tid] > min_dists[tid + s]) { min_dists[tid] = min_dists[tid + s]; min_idxs[tid] = min_idxs[tid + s]; } } // Synchronize local threads so that min_dists is correct. __syncthreads(); } // Unroll the last 6 iterations of the loop since they will happen // synchronized within a single warp. if (tid < 32) WarpReduce<scalar_t>(min_dists, min_idxs, tid); // Finally thread 0 writes the result to the output buffer. if (tid == 0) { idx[n * P1 + i] = min_idxs[0]; } } at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2) { const auto N = p1.size(0); const auto P1 = p1.size(1); const auto P2 = p2.size(1); const auto D = p1.size(2); AT_ASSERTM(p2.size(2) == D, "Point sets must have same last dimension."); auto idx = at::empty({N, P1}, p1.options().dtype(at::kLong)); // On P100 with pointclouds of size (16, 5000, 3), 128 threads per block // gives best results. const int threads = 128; const dim3 blocks(P1, N); if (D == 3) { // Use the specialized kernel for D=3. AT_DISPATCH_FLOATING_TYPES(p1.type(), "nearest_neighbor_v3_cuda", ([&] { size_t shared_size = threads * sizeof(size_t) + threads * sizeof(long); NearestNeighborKernelD3<scalar_t> <<<blocks, threads, shared_size>>>( p1.data_ptr<scalar_t>(), p2.data_ptr<scalar_t>(), idx.data_ptr<long>(), N, P1, P2); })); } else { // Use the general kernel for all other D. AT_DISPATCH_FLOATING_TYPES( p1.type(), "nearest_neighbor_v3_cuda", ([&] { // To avoid misaligned memory access, the size of shared buffers // need to be rounded to the next even size. 
size_t D_2 = D + (D % 2); size_t shared_size = (D_2 + threads) * sizeof(size_t); shared_size += threads * sizeof(long); NearestNeighborKernel<scalar_t><<<blocks, threads, shared_size>>>( p1.data_ptr<scalar_t>(), p2.data_ptr<scalar_t>(), idx.data_ptr<long>(), N, P1, P2, D, D_2); })); } return idx; }
6232ef1cb4bdd7c598b913e71317ebabd29c0f26.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/arg_where_kernel_util.h" #include "oneflow/core/common/nd_index_offset_helper.h" #include "oneflow/core/common/fixed_vector.h" #include <hipcub/hipcub.hpp> namespace oneflow { namespace { constexpr int kFlatIndexToNdIndexProposedLaunchBlocks = 128; template<typename T, size_t NDims> struct StrideIterator { typedef StrideIterator self_type; typedef std::ptrdiff_t difference_type; typedef T value_type; typedef T* pointer; typedef T& reference; typedef std::random_access_iterator_tag iterator_category; explicit StrideIterator(T* ptr, size_t max_iters) : ptr_(ptr), max_iters_(max_iters) {} OF_DEVICE_FUNC reference operator[](int i) { assert(0 <= i && i < max_iters_); return *(ptr_ + (i * NDims)); } private: T* ptr_; size_t max_iters_; }; template<typename T, size_t NDims> __global__ void CudaOffsetToNdIndexInplace(NdIndexOffsetHelper<T, NDims> index_converter, const T* num_indices_ptr, T* indices_ptr) { CUDA_1D_KERNEL_LOOP_T(T, i, *num_indices_ptr) { T* cur_indices_ptr = indices_ptr + i * NDims; index_converter.OffsetToNdIndex(*cur_indices_ptr, cur_indices_ptr); } } template<typename T> struct IsTrue { OF_DEVICE_FUNC bool operator()(const T& val) const { return static_cast<bool>(val); } }; template<typename T, typename I, typename Iter> hipError_t SelectTrue(hipStream_t stream, int 
num_items, void* tmp, size_t& tmp_bytes, const T* flags, Iter out_iter, I* num_selected) { IsTrue<T> is_true; hipcub::TransformInputIterator<bool, IsTrue<T>, const T*> flag_iter(flags, is_true); hipcub::CountingInputIterator<I> offset_counter(0); return hipcub::DeviceSelect::Flagged(tmp, tmp_bytes, offset_counter, flag_iter, out_iter, num_selected, num_items, stream, false); } } // namespace template<typename T, typename I, size_t NDims> struct ArgWhereKernelUtil<DeviceType::kGPU, T, I, NDims> { static void ArgWhere(DeviceCtx* ctx, const ShapeView& in_shape, const T* in_ptr, void* tmp, size_t tmp_max_bytes, I* out_ptr, I* out_size_ptr) { CHECK_NOTNULL(ctx); CHECK_LE(in_shape.elem_cnt(), std::numeric_limits<I>::max()); size_t tmp_bytes = GetArgWhereWorkspaceSizeInBytes(ctx, in_shape.elem_cnt()); CHECK_LE(tmp_bytes, tmp_max_bytes); if (NDims == 1) { CudaCheck(SelectTrue<T, I, I*>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes, in_ptr, out_ptr, out_size_ptr)); } else { StrideIterator<I, NDims> out_iter(out_ptr, in_shape.elem_cnt()); CudaCheck(SelectTrue<T, I, StrideIterator<I, NDims>>( ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes, in_ptr, out_iter, out_size_ptr)); fixed_vector<I, NDims> dims(NDims); std::transform(in_shape.ptr(), in_shape.ptr() + in_shape.NumAxes(), dims.begin(), [](int64_t dim) { return static_cast<I>(dim); }); NdIndexOffsetHelper<I, NDims> index_converter(dims.data(), dims.size()); hipLaunchKernelGGL(( CudaOffsetToNdIndexInplace<I, NDims>) , dim3(kFlatIndexToNdIndexProposedLaunchBlocks), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), index_converter, out_size_ptr, out_ptr); } } static size_t GetArgWhereWorkspaceSizeInBytes(DeviceCtx* ctx, int64_t n) { hipStream_t stream = ctx ? 
ctx->cuda_stream() : 0; size_t tmp_bytes = 0; if (NDims == 1) { CudaCheck(SelectTrue<T, I, I*>(stream, n, nullptr, tmp_bytes, nullptr, nullptr, nullptr)); } else { StrideIterator<I, NDims> out_iter(nullptr, n); CudaCheck(SelectTrue<T, I, StrideIterator<I, NDims>>(stream, n, nullptr, tmp_bytes, nullptr, out_iter, nullptr)); } return tmp_bytes; } }; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ARG_WHERE_KERNEL_UTIL, (DeviceType::kGPU), ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) } // namespace oneflow
6232ef1cb4bdd7c598b913e71317ebabd29c0f26.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/arg_where_kernel_util.h" #include "oneflow/core/common/nd_index_offset_helper.h" #include "oneflow/core/common/fixed_vector.h" #include <cub/cub.cuh> namespace oneflow { namespace { constexpr int kFlatIndexToNdIndexProposedLaunchBlocks = 128; template<typename T, size_t NDims> struct StrideIterator { typedef StrideIterator self_type; typedef std::ptrdiff_t difference_type; typedef T value_type; typedef T* pointer; typedef T& reference; typedef std::random_access_iterator_tag iterator_category; explicit StrideIterator(T* ptr, size_t max_iters) : ptr_(ptr), max_iters_(max_iters) {} OF_DEVICE_FUNC reference operator[](int i) { assert(0 <= i && i < max_iters_); return *(ptr_ + (i * NDims)); } private: T* ptr_; size_t max_iters_; }; template<typename T, size_t NDims> __global__ void CudaOffsetToNdIndexInplace(NdIndexOffsetHelper<T, NDims> index_converter, const T* num_indices_ptr, T* indices_ptr) { CUDA_1D_KERNEL_LOOP_T(T, i, *num_indices_ptr) { T* cur_indices_ptr = indices_ptr + i * NDims; index_converter.OffsetToNdIndex(*cur_indices_ptr, cur_indices_ptr); } } template<typename T> struct IsTrue { OF_DEVICE_FUNC bool operator()(const T& val) const { return static_cast<bool>(val); } }; template<typename T, typename I, typename Iter> cudaError_t SelectTrue(cudaStream_t stream, int num_items, void* tmp, size_t& tmp_bytes, const T* flags, Iter out_iter, I* num_selected) { 
IsTrue<T> is_true; cub::TransformInputIterator<bool, IsTrue<T>, const T*> flag_iter(flags, is_true); cub::CountingInputIterator<I> offset_counter(0); return cub::DeviceSelect::Flagged(tmp, tmp_bytes, offset_counter, flag_iter, out_iter, num_selected, num_items, stream, false); } } // namespace template<typename T, typename I, size_t NDims> struct ArgWhereKernelUtil<DeviceType::kGPU, T, I, NDims> { static void ArgWhere(DeviceCtx* ctx, const ShapeView& in_shape, const T* in_ptr, void* tmp, size_t tmp_max_bytes, I* out_ptr, I* out_size_ptr) { CHECK_NOTNULL(ctx); CHECK_LE(in_shape.elem_cnt(), std::numeric_limits<I>::max()); size_t tmp_bytes = GetArgWhereWorkspaceSizeInBytes(ctx, in_shape.elem_cnt()); CHECK_LE(tmp_bytes, tmp_max_bytes); if (NDims == 1) { CudaCheck(SelectTrue<T, I, I*>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes, in_ptr, out_ptr, out_size_ptr)); } else { StrideIterator<I, NDims> out_iter(out_ptr, in_shape.elem_cnt()); CudaCheck(SelectTrue<T, I, StrideIterator<I, NDims>>( ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes, in_ptr, out_iter, out_size_ptr)); fixed_vector<I, NDims> dims(NDims); std::transform(in_shape.ptr(), in_shape.ptr() + in_shape.NumAxes(), dims.begin(), [](int64_t dim) { return static_cast<I>(dim); }); NdIndexOffsetHelper<I, NDims> index_converter(dims.data(), dims.size()); CudaOffsetToNdIndexInplace<I, NDims> <<<kFlatIndexToNdIndexProposedLaunchBlocks, kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(index_converter, out_size_ptr, out_ptr); } } static size_t GetArgWhereWorkspaceSizeInBytes(DeviceCtx* ctx, int64_t n) { cudaStream_t stream = ctx ? 
ctx->cuda_stream() : 0; size_t tmp_bytes = 0; if (NDims == 1) { CudaCheck(SelectTrue<T, I, I*>(stream, n, nullptr, tmp_bytes, nullptr, nullptr, nullptr)); } else { StrideIterator<I, NDims> out_iter(nullptr, n); CudaCheck(SelectTrue<T, I, StrideIterator<I, NDims>>(stream, n, nullptr, tmp_bytes, nullptr, out_iter, nullptr)); } return tmp_bytes; } }; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ARG_WHERE_KERNEL_UTIL, (DeviceType::kGPU), ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) } // namespace oneflow
2d6fd847668e72a9e204307d12c862fbc18373b9.hip
// !!! This is a file automatically generated by hipify!!! /* util.cu: CUDA convenience functions. */ #include "roughsort.h" // TODO: propagate __FILE__ and __LINE__ from caller if GDB proves inadequate #define CHECK(r) cuCheck(r, __FILE__, __LINE__) inline void cuCheck(hipError_t r, const char* fname, const size_t lnum) { if (r != hipSuccess) { fatal("CUDA error at line %d in %s: %s\n", lnum, fname, hipGetErrorString(r)); } } size_t cuMemAvail() { size_t free, total; CHECK(hipMemGetInfo(&free, &total)); return free; } void* cuMalloc(size_t size) { void* p; CHECK(hipMalloc(&p, size)); return p; } void cuFree(void* p) { CHECK(hipFree(p)); } void cuClear(void* p, size_t size) { CHECK(hipMemset(p, 0, size)); } void cuUpload(void* devDst, const void* hostSrc, size_t size) { CHECK(hipMemcpy(devDst, hostSrc, size, hipMemcpyHostToDevice)); } void cuDownload(void* hostDst, const void* devSrc, size_t size) { CHECK(hipMemcpy(hostDst, devSrc, size, hipMemcpyDeviceToHost)); } void cuPin(void* p, size_t size) { CHECK(hipHostRegister(p, size, hipHostRegisterPortable)); } void cuUnpin(void* p) { CHECK(hipHostUnregister(p)); }
2d6fd847668e72a9e204307d12c862fbc18373b9.cu
/* util.cu: CUDA convenience functions. */ #include "roughsort.h" // TODO: propagate __FILE__ and __LINE__ from caller if GDB proves inadequate #define CHECK(r) cuCheck(r, __FILE__, __LINE__) inline void cuCheck(cudaError_t r, const char* fname, const size_t lnum) { if (r != cudaSuccess) { fatal("CUDA error at line %d in %s: %s\n", lnum, fname, cudaGetErrorString(r)); } } size_t cuMemAvail() { size_t free, total; CHECK(cudaMemGetInfo(&free, &total)); return free; } void* cuMalloc(size_t size) { void* p; CHECK(cudaMalloc(&p, size)); return p; } void cuFree(void* p) { CHECK(cudaFree(p)); } void cuClear(void* p, size_t size) { CHECK(cudaMemset(p, 0, size)); } void cuUpload(void* devDst, const void* hostSrc, size_t size) { CHECK(cudaMemcpy(devDst, hostSrc, size, cudaMemcpyHostToDevice)); } void cuDownload(void* hostDst, const void* devSrc, size_t size) { CHECK(cudaMemcpy(hostDst, devSrc, size, cudaMemcpyDeviceToHost)); } void cuPin(void* p, size_t size) { CHECK(cudaHostRegister(p, size, cudaHostRegisterPortable)); } void cuUnpin(void* p) { CHECK(cudaHostUnregister(p)); }
9b142deac98dfb133f4646e73d179f43c015687b.hip
// !!! This is a file automatically generated by hipify!!! #include "solver.h" Solver::Solver(NeuralNet *model, void *X_train, int *y_train, void *X_val, int *y_val, int num_epoch, UpdateRule update_rule, double learning_rate, double learning_rate_decay, int num_train, int num_val) { this->model = model; this->X_train = X_train, this->X_val = X_val; this->y_train = y_train, this->y_val = y_val; this->num_epoch = num_epoch; this->update_rule = update_rule; this->learning_rate = learning_rate, this->learning_rate_decay = learning_rate_decay; this->num_train = num_train, this->num_val = num_val; this->num_features = model->input_channels * model->input_h * model->input_w; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); } float Solver::step(int start_X, int start_y) { std::vector<float> t1, t2; return this->step(start_X, start_y, t1, t2); } float Solver::step(int start_X, int start_y, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag) { float temp_loss; // std::cout << "start_X: " << start_X << std::endl; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_vdnn_lag, bwd_vdnn_lag, true, NULL, &temp_loss); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_vdnn_lag, bwd_vdnn_lag, true, NULL, &temp_loss); // float Salpha = -learning_rate; // double Dalpha = -learning_rate; // if (update_rule == SGD) { // for (int i = 0; i < model->num_layers; i++) { // if (model->layer_type[i] == CONV) { // ConvLayerParams *cur_params = (ConvLayerParams *)model->params[i]; // int kernel_size = cur_params->C_in * cur_params->C_out * cur_params->filter_h * cur_params->filter_w; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(hipblasSaxpy(model->cublas_handle, kernel_size, // &Salpha, // (float *)cur_params->dW, 1, // (float *)cur_params->W, 1)); // 
checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->C_out, // &Salpha, // (float *)cur_params->db, 1, // (float *)cur_params->b, 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(hipblasDaxpy(model->cublas_handle, kernel_size, // &Dalpha, // (double *)cur_params->dW, 1, // (double *)cur_params->W, 1)); // checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->C_out, // &Dalpha, // (double *)cur_params->db, 1, // (double *)cur_params->b, 1)); // } // } // else if (model->layer_type[i] == FULLY_CONNECTED) { // FCLayerParams *cur_params = (FCLayerParams *)model->params[i]; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->C_in * cur_params->C_out, // &Salpha, // (float *)cur_params->dW, 1, // (float *)cur_params->W, 1)); // checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->C_out, // &Salpha, // (float *)cur_params->db, 1, // (float *)cur_params->b, 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->C_in * cur_params->C_out, // &Dalpha, // (double *)cur_params->dW, 1, // (double *)cur_params->W, 1)); // checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->C_out, // &Dalpha, // (double *)cur_params->db, 1, // (double *)cur_params->b, 1)); // } // } // else if (model->layer_type[i] == BATCHNORM) { // BatchNormLayerParams *cur_params = (BatchNormLayerParams *)model->params[i]; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->sbmv_size, // &Salpha, // (float *)cur_params->dscale, 1, // (float *)cur_params->scale, 1)); // checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->sbmv_size, // &Salpha, // (float *)cur_params->dbias, 1, // (float *)cur_params->bias, 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->sbmv_size, // &Dalpha, // (double 
*)cur_params->dscale, 1, // (double *)cur_params->scale, 1)); // checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->sbmv_size, // &Dalpha, // (double *)cur_params->dbias, 1, // (double *)cur_params->bias, 1)); // } // } // } // } checkCudaErrors(hipDeviceSynchronize()); return temp_loss; } void Solver::train(std::vector<float> &loss, std::vector<int> &val_acc) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; int num_val_batches = num_val / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; float milli = 0; checkCudaErrors(hipEventRecord(start, model->stream_compute)); float temp_loss = step(start_sample, j * batch_size); checkCudaErrors(hipEventRecord(stop, model->stream_compute)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&milli, start, stop)); std::cout << "One forward, backward pass time(ms): " << milli << std::endl; loss.push_back(temp_loss); std::cout << "loss: " << temp_loss << std::endl; } int correct_count = 0; for (int j = 0; j < num_val_batches; j++) { int start_sample = j * num_features * batch_size; int temp_correct_count; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X_val)[start_sample]), &y_val[j * batch_size], learning_rate, false, &temp_correct_count, NULL); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X_val)[start_sample]), &y_val[j * batch_size], learning_rate, false, &temp_correct_count, NULL); correct_count += temp_correct_count; } val_acc.push_back(correct_count); std::cout << "val_acc: " << val_acc[i] << std::endl; // learning_rate *= learning_rate_decay; // std::cout << "learning_rate: " << learning_rate << std::endl; } learning_rate *= learning_rate_decay; } void Solver::checkAccuracy(void *X, int *y, int num_samples, int *num_correct) { int batch_size = model->batch_size; int num_iter = num_samples 
/ batch_size; *num_correct = 0; for (int i = 0; i < num_iter; i++) { int start_sample = i * num_features * batch_size; int temp_correct_count; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X)[start_sample]), &y[i * batch_size], learning_rate, false, &temp_correct_count, NULL); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X)[start_sample]), &y[i * batch_size], learning_rate, false, &temp_correct_count, NULL); *num_correct = *num_correct + temp_correct_count; } } void Solver::getTrainTime(std::vector<float> &loss, std::vector<float> &time, int num_epoch, std::vector<std::vector<float> > &fwd_vdnn_lag, std::vector<std::vector<float> > &bwd_vdnn_lag) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; checkCudaErrors(hipEventRecord(start)); float milli; std::vector<float> cur_fwd_vdnn_lag, cur_bwd_vdnn_lag; float temp_loss = step(start_sample, j * batch_size, cur_fwd_vdnn_lag, cur_bwd_vdnn_lag); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&milli, start, stop)); // std::cout << "One forward, backward pass time(ms): " << milli << std::endl; fwd_vdnn_lag.push_back(cur_fwd_vdnn_lag); bwd_vdnn_lag.push_back(cur_bwd_vdnn_lag); loss.push_back(temp_loss); time.push_back(milli); // std::cout << "loss: " << temp_loss << std::endl; // for (int i = 0; i < cur_fwd_vdnn_lag.size(); i++) { // std::cout << "fwd_lag " << i << ":" << cur_fwd_vdnn_lag[i] << std::endl; // } // for (int i = 0; i < cur_bwd_vdnn_lag.size(); i++) { // std::cout << "bwd_lag " << i << ":" << cur_bwd_vdnn_lag[i] << std::endl; // } } } learning_rate *= learning_rate_decay; } void Solver::getComputationTime(long num_epoch, std::vector<std::vector<float> > &fwd_computation_time, std::vector<std::vector<float> > 
&bwd_computation_time) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; float milli; std::vector<float> cur_fwd_computation_time, cur_bwd_computation_time; stepComputationTime(start_sample, j * batch_size, cur_fwd_computation_time, cur_bwd_computation_time); fwd_computation_time.push_back(cur_fwd_computation_time); bwd_computation_time.push_back(cur_bwd_computation_time); } learning_rate *= learning_rate_decay; } } void Solver::getTransferTime(long num_epoch, std::vector<std::vector<float> > &fwd_transfer_time, std::vector<std::vector<float> > &bwd_transfer_time) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; float milli; std::vector<float> cur_fwd_transfer_time, cur_bwd_transfer_time; stepTransferTime(start_sample, j * batch_size, cur_fwd_transfer_time, cur_bwd_transfer_time); fwd_transfer_time.push_back(cur_fwd_transfer_time); bwd_transfer_time.push_back(cur_bwd_transfer_time); } learning_rate *= learning_rate_decay; } } void Solver::stepComputationTime(int start_X, int start_y, std::vector<float> &fwd_computation_time, std::vector<float> &bwd_computation_time) { if (model->data_type == CUDNN_DATA_FLOAT) model->getComputationTime(&(((float *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_computation_time, bwd_computation_time); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getComputationTime(&(((double *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_computation_time, bwd_computation_time); } void Solver::stepTransferTime(int start_X, int start_y, std::vector<float> &fwd_transfer_time, std::vector<float> &bwd_transfer_time) { if (model->data_type == CUDNN_DATA_FLOAT) 
model->getTransferTime(&(((float *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_transfer_time, bwd_transfer_time); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getTransferTime(&(((double *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_transfer_time, bwd_transfer_time); }
9b142deac98dfb133f4646e73d179f43c015687b.cu
#include "solver.h" Solver::Solver(NeuralNet *model, void *X_train, int *y_train, void *X_val, int *y_val, int num_epoch, UpdateRule update_rule, double learning_rate, double learning_rate_decay, int num_train, int num_val) { this->model = model; this->X_train = X_train, this->X_val = X_val; this->y_train = y_train, this->y_val = y_val; this->num_epoch = num_epoch; this->update_rule = update_rule; this->learning_rate = learning_rate, this->learning_rate_decay = learning_rate_decay; this->num_train = num_train, this->num_val = num_val; this->num_features = model->input_channels * model->input_h * model->input_w; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); } float Solver::step(int start_X, int start_y) { std::vector<float> t1, t2; return this->step(start_X, start_y, t1, t2); } float Solver::step(int start_X, int start_y, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag) { float temp_loss; // std::cout << "start_X: " << start_X << std::endl; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_vdnn_lag, bwd_vdnn_lag, true, NULL, &temp_loss); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_vdnn_lag, bwd_vdnn_lag, true, NULL, &temp_loss); // float Salpha = -learning_rate; // double Dalpha = -learning_rate; // if (update_rule == SGD) { // for (int i = 0; i < model->num_layers; i++) { // if (model->layer_type[i] == CONV) { // ConvLayerParams *cur_params = (ConvLayerParams *)model->params[i]; // int kernel_size = cur_params->C_in * cur_params->C_out * cur_params->filter_h * cur_params->filter_w; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(cublasSaxpy(model->cublas_handle, kernel_size, // &Salpha, // (float *)cur_params->dW, 1, // (float *)cur_params->W, 1)); // checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->C_out, // &Salpha, // 
(float *)cur_params->db, 1, // (float *)cur_params->b, 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(cublasDaxpy(model->cublas_handle, kernel_size, // &Dalpha, // (double *)cur_params->dW, 1, // (double *)cur_params->W, 1)); // checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->C_out, // &Dalpha, // (double *)cur_params->db, 1, // (double *)cur_params->b, 1)); // } // } // else if (model->layer_type[i] == FULLY_CONNECTED) { // FCLayerParams *cur_params = (FCLayerParams *)model->params[i]; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->C_in * cur_params->C_out, // &Salpha, // (float *)cur_params->dW, 1, // (float *)cur_params->W, 1)); // checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->C_out, // &Salpha, // (float *)cur_params->db, 1, // (float *)cur_params->b, 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->C_in * cur_params->C_out, // &Dalpha, // (double *)cur_params->dW, 1, // (double *)cur_params->W, 1)); // checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->C_out, // &Dalpha, // (double *)cur_params->db, 1, // (double *)cur_params->b, 1)); // } // } // else if (model->layer_type[i] == BATCHNORM) { // BatchNormLayerParams *cur_params = (BatchNormLayerParams *)model->params[i]; // if (model->data_type == CUDNN_DATA_FLOAT) { // checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->sbmv_size, // &Salpha, // (float *)cur_params->dscale, 1, // (float *)cur_params->scale, 1)); // checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->sbmv_size, // &Salpha, // (float *)cur_params->dbias, 1, // (float *)cur_params->bias, 1)); // } // else if (model->data_type == CUDNN_DATA_DOUBLE) { // checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->sbmv_size, // &Dalpha, // (double *)cur_params->dscale, 1, // (double *)cur_params->scale, 1)); // 
checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->sbmv_size, // &Dalpha, // (double *)cur_params->dbias, 1, // (double *)cur_params->bias, 1)); // } // } // } // } checkCudaErrors(cudaDeviceSynchronize()); return temp_loss; } void Solver::train(std::vector<float> &loss, std::vector<int> &val_acc) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; int num_val_batches = num_val / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; float milli = 0; checkCudaErrors(cudaEventRecord(start, model->stream_compute)); float temp_loss = step(start_sample, j * batch_size); checkCudaErrors(cudaEventRecord(stop, model->stream_compute)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&milli, start, stop)); std::cout << "One forward, backward pass time(ms): " << milli << std::endl; loss.push_back(temp_loss); std::cout << "loss: " << temp_loss << std::endl; } int correct_count = 0; for (int j = 0; j < num_val_batches; j++) { int start_sample = j * num_features * batch_size; int temp_correct_count; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X_val)[start_sample]), &y_val[j * batch_size], learning_rate, false, &temp_correct_count, NULL); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X_val)[start_sample]), &y_val[j * batch_size], learning_rate, false, &temp_correct_count, NULL); correct_count += temp_correct_count; } val_acc.push_back(correct_count); std::cout << "val_acc: " << val_acc[i] << std::endl; // learning_rate *= learning_rate_decay; // std::cout << "learning_rate: " << learning_rate << std::endl; } learning_rate *= learning_rate_decay; } void Solver::checkAccuracy(void *X, int *y, int num_samples, int *num_correct) { int batch_size = model->batch_size; int num_iter = num_samples / batch_size; *num_correct = 0; for (int i = 0; i < 
num_iter; i++) { int start_sample = i * num_features * batch_size; int temp_correct_count; if (model->data_type == CUDNN_DATA_FLOAT) model->getLoss(&(((float *)X)[start_sample]), &y[i * batch_size], learning_rate, false, &temp_correct_count, NULL); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getLoss(&(((double *)X)[start_sample]), &y[i * batch_size], learning_rate, false, &temp_correct_count, NULL); *num_correct = *num_correct + temp_correct_count; } } void Solver::getTrainTime(std::vector<float> &loss, std::vector<float> &time, int num_epoch, std::vector<std::vector<float> > &fwd_vdnn_lag, std::vector<std::vector<float> > &bwd_vdnn_lag) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; checkCudaErrors(cudaEventRecord(start)); float milli; std::vector<float> cur_fwd_vdnn_lag, cur_bwd_vdnn_lag; float temp_loss = step(start_sample, j * batch_size, cur_fwd_vdnn_lag, cur_bwd_vdnn_lag); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&milli, start, stop)); // std::cout << "One forward, backward pass time(ms): " << milli << std::endl; fwd_vdnn_lag.push_back(cur_fwd_vdnn_lag); bwd_vdnn_lag.push_back(cur_bwd_vdnn_lag); loss.push_back(temp_loss); time.push_back(milli); // std::cout << "loss: " << temp_loss << std::endl; // for (int i = 0; i < cur_fwd_vdnn_lag.size(); i++) { // std::cout << "fwd_lag " << i << ":" << cur_fwd_vdnn_lag[i] << std::endl; // } // for (int i = 0; i < cur_bwd_vdnn_lag.size(); i++) { // std::cout << "bwd_lag " << i << ":" << cur_bwd_vdnn_lag[i] << std::endl; // } } } learning_rate *= learning_rate_decay; } void Solver::getComputationTime(long num_epoch, std::vector<std::vector<float> > &fwd_computation_time, std::vector<std::vector<float> > &bwd_computation_time) { int batch_size = 
model->batch_size; int num_train_batches = num_train / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; float milli; std::vector<float> cur_fwd_computation_time, cur_bwd_computation_time; stepComputationTime(start_sample, j * batch_size, cur_fwd_computation_time, cur_bwd_computation_time); fwd_computation_time.push_back(cur_fwd_computation_time); bwd_computation_time.push_back(cur_bwd_computation_time); } learning_rate *= learning_rate_decay; } } void Solver::getTransferTime(long num_epoch, std::vector<std::vector<float> > &fwd_transfer_time, std::vector<std::vector<float> > &bwd_transfer_time) { int batch_size = model->batch_size; int num_train_batches = num_train / model->batch_size; for (int i = 0; i < num_epoch; i++) { for (int j = 0; j < num_train_batches; j++) { int start_sample = j * num_features * batch_size; float milli; std::vector<float> cur_fwd_transfer_time, cur_bwd_transfer_time; stepTransferTime(start_sample, j * batch_size, cur_fwd_transfer_time, cur_bwd_transfer_time); fwd_transfer_time.push_back(cur_fwd_transfer_time); bwd_transfer_time.push_back(cur_bwd_transfer_time); } learning_rate *= learning_rate_decay; } } void Solver::stepComputationTime(int start_X, int start_y, std::vector<float> &fwd_computation_time, std::vector<float> &bwd_computation_time) { if (model->data_type == CUDNN_DATA_FLOAT) model->getComputationTime(&(((float *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_computation_time, bwd_computation_time); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getComputationTime(&(((double *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_computation_time, bwd_computation_time); } void Solver::stepTransferTime(int start_X, int start_y, std::vector<float> &fwd_transfer_time, std::vector<float> &bwd_transfer_time) { if (model->data_type == CUDNN_DATA_FLOAT) model->getTransferTime(&(((float *)X_train)[start_X]), 
&y_train[start_y], learning_rate, fwd_transfer_time, bwd_transfer_time); else if (model->data_type == CUDNN_DATA_DOUBLE) model->getTransferTime(&(((double *)X_train)[start_X]), &y_train[start_y], learning_rate, fwd_transfer_time, bwd_transfer_time); }
b9a06d3ff0be758cf9d0ae9536361dab444723d9.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. /*! \file YukawaDriverPotentialPairGPU.cu \brief Defines the driver functions for computing all types of pair forces on the GPU */ #include "EvaluatorPairYukawa.h" #include "AllDriverPotentialPairGPU.cuh" hipError_t gpu_compute_yukawa_forces(const pair_args_t& pair_args, const Scalar2 *d_params) { return gpu_compute_pair_forces<EvaluatorPairYukawa>(pair_args, d_params); }
b9a06d3ff0be758cf9d0ae9536361dab444723d9.cu
// Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. /*! \file YukawaDriverPotentialPairGPU.cu \brief Defines the driver functions for computing all types of pair forces on the GPU */ #include "EvaluatorPairYukawa.h" #include "AllDriverPotentialPairGPU.cuh" cudaError_t gpu_compute_yukawa_forces(const pair_args_t& pair_args, const Scalar2 *d_params) { return gpu_compute_pair_forces<EvaluatorPairYukawa>(pair_args, d_params); }
b5c61defa41f1d5892869f145abee222e1681674.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stddef.h> #include <limits.h> #include <string.h> __global__ void fun(int *z){ char a[5] = "abc"; char* p = realloc(a, 5); p[0]; *z = p[0]; printf("%d\n", *z); } int main(void) { int z; int *dev_z; hipMalloc((void**)&dev_z, sizeof(int)); hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_z); hipMemcpy(&z, dev_z, sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_z); return 0; } //(11): error: a value of type "void *" cannot be used to initialize an entity of type "char *";
b5c61defa41f1d5892869f145abee222e1681674.cu
#include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stddef.h> #include <limits.h> #include <string.h> __global__ void fun(int *z){ char a[5] = "abc"; char* p = realloc(a, 5); p[0]; *z = p[0]; printf("%d\n", *z); } int main(void) { int z; int *dev_z; cudaMalloc((void**)&dev_z, sizeof(int)); fun<<<1,1>>>(dev_z); cudaMemcpy(&z, dev_z, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_z); return 0; } //编译未通过;(11): error: a value of type "void *" cannot be used to initialize an entity of type "char *";
9379e124d8beb032288b3b1f2f06f96f4f1722fa.hip
// !!! This is a file automatically generated by hipify!!! // Cuckoo Cycle, a memory-hard proof-of-work by John Tromp // Copyright (c) 2018 Jiri Vadura - photon // This CUDA part of Theta optimized miner is covered by the FAIR MINING license #include "hip/hip_runtime_api.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include <xmmintrin.h> #include <algorithm> #include <stdio.h> #include <stdint.h> #include <atomic> #include <thread> #include <vector> #include <iostream> #include <fstream> #include <stdlib.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif typedef uint8_t u8; typedef uint16_t u16; typedef uint32_t u32; typedef uint64_t u64; typedef u32 node_t; typedef u64 nonce_t; #ifdef _WIN32 #define DUCK_SIZE_A 129LL #define DUCK_SIZE_B 82LL #else #define DUCK_SIZE_A 130LL #define DUCK_SIZE_B 85LL #endif #define DUCK_A_EDGES (DUCK_SIZE_A * 1024LL) #define DUCK_A_EDGES_64 (DUCK_A_EDGES * 64LL) #define DUCK_B_EDGES (DUCK_SIZE_B * 1024LL) #define DUCK_B_EDGES_64 (DUCK_B_EDGES * 64LL) #ifndef EDGEBITS #define EDGEBITS 29 #endif // number of edges #define NEDGES ((node_t)1 << EDGEBITS) // used to mask siphash output #define EDGEMASK (NEDGES - 1) #define CTHREADS 1024 #define BKTMASK4K (4096-1) #define ROTL(x,b) ( ((x) << (b)) | ( (x) >> (64 - (b))) ) #define SIPROUND \ do { \ v0 += v1; v2 += v3; v1 = ROTL(v1,13); \ v3 = ROTL(v3,16); v1 ^= v0; v3 ^= v2; \ v0 = ROTL(v0,32); v2 += v1; v0 += v3; \ v1 = ROTL(v1,17); v3 = ROTL(v3,21); \ v1 ^= v2; v3 ^= v0; v2 = ROTL(v2,32); \ } while(0) __device__ node_t dipnode(const u64 v0i, const u64 v1i, const u64 v2i, const u64 v3i, const nonce_t nce, const u32 uorv) { u64 nonce = 2 * nce + uorv; u64 v0 = v0i, v1 = v1i, v2 = v2i, v3 = v3i ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= 0xff; SIPROUND; SIPROUND; SIPROUND; SIPROUND; return (v0 ^ v1 ^ v2 ^ v3) & EDGEMASK; } __device__ ulonglong4 
Pack4edges(const uint2 e1, const uint2 e2, const uint2 e3, const uint2 e4) { u64 r1 = (((u64)e1.y << 32) | ((u64)e1.x)); u64 r2 = (((u64)e2.y << 32) | ((u64)e2.x)); u64 r3 = (((u64)e3.y << 32) | ((u64)e3.x)); u64 r4 = (((u64)e4.y << 32) | ((u64)e4.x)); return make_ulonglong4(r1, r2, r3, r4); } __global__ void FluffySeed2A(const u64 v0i, const u64 v1i, const u64 v2i, const u64 v3i, ulonglong4 * buffer, int * indexes) { const int gid = blockDim.x * blockIdx.x + threadIdx.x; const int lid = threadIdx.x; __shared__ uint2 tmp[64][15]; __shared__ int counters[64]; counters[lid] = 0; __syncthreads(); for (int i = 0; i < 1024 * 16; i++) { u64 nonce = gid * (1024 * 16) + i; uint2 hash; hash.x = dipnode(v0i, v1i, v2i, v3i, nonce, 0); int bucket = hash.x & (64 - 1); __syncthreads(); int counter = min((int)atomicAdd(counters + bucket, 1), (int)14); hash.y = dipnode(v0i, v1i, v2i, v3i, nonce, 1); tmp[bucket][counter] = hash; __syncthreads(); { int localIdx = min(15, counters[lid]); if (localIdx >= 8) { int newCount = (localIdx - 8); counters[lid] = newCount; { int cnt = min((int)atomicAdd(indexes + lid, 8), (int)(DUCK_A_EDGES_64 - 8)); { buffer[(lid * DUCK_A_EDGES_64 + cnt) / 4] = Pack4edges(tmp[lid][0], tmp[lid][1], tmp[lid][2], tmp[lid][3]); buffer[(lid * DUCK_A_EDGES_64 + cnt + 4) / 4] = Pack4edges(tmp[lid][4], tmp[lid][5], tmp[lid][6], tmp[lid][7]); } } for (int t = 0; t < newCount; t++) { tmp[lid][t] = tmp[lid][t + 8]; } } } } __syncthreads(); { int localIdx = min(16, counters[lid]); if (localIdx > 0) { int cnt = min((int)atomicAdd(indexes + lid, 4), (int)(DUCK_A_EDGES_64 - 4)); buffer[(lid * DUCK_A_EDGES_64 + cnt) / 4] = Pack4edges( tmp[lid][0], localIdx > 1 ? tmp[lid][1] : make_uint2(0, 0), localIdx > 2 ? tmp[lid][2] : make_uint2(0, 0), localIdx > 3 ? tmp[lid][3] : make_uint2(0, 0)); } if (localIdx > 4) { int cnt = min((int)atomicAdd(indexes + lid, 4), (int)(DUCK_A_EDGES_64 - 4)); buffer[(lid * DUCK_A_EDGES_64 + cnt) / 4] = Pack4edges( tmp[lid][4], localIdx > 5 ? 
tmp[lid][5] : make_uint2(0, 0), localIdx > 6 ? tmp[lid][6] : make_uint2(0, 0), localIdx > 7 ? tmp[lid][7] : make_uint2(0, 0)); } } } #define BKTGRAN 32 __global__ void FluffySeed2B(const uint2 * source, ulonglong4 * destination, const int * sourceIndexes, int * destinationIndexes, int startBlock) { //const int gid = blockDim.x * blockIdx.x + threadIdx.x; const int lid = threadIdx.x; const int group = blockIdx.x; __shared__ uint2 tmp[64][15]; __shared__ int counters[64]; counters[lid] = 0; __syncthreads(); const int offsetMem = startBlock * DUCK_A_EDGES_64; const int myBucket = group / BKTGRAN; const int microBlockNo = group % BKTGRAN; const int bucketEdges = min(sourceIndexes[myBucket + startBlock], (int)(DUCK_A_EDGES_64)); const int microBlockEdgesCount = (DUCK_A_EDGES_64 / BKTGRAN); const int loops = (microBlockEdgesCount / 64); for (int i = 0; i < loops; i++) { int edgeIndex = (microBlockNo * microBlockEdgesCount) + (64 * i) + lid; if (edgeIndex < bucketEdges) { uint2 edge = source[offsetMem + (myBucket * DUCK_A_EDGES_64) + edgeIndex]; if (edge.x == 0 && edge.y == 0) continue; int bucket = (edge.x >> 6) & (64 - 1); __syncthreads(); int counter = min((int)atomicAdd(counters + bucket, 1), (int)14); tmp[bucket][counter] = edge; __syncthreads(); int localIdx = min(15, counters[lid]); if (localIdx >= 8) { int newCount = (localIdx - 8); counters[lid] = newCount; { int cnt = min((int)atomicAdd(destinationIndexes + startBlock * 64 + myBucket * 64 + lid, 8), (int)(DUCK_A_EDGES - 8)); { destination[((myBucket * 64 + lid) * DUCK_A_EDGES + cnt) / 4] = Pack4edges(tmp[lid][0], tmp[lid][1], tmp[lid][2], tmp[lid][3]); destination[((myBucket * 64 + lid) * DUCK_A_EDGES + cnt + 4) / 4] = Pack4edges(tmp[lid][4], tmp[lid][5], tmp[lid][6], tmp[lid][7]); } } for (int t = 0; t < newCount; t++) { tmp[lid][t] = tmp[lid][t + 8]; } } } } __syncthreads(); { int localIdx = min(16, counters[lid]); if (localIdx > 0) { int cnt = min((int)atomicAdd(destinationIndexes + startBlock * 64 + myBucket 
* 64 + lid, 4), (int)(DUCK_A_EDGES - 4)); destination[((myBucket * 64 + lid) * DUCK_A_EDGES + cnt) / 4] = Pack4edges( tmp[lid][0], localIdx > 1 ? tmp[lid][1] : make_uint2(0, 0), localIdx > 2 ? tmp[lid][2] : make_uint2(0, 0), localIdx > 3 ? tmp[lid][3] : make_uint2(0, 0)); } if (localIdx > 4) { int cnt = min((int)atomicAdd(destinationIndexes + startBlock * 64 + myBucket * 64 + lid, 4), (int)(DUCK_A_EDGES - 4)); destination[((myBucket * 64 + lid) * DUCK_A_EDGES + cnt) / 4] = Pack4edges( tmp[lid][4], localIdx > 5 ? tmp[lid][5] : make_uint2(0, 0), localIdx > 6 ? tmp[lid][6] : make_uint2(0, 0), localIdx > 7 ? tmp[lid][7] : make_uint2(0, 0)); } } } __device__ __forceinline__ void Increase2bCounter(u32 * ecounters, const int bucket) { int word = bucket >> 5; unsigned char bit = bucket & 0x1F; u32 mask = 1 << bit; u32 old = atomicOr(ecounters + word, mask) & mask; if (old > 0) atomicOr(ecounters + word + 4096, mask); } __device__ __forceinline__ bool Read2bCounter(u32 * ecounters, const int bucket) { int word = bucket >> 5; unsigned char bit = bucket & 0x1F; u32 mask = 1 << bit; return (ecounters[word + 4096] & mask) > 0; } template<int bktInSize, int bktOutSize> __global__ void FluffyRound(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes) { //const int gid = blockDim.x * blockIdx.x + threadIdx.x; const int lid = threadIdx.x; const int group = blockIdx.x; __shared__ u32 ecounters[8192]; const int edgesInBucket = min(sourceIndexes[group], bktInSize); const int loops = (edgesInBucket + CTHREADS) / CTHREADS; ecounters[lid] = 0; ecounters[lid + 1024] = 0; ecounters[lid + (1024 * 2)] = 0; ecounters[lid + (1024 * 3)] = 0; ecounters[lid + (1024 * 4)] = 0; ecounters[lid + (1024 * 5)] = 0; ecounters[lid + (1024 * 6)] = 0; ecounters[lid + (1024 * 7)] = 0; __syncthreads(); for (int i = 0; i < loops; i++) { const int lindex = (i * CTHREADS) + lid; if (lindex < edgesInBucket) { const int index = (bktInSize * group) + lindex; uint2 edge = 
source[index]; if (edge.x == 0 && edge.y == 0) continue; Increase2bCounter(ecounters, (edge.x & EDGEMASK) >> 12); } } __syncthreads(); for (int i = 0; i < loops; i++) { const int lindex = (i * CTHREADS) + lid; if (lindex < edgesInBucket) { const int index = (bktInSize * group) + lindex; uint2 edge = source[index]; if (edge.x == 0 && edge.y == 0) continue; if (Read2bCounter(ecounters, (edge.x & EDGEMASK) >> 12)) { const int bucket = edge.y & BKTMASK4K; const int bktIdx = min(atomicAdd(destinationIndexes + bucket, 1), bktOutSize - 1); destination[(bucket * bktOutSize) + bktIdx] = make_uint2(edge.y, edge.x); } } } } template __global__ void FluffyRound<DUCK_A_EDGES, DUCK_B_EDGES>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES, DUCK_B_EDGES>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES, DUCK_B_EDGES / 2>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 2>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 4>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES / 4, DUCK_B_EDGES / 4>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); __global__ void /*Magical*/FluffyTail/*Pony*/(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes) { const int lid = threadIdx.x; const int group = blockIdx.x; int myEdges = sourceIndexes[group]; __shared__ int destIdx; if (lid == 0) destIdx = atomicAdd(destinationIndexes, myEdges); __syncthreads(); if (lid < myEdges) { 
destination[destIdx + lid] = source[group * DUCK_B_EDGES / 4 + lid]; } } struct blockstpb { u16 blocks; u16 tpb; }; struct trimparams { u16 expand; u16 ntrims; blockstpb genA; blockstpb genB; blockstpb trim; blockstpb tail; blockstpb recover; trimparams() { #ifndef XBITS #define XBITS ((EDGEBITS-16)/2) #endif const static u32 NX = 1 << XBITS; const static u32 NX2 = NX * NX; expand = 0; ntrims = 176; genA.blocks = 4096; genA.tpb = 256; genB.blocks = NX2; genB.tpb = 128; trim.blocks = NX2; trim.tpb = 512; tail.blocks = NX2; tail.tpb = 1024; recover.blocks = 1024; recover.tpb = 1024; } }; struct edgetrimmer { const size_t bufferSize = DUCK_SIZE_A * 1024 * 4096 * 8; const size_t bufferSize2 = DUCK_SIZE_B * 1024 * 4096 * 8; const size_t indexesSize = 128 * 128 * 4; //const unsigned int edges = (1 << 29); int * bufferA; int * bufferB; int * indexesE; int * indexesE2; u32 hostA[256 * 256]; hipError_t cudaStatus; size_t free_device_mem = 0; size_t total_device_mem = 0; unsigned long long k0 = 0xa34c6a2bdaa03a14ULL; unsigned long long k1 = 0xd736650ae53eee9eULL; unsigned long long k2 = 0x9a22f05e3bffed5eULL; unsigned long long k3 = 0xb8d55478fa3a606dULL; edgetrimmer(const trimparams _tp) {} u32 trim(uint32_t device) { // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(device); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Do you have a CUDA-capable GPU installed?\n"); goto Error; } hipSetDeviceFlags(hipDeviceScheduleBlockingSync | hipDeviceMapHost); hipMemGetInfo(&free_device_mem, &total_device_mem); fprintf(stderr, "Currently available amount of device memory: %zu bytes\n", free_device_mem); fprintf(stderr, "Total amount of device memory: %zu bytes\n", total_device_mem); cudaStatus = hipMalloc((void**)&bufferA, bufferSize); if (cudaStatus != hipSuccess) { fprintf(stderr, "status: %s\n", hipGetErrorString(cudaStatus)); fprintf(stderr, "hipMalloc failed buffer A 4GB!\n"); goto Error; } fprintf(stderr, "Allociating buffer 1\n"); hipMemGetInfo(&free_device_mem, &total_device_mem); //printf("Buffer A: Currently available amount of device memory: %zu bytes\n", free_device_mem); fprintf(stderr, "Allociating buffer 2\n"); cudaStatus = hipMalloc((void**)&bufferB, bufferSize2); if (cudaStatus != hipSuccess) { fprintf(stderr, "status: %s\n", hipGetErrorString(cudaStatus)); fprintf(stderr, "hipMalloc failed buffer B 3GB!\n"); goto Error; } cudaStatus = hipMalloc((void**)&indexesE, indexesSize); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed Index array 1!\n"); goto Error; } cudaStatus = hipMalloc((void**)&indexesE2, indexesSize); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed Index array 2!\n"); goto Error; } hipMemGetInfo(&free_device_mem, &total_device_mem); fprintf(stderr, "Currently available amount of device memory: %zu bytes\n", free_device_mem); fprintf(stderr, "CUDA device armed\n"); // loop starts here // wait for header hashes, nonce+r { fprintf(stderr, "Trimming: %llx %llx %llx %llx\n", k0, k1, k2, k3); // ack hipMemset(indexesE, 0, indexesSize); hipMemset(indexesE2, 0, indexesSize); hipDeviceSynchronize(); FluffySeed2A << < 512, 64 >> > (k0, k1, k2, k3, (ulonglong4 *)bufferA, (int *)indexesE2); FluffySeed2B << < 32 * BKTGRAN, 64 >> > ((const uint2 *)bufferA, (ulonglong4 *)bufferB, (const int *)indexesE2, (int *)indexesE, 0); hipMemcpy(bufferA, 
bufferB, bufferSize / 2, hipMemcpyDeviceToDevice); FluffySeed2B << < 32 * BKTGRAN, 64 >> > ((const uint2 *)bufferA, (ulonglong4 *)bufferB, (const int *)indexesE2, (int *)indexesE, 32); cudaStatus = hipMemcpy(&((char *)bufferA)[bufferSize / 2], bufferB, bufferSize / 2, hipMemcpyDeviceToDevice); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) fprintf(stderr, "status memcpy: %s\n", hipGetErrorString(cudaStatus)); hipMemset(indexesE2, 0, indexesSize); FluffyRound<DUCK_A_EDGES, DUCK_B_EDGES> << < 4096, 1024 >> > ((const uint2 *)bufferA, (uint2 *)bufferB, (const int *)indexesE, (int *)indexesE2); hipMemset(indexesE, 0, indexesSize); FluffyRound<DUCK_B_EDGES, DUCK_B_EDGES / 2> << < 4096, 1024 >> > ((const uint2 *)bufferB, (uint2 *)bufferA, (const int *)indexesE2, (int *)indexesE); hipMemset(indexesE2, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 2> << < 4096, 1024 >> > ((const uint2 *)bufferA, (uint2 *)bufferB, (const int *)indexesE, (int *)indexesE2); hipMemset(indexesE, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 2> << < 4096, 1024 >> > ((const uint2 *)bufferB, (uint2 *)bufferA, (const int *)indexesE2, (int *)indexesE); hipMemset(indexesE2, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 4> << < 4096, 1024 >> > ((const uint2 *)bufferA, (uint2 *)bufferB, (const int *)indexesE, (int *)indexesE2); hipDeviceSynchronize(); for (int i = 0; i < 80; i++) { hipMemset(indexesE, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 4, DUCK_B_EDGES / 4> << < 4096, 1024 >> > ((const uint2 *)bufferB, (uint2 *)bufferA, (const int *)indexesE2, (int *)indexesE); hipMemset(indexesE2, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 4, DUCK_B_EDGES / 4> << < 4096, 1024 >> > ((const uint2 *)bufferA, (uint2 *)bufferB, (const int *)indexesE, (int *)indexesE2); } hipMemset(indexesE, 0, indexesSize); hipDeviceSynchronize(); FluffyTail << < 4096, 1024 >> > ((const uint2 *)bufferB, (uint2 *)bufferA, (const int *)indexesE2, (int *)indexesE); 
hipMemcpy(hostA, indexesE, 64 * 64 * 4, hipMemcpyDeviceToHost); hipDeviceSynchronize(); } cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) fprintf(stderr, "status: %s\n", hipGetErrorString(cudaStatus)); Error: } ~edgetrimmer() { fprintf(stderr, "CUDA terminating...\n"); fprintf(stderr, "#x\n"); hipFree(bufferA); hipFree(bufferB); hipFree(indexesE); hipFree(indexesE2); hipDeviceReset(); } }; int main(int argc, char* argv[]) { int device = 0; if (argc >= 2) device = atoi(argv[1]); return 0; }
9379e124d8beb032288b3b1f2f06f96f4f1722fa.cu
// Cuckoo Cycle, a memory-hard proof-of-work by John Tromp // Copyright (c) 2018 Jiri Vadura - photon // This CUDA part of Theta optimized miner is covered by the FAIR MINING license #include "cuda_profiler_api.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" #include <xmmintrin.h> #include <algorithm> #include <stdio.h> #include <stdint.h> #include <atomic> #include <thread> #include <vector> #include <iostream> #include <fstream> #include <stdlib.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif typedef uint8_t u8; typedef uint16_t u16; typedef uint32_t u32; typedef uint64_t u64; typedef u32 node_t; typedef u64 nonce_t; #ifdef _WIN32 #define DUCK_SIZE_A 129LL #define DUCK_SIZE_B 82LL #else #define DUCK_SIZE_A 130LL #define DUCK_SIZE_B 85LL #endif #define DUCK_A_EDGES (DUCK_SIZE_A * 1024LL) #define DUCK_A_EDGES_64 (DUCK_A_EDGES * 64LL) #define DUCK_B_EDGES (DUCK_SIZE_B * 1024LL) #define DUCK_B_EDGES_64 (DUCK_B_EDGES * 64LL) #ifndef EDGEBITS #define EDGEBITS 29 #endif // number of edges #define NEDGES ((node_t)1 << EDGEBITS) // used to mask siphash output #define EDGEMASK (NEDGES - 1) #define CTHREADS 1024 #define BKTMASK4K (4096-1) #define ROTL(x,b) ( ((x) << (b)) | ( (x) >> (64 - (b))) ) #define SIPROUND \ do { \ v0 += v1; v2 += v3; v1 = ROTL(v1,13); \ v3 = ROTL(v3,16); v1 ^= v0; v3 ^= v2; \ v0 = ROTL(v0,32); v2 += v1; v0 += v3; \ v1 = ROTL(v1,17); v3 = ROTL(v3,21); \ v1 ^= v2; v3 ^= v0; v2 = ROTL(v2,32); \ } while(0) __device__ node_t dipnode(const u64 v0i, const u64 v1i, const u64 v2i, const u64 v3i, const nonce_t nce, const u32 uorv) { u64 nonce = 2 * nce + uorv; u64 v0 = v0i, v1 = v1i, v2 = v2i, v3 = v3i ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= 0xff; SIPROUND; SIPROUND; SIPROUND; SIPROUND; return (v0 ^ v1 ^ v2 ^ v3) & EDGEMASK; } __device__ ulonglong4 Pack4edges(const uint2 e1, const uint2 e2, const uint2 e3, const uint2 
e4) { u64 r1 = (((u64)e1.y << 32) | ((u64)e1.x)); u64 r2 = (((u64)e2.y << 32) | ((u64)e2.x)); u64 r3 = (((u64)e3.y << 32) | ((u64)e3.x)); u64 r4 = (((u64)e4.y << 32) | ((u64)e4.x)); return make_ulonglong4(r1, r2, r3, r4); } __global__ void FluffySeed2A(const u64 v0i, const u64 v1i, const u64 v2i, const u64 v3i, ulonglong4 * buffer, int * indexes) { const int gid = blockDim.x * blockIdx.x + threadIdx.x; const int lid = threadIdx.x; __shared__ uint2 tmp[64][15]; __shared__ int counters[64]; counters[lid] = 0; __syncthreads(); for (int i = 0; i < 1024 * 16; i++) { u64 nonce = gid * (1024 * 16) + i; uint2 hash; hash.x = dipnode(v0i, v1i, v2i, v3i, nonce, 0); int bucket = hash.x & (64 - 1); __syncthreads(); int counter = min((int)atomicAdd(counters + bucket, 1), (int)14); hash.y = dipnode(v0i, v1i, v2i, v3i, nonce, 1); tmp[bucket][counter] = hash; __syncthreads(); { int localIdx = min(15, counters[lid]); if (localIdx >= 8) { int newCount = (localIdx - 8); counters[lid] = newCount; { int cnt = min((int)atomicAdd(indexes + lid, 8), (int)(DUCK_A_EDGES_64 - 8)); { buffer[(lid * DUCK_A_EDGES_64 + cnt) / 4] = Pack4edges(tmp[lid][0], tmp[lid][1], tmp[lid][2], tmp[lid][3]); buffer[(lid * DUCK_A_EDGES_64 + cnt + 4) / 4] = Pack4edges(tmp[lid][4], tmp[lid][5], tmp[lid][6], tmp[lid][7]); } } for (int t = 0; t < newCount; t++) { tmp[lid][t] = tmp[lid][t + 8]; } } } } __syncthreads(); { int localIdx = min(16, counters[lid]); if (localIdx > 0) { int cnt = min((int)atomicAdd(indexes + lid, 4), (int)(DUCK_A_EDGES_64 - 4)); buffer[(lid * DUCK_A_EDGES_64 + cnt) / 4] = Pack4edges( tmp[lid][0], localIdx > 1 ? tmp[lid][1] : make_uint2(0, 0), localIdx > 2 ? tmp[lid][2] : make_uint2(0, 0), localIdx > 3 ? tmp[lid][3] : make_uint2(0, 0)); } if (localIdx > 4) { int cnt = min((int)atomicAdd(indexes + lid, 4), (int)(DUCK_A_EDGES_64 - 4)); buffer[(lid * DUCK_A_EDGES_64 + cnt) / 4] = Pack4edges( tmp[lid][4], localIdx > 5 ? tmp[lid][5] : make_uint2(0, 0), localIdx > 6 ? 
tmp[lid][6] : make_uint2(0, 0), localIdx > 7 ? tmp[lid][7] : make_uint2(0, 0)); } } } #define BKTGRAN 32 __global__ void FluffySeed2B(const uint2 * source, ulonglong4 * destination, const int * sourceIndexes, int * destinationIndexes, int startBlock) { //const int gid = blockDim.x * blockIdx.x + threadIdx.x; const int lid = threadIdx.x; const int group = blockIdx.x; __shared__ uint2 tmp[64][15]; __shared__ int counters[64]; counters[lid] = 0; __syncthreads(); const int offsetMem = startBlock * DUCK_A_EDGES_64; const int myBucket = group / BKTGRAN; const int microBlockNo = group % BKTGRAN; const int bucketEdges = min(sourceIndexes[myBucket + startBlock], (int)(DUCK_A_EDGES_64)); const int microBlockEdgesCount = (DUCK_A_EDGES_64 / BKTGRAN); const int loops = (microBlockEdgesCount / 64); for (int i = 0; i < loops; i++) { int edgeIndex = (microBlockNo * microBlockEdgesCount) + (64 * i) + lid; if (edgeIndex < bucketEdges) { uint2 edge = source[offsetMem + (myBucket * DUCK_A_EDGES_64) + edgeIndex]; if (edge.x == 0 && edge.y == 0) continue; int bucket = (edge.x >> 6) & (64 - 1); __syncthreads(); int counter = min((int)atomicAdd(counters + bucket, 1), (int)14); tmp[bucket][counter] = edge; __syncthreads(); int localIdx = min(15, counters[lid]); if (localIdx >= 8) { int newCount = (localIdx - 8); counters[lid] = newCount; { int cnt = min((int)atomicAdd(destinationIndexes + startBlock * 64 + myBucket * 64 + lid, 8), (int)(DUCK_A_EDGES - 8)); { destination[((myBucket * 64 + lid) * DUCK_A_EDGES + cnt) / 4] = Pack4edges(tmp[lid][0], tmp[lid][1], tmp[lid][2], tmp[lid][3]); destination[((myBucket * 64 + lid) * DUCK_A_EDGES + cnt + 4) / 4] = Pack4edges(tmp[lid][4], tmp[lid][5], tmp[lid][6], tmp[lid][7]); } } for (int t = 0; t < newCount; t++) { tmp[lid][t] = tmp[lid][t + 8]; } } } } __syncthreads(); { int localIdx = min(16, counters[lid]); if (localIdx > 0) { int cnt = min((int)atomicAdd(destinationIndexes + startBlock * 64 + myBucket * 64 + lid, 4), (int)(DUCK_A_EDGES - 4)); 
destination[((myBucket * 64 + lid) * DUCK_A_EDGES + cnt) / 4] = Pack4edges( tmp[lid][0], localIdx > 1 ? tmp[lid][1] : make_uint2(0, 0), localIdx > 2 ? tmp[lid][2] : make_uint2(0, 0), localIdx > 3 ? tmp[lid][3] : make_uint2(0, 0)); } if (localIdx > 4) { int cnt = min((int)atomicAdd(destinationIndexes + startBlock * 64 + myBucket * 64 + lid, 4), (int)(DUCK_A_EDGES - 4)); destination[((myBucket * 64 + lid) * DUCK_A_EDGES + cnt) / 4] = Pack4edges( tmp[lid][4], localIdx > 5 ? tmp[lid][5] : make_uint2(0, 0), localIdx > 6 ? tmp[lid][6] : make_uint2(0, 0), localIdx > 7 ? tmp[lid][7] : make_uint2(0, 0)); } } } __device__ __forceinline__ void Increase2bCounter(u32 * ecounters, const int bucket) { int word = bucket >> 5; unsigned char bit = bucket & 0x1F; u32 mask = 1 << bit; u32 old = atomicOr(ecounters + word, mask) & mask; if (old > 0) atomicOr(ecounters + word + 4096, mask); } __device__ __forceinline__ bool Read2bCounter(u32 * ecounters, const int bucket) { int word = bucket >> 5; unsigned char bit = bucket & 0x1F; u32 mask = 1 << bit; return (ecounters[word + 4096] & mask) > 0; } template<int bktInSize, int bktOutSize> __global__ void FluffyRound(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes) { //const int gid = blockDim.x * blockIdx.x + threadIdx.x; const int lid = threadIdx.x; const int group = blockIdx.x; __shared__ u32 ecounters[8192]; const int edgesInBucket = min(sourceIndexes[group], bktInSize); const int loops = (edgesInBucket + CTHREADS) / CTHREADS; ecounters[lid] = 0; ecounters[lid + 1024] = 0; ecounters[lid + (1024 * 2)] = 0; ecounters[lid + (1024 * 3)] = 0; ecounters[lid + (1024 * 4)] = 0; ecounters[lid + (1024 * 5)] = 0; ecounters[lid + (1024 * 6)] = 0; ecounters[lid + (1024 * 7)] = 0; __syncthreads(); for (int i = 0; i < loops; i++) { const int lindex = (i * CTHREADS) + lid; if (lindex < edgesInBucket) { const int index = (bktInSize * group) + lindex; uint2 edge = source[index]; if (edge.x == 0 && edge.y == 
0) continue; Increase2bCounter(ecounters, (edge.x & EDGEMASK) >> 12); } } __syncthreads(); for (int i = 0; i < loops; i++) { const int lindex = (i * CTHREADS) + lid; if (lindex < edgesInBucket) { const int index = (bktInSize * group) + lindex; uint2 edge = source[index]; if (edge.x == 0 && edge.y == 0) continue; if (Read2bCounter(ecounters, (edge.x & EDGEMASK) >> 12)) { const int bucket = edge.y & BKTMASK4K; const int bktIdx = min(atomicAdd(destinationIndexes + bucket, 1), bktOutSize - 1); destination[(bucket * bktOutSize) + bktIdx] = make_uint2(edge.y, edge.x); } } } } template __global__ void FluffyRound<DUCK_A_EDGES, DUCK_B_EDGES>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES, DUCK_B_EDGES>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES, DUCK_B_EDGES / 2>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 2>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 4>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); template __global__ void FluffyRound<DUCK_B_EDGES / 4, DUCK_B_EDGES / 4>(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes); __global__ void /*Magical*/FluffyTail/*Pony*/(const uint2 * source, uint2 * destination, const int * sourceIndexes, int * destinationIndexes) { const int lid = threadIdx.x; const int group = blockIdx.x; int myEdges = sourceIndexes[group]; __shared__ int destIdx; if (lid == 0) destIdx = atomicAdd(destinationIndexes, myEdges); __syncthreads(); if (lid < myEdges) { destination[destIdx + lid] = source[group * DUCK_B_EDGES 
/ 4 + lid]; } } struct blockstpb { u16 blocks; u16 tpb; }; struct trimparams { u16 expand; u16 ntrims; blockstpb genA; blockstpb genB; blockstpb trim; blockstpb tail; blockstpb recover; trimparams() { #ifndef XBITS #define XBITS ((EDGEBITS-16)/2) #endif const static u32 NX = 1 << XBITS; const static u32 NX2 = NX * NX; expand = 0; ntrims = 176; genA.blocks = 4096; genA.tpb = 256; genB.blocks = NX2; genB.tpb = 128; trim.blocks = NX2; trim.tpb = 512; tail.blocks = NX2; tail.tpb = 1024; recover.blocks = 1024; recover.tpb = 1024; } }; struct edgetrimmer { const size_t bufferSize = DUCK_SIZE_A * 1024 * 4096 * 8; const size_t bufferSize2 = DUCK_SIZE_B * 1024 * 4096 * 8; const size_t indexesSize = 128 * 128 * 4; //const unsigned int edges = (1 << 29); int * bufferA; int * bufferB; int * indexesE; int * indexesE2; u32 hostA[256 * 256]; cudaError_t cudaStatus; size_t free_device_mem = 0; size_t total_device_mem = 0; unsigned long long k0 = 0xa34c6a2bdaa03a14ULL; unsigned long long k1 = 0xd736650ae53eee9eULL; unsigned long long k2 = 0x9a22f05e3bffed5eULL; unsigned long long k3 = 0xb8d55478fa3a606dULL; edgetrimmer(const trimparams _tp) {} u32 trim(uint32_t device) { // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(device); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?\n"); goto Error; } cudaSetDeviceFlags(cudaDeviceBlockingSync | cudaDeviceMapHost); cudaMemGetInfo(&free_device_mem, &total_device_mem); fprintf(stderr, "Currently available amount of device memory: %zu bytes\n", free_device_mem); fprintf(stderr, "Total amount of device memory: %zu bytes\n", total_device_mem); cudaStatus = cudaMalloc((void**)&bufferA, bufferSize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "status: %s\n", cudaGetErrorString(cudaStatus)); fprintf(stderr, "cudaMalloc failed buffer A 4GB!\n"); goto Error; } fprintf(stderr, "Allociating buffer 1\n"); cudaMemGetInfo(&free_device_mem, &total_device_mem); //printf("Buffer A: Currently available amount of device memory: %zu bytes\n", free_device_mem); fprintf(stderr, "Allociating buffer 2\n"); cudaStatus = cudaMalloc((void**)&bufferB, bufferSize2); if (cudaStatus != cudaSuccess) { fprintf(stderr, "status: %s\n", cudaGetErrorString(cudaStatus)); fprintf(stderr, "cudaMalloc failed buffer B 3GB!\n"); goto Error; } cudaStatus = cudaMalloc((void**)&indexesE, indexesSize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed Index array 1!\n"); goto Error; } cudaStatus = cudaMalloc((void**)&indexesE2, indexesSize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed Index array 2!\n"); goto Error; } cudaMemGetInfo(&free_device_mem, &total_device_mem); fprintf(stderr, "Currently available amount of device memory: %zu bytes\n", free_device_mem); fprintf(stderr, "CUDA device armed\n"); // loop starts here // wait for header hashes, nonce+r { fprintf(stderr, "Trimming: %llx %llx %llx %llx\n", k0, k1, k2, k3); // ack cudaMemset(indexesE, 0, indexesSize); cudaMemset(indexesE2, 0, indexesSize); cudaDeviceSynchronize(); FluffySeed2A << < 512, 64 >> > (k0, k1, k2, k3, (ulonglong4 *)bufferA, (int *)indexesE2); FluffySeed2B << < 32 * BKTGRAN, 64 >> > ((const uint2 *)bufferA, (ulonglong4 *)bufferB, (const int *)indexesE2, (int *)indexesE, 0); 
cudaMemcpy(bufferA, bufferB, bufferSize / 2, cudaMemcpyDeviceToDevice); FluffySeed2B << < 32 * BKTGRAN, 64 >> > ((const uint2 *)bufferA, (ulonglong4 *)bufferB, (const int *)indexesE2, (int *)indexesE, 32); cudaStatus = cudaMemcpy(&((char *)bufferA)[bufferSize / 2], bufferB, bufferSize / 2, cudaMemcpyDeviceToDevice); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) fprintf(stderr, "status memcpy: %s\n", cudaGetErrorString(cudaStatus)); cudaMemset(indexesE2, 0, indexesSize); FluffyRound<DUCK_A_EDGES, DUCK_B_EDGES> << < 4096, 1024 >> > ((const uint2 *)bufferA, (uint2 *)bufferB, (const int *)indexesE, (int *)indexesE2); cudaMemset(indexesE, 0, indexesSize); FluffyRound<DUCK_B_EDGES, DUCK_B_EDGES / 2> << < 4096, 1024 >> > ((const uint2 *)bufferB, (uint2 *)bufferA, (const int *)indexesE2, (int *)indexesE); cudaMemset(indexesE2, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 2> << < 4096, 1024 >> > ((const uint2 *)bufferA, (uint2 *)bufferB, (const int *)indexesE, (int *)indexesE2); cudaMemset(indexesE, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 2> << < 4096, 1024 >> > ((const uint2 *)bufferB, (uint2 *)bufferA, (const int *)indexesE2, (int *)indexesE); cudaMemset(indexesE2, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 2, DUCK_B_EDGES / 4> << < 4096, 1024 >> > ((const uint2 *)bufferA, (uint2 *)bufferB, (const int *)indexesE, (int *)indexesE2); cudaDeviceSynchronize(); for (int i = 0; i < 80; i++) { cudaMemset(indexesE, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 4, DUCK_B_EDGES / 4> << < 4096, 1024 >> > ((const uint2 *)bufferB, (uint2 *)bufferA, (const int *)indexesE2, (int *)indexesE); cudaMemset(indexesE2, 0, indexesSize); FluffyRound<DUCK_B_EDGES / 4, DUCK_B_EDGES / 4> << < 4096, 1024 >> > ((const uint2 *)bufferA, (uint2 *)bufferB, (const int *)indexesE, (int *)indexesE2); } cudaMemset(indexesE, 0, indexesSize); cudaDeviceSynchronize(); FluffyTail << < 4096, 1024 >> > ((const uint2 *)bufferB, (uint2 *)bufferA, (const int 
*)indexesE2, (int *)indexesE); cudaMemcpy(hostA, indexesE, 64 * 64 * 4, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) fprintf(stderr, "status: %s\n", cudaGetErrorString(cudaStatus)); Error: } ~edgetrimmer() { fprintf(stderr, "CUDA terminating...\n"); fprintf(stderr, "#x\n"); cudaFree(bufferA); cudaFree(bufferB); cudaFree(indexesE); cudaFree(indexesE2); cudaDeviceReset(); } }; int main(int argc, char* argv[]) { int device = 0; if (argc >= 2) device = atoi(argv[1]); return 0; }
f9aa2598b644f4edfdd18063b0006c109446913a.hip
// !!! This is a file automatically generated by hipify!!! #include "flamegpu/runtime/utility/EnvironmentManager.cuh" #include <cassert> #include <memory> #include "flamegpu/gpu/detail/CUDAErrorChecking.cuh" #include "flamegpu/runtime/utility/DeviceEnvironment.cuh" #include "flamegpu/model/EnvironmentDescription.h" #include "flamegpu/model/SubEnvironmentData.h" #include "flamegpu/gpu/CUDASimulation.h" #include "flamegpu/util/nvtx.h" namespace flamegpu { /** * Internal namespace to hide __constant__ declarations from modeller */ namespace detail { /** * Managed by HostEnvironment, holds all environment properties */ __constant__ char c_envPropBuffer[EnvironmentManager::MAX_BUFFER_SIZE]; } // namespace detail std::mutex EnvironmentManager::instance_mutex; const char EnvironmentManager::CURVE_NAMESPACE_STRING[23] = "ENVIRONMENT_PROPERTIES"; EnvironmentManager::EnvironmentManager() : CURVE_NAMESPACE_HASH(detail::curve::Curve::variableRuntimeHash(CURVE_NAMESPACE_STRING)), nextFree(0), m_freeSpace(EnvironmentManager::MAX_BUFFER_SIZE), freeFragments(), deviceInitialised(false) { } void EnvironmentManager::purge() { std::unique_lock<std::shared_timed_mutex> lock(mutex); std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); deviceInitialised = false; for (auto &a : deviceRequiresUpdate) { a.second.c_update_required = true; a.second.rtc_update_required = true; a.second.curve_registration_required = true; } deviceRequiresUpdate_lock.unlock(); // We are now able to only purge the device stuff after device reset? 
// freeFragments.clear(); // m_freeSpace = EnvironmentManager::MAX_BUFFER_SIZE; // nextFree = 0; // cuda_agent_models.clear(); // properties.clear(); // mapped_properties.clear(); // rtc_caches.clear(); initialiseDevice(); } void EnvironmentManager::init(const unsigned int &instance_id, const EnvironmentDescription &desc, bool isPureRTC) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // Error if reinit for (auto &&i : properties) { if (i.first.first == instance_id) { THROW exception::EnvDescriptionAlreadyLoaded("Environment description with same instance id '%u' is already loaded, " "in EnvironmentManager::init().", instance_id); } } // Add to device requires update map std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); deviceRequiresUpdate.emplace(instance_id, EnvUpdateFlags()); deviceRequiresUpdate_lock.unlock(); // Build a DefragMap to send to defragger method DefragMap orderedProperties; size_t newSize = 0; for (auto _i = desc.properties.begin(); _i != desc.properties.end(); ++_i) { const auto &i = _i->second; NamePair name = toName(instance_id, _i->first); DefragProp prop = DefragProp(i.data.ptr, i.data.length, i.isConst, i.data.elements, i.data.type); const size_t typeSize = i.data.length / i.data.elements; orderedProperties.emplace(std::make_pair(typeSize, name), prop); newSize += i.data.length; } if (newSize > m_freeSpace) { // Ran out of constant cache space! 
(this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties," "in EnvironmentManager::init()."); } // Defragment to rebuild it properly defragment(detail::curve::Curve::getInstance(), &orderedProperties, {}, isPureRTC); // Setup RTC version buildRTCOffsets(instance_id, instance_id, orderedProperties); } void EnvironmentManager::init(const unsigned int &instance_id, const EnvironmentDescription &desc, bool isPureRTC, const unsigned int &master_instance_id, const SubEnvironmentData &mapping) { assert(deviceRequiresUpdate.size()); // submodel init should never be called first, requires parent init first for mapping std::unique_lock<std::shared_timed_mutex> lock(mutex); // Add to device requires update map std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); deviceRequiresUpdate.emplace(instance_id, EnvUpdateFlags()); deviceRequiresUpdate_lock.unlock(); // Error if reinit for (auto &&i : properties) { if (i.first.first == instance_id) { THROW exception::EnvDescriptionAlreadyLoaded("Environment description with same instance id '%u' is already loaded, " "in EnvironmentManager::init().", instance_id); } } // Build a DefragMap of to send to defragger method DefragMap orderedProperties; size_t newSize = 0; std::set<NamePair> new_mapped_props; for (auto _i = desc.properties.begin(); _i != desc.properties.end(); ++_i) { auto prop_mapping = mapping.properties.find(_i->first); const auto &i = _i->second; NamePair name = toName(instance_id, _i->first); if (prop_mapping == mapping.properties.end()) { // Property is not mapped, so add to defrag map DefragProp prop = DefragProp(i.data.ptr, i.data.length, i.isConst, i.data.elements, i.data.type); const size_t typeSize = i.data.length / i.data.elements; orderedProperties.emplace(std::make_pair(typeSize, name), prop); newSize += i.data.length; } else { // 
Property is mapped, follow it's mapping upwards until we find the highest parent NamePair ultimateParent = toName(master_instance_id, prop_mapping->second); auto propFind = mapped_properties.find(ultimateParent); while (propFind != mapped_properties.end()) { ultimateParent = propFind->second.masterProp; propFind = mapped_properties.find(ultimateParent); } // Add to mapping list MappedProp mp = MappedProp(ultimateParent, i.isConst); mapped_properties.emplace(name, mp); new_mapped_props.emplace(name); } } if (newSize > m_freeSpace) { // Ran out of constant cache space! (this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties," "in EnvironmentManager::init()."); } // Defragment to rebuild it properly defragment(detail::curve::Curve::getInstance(), &orderedProperties, new_mapped_props, isPureRTC); // Setup RTC version buildRTCOffsets(instance_id, master_instance_id, orderedProperties); } void EnvironmentManager::initRTC(const CUDASimulation& cudaSimulation) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // check to ensure that model name is not already registered auto res = cuda_agent_models.find(cudaSimulation.getInstanceID()); if (res != cuda_agent_models.end()) { THROW exception::UnknownInternalError("Agent model name '%s' already registered in initRTC()", cudaSimulation.getModelDescription().name.c_str()); } // register model name cuda_agent_models.emplace(cudaSimulation.getInstanceID(), cudaSimulation); } void EnvironmentManager::initialiseDevice() { // Caller must lock mutex if (!deviceInitialised) { void *t_c_buffer = nullptr; gpuErrchk(hipGetSymbolAddress(&t_c_buffer, detail::c_envPropBuffer)); c_buffer = reinterpret_cast<char*>(t_c_buffer); // printf("Env Prop Constant Cache Buffer: %p - %p\n", c_buffer, c_buffer + MAX_BUFFER_SIZE); assert(CURVE_NAMESPACE_HASH == DeviceEnvironment::CURVE_NAMESPACE_HASH()); // Host and 
Device namespace const's do not match deviceInitialised = true; } } void EnvironmentManager::free(detail::curve::Curve &curve, const unsigned int &instance_id) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // Release regular properties for (auto &&i = properties.begin(); i != properties.end();) { if (i->first.first == instance_id) { // Release from CURVE detail::curve::Curve::VariableHash cvh = toHash(i->first); curve.unregisterVariableByHash(cvh); // Drop from properties map i = properties.erase(i); } else { ++i; } } // Release mapped properties for (auto &&i = mapped_properties.begin(); i != mapped_properties.end();) { if (i->first.first == instance_id) { // Release from CURVE detail::curve::Curve::VariableHash cvh = toHash(i->first); curve.unregisterVariableByHash(cvh); // Drop from properties map i = mapped_properties.erase(i); } else { ++i; } } // Defragment to clear up all the buffer items we didn't handle here defragment(curve); // Remove reference to cuda agent model used by RTC // This may not exist if the CUDAgent model has not been created (e.g. 
some tests which do not run the model) auto cam = cuda_agent_models.find(instance_id); if (cam != cuda_agent_models.end()) { cuda_agent_models.erase(cam); } // Sample applies to requires update map std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); auto dru = deviceRequiresUpdate.find(instance_id); if (dru != deviceRequiresUpdate.end()) { deviceRequiresUpdate.erase(dru); } deviceRequiresUpdate_lock.unlock(); // Sample applies to rtc_caches auto rtcc = rtc_caches.find(instance_id); if (rtcc != rtc_caches.end()) { rtc_caches.erase(rtcc); } } EnvironmentManager::NamePair EnvironmentManager::toName(const unsigned int &instance_id, const std::string &var_name) { return std::make_pair(instance_id, var_name); } /** * @note Not static, because eventually we might need to use curve singleton */ detail::curve::Curve::VariableHash EnvironmentManager::toHash(const NamePair &name) const { detail::curve::Curve::VariableHash var_cvh = detail::curve::Curve::variableRuntimeHash(name.second.c_str()); return CURVE_NAMESPACE_HASH + name.first + var_cvh; } void EnvironmentManager::newProperty(const NamePair &name, const char *ptr, const size_t &length, const bool &isConst, const size_type &elements, const std::type_index &type) { std::unique_lock<std::shared_timed_mutex> lock(mutex); assert(elements > 0); const size_t typeSize = (length / elements); ptrdiff_t buffOffset = MAX_BUFFER_SIZE; // Allocate buffer space, using a free fragment for (auto it = freeFragments.begin(); it != freeFragments.end(); ++it) { const ptrdiff_t alignmentOffset = std::get<OFFSET>(*it) % typeSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? typeSize - alignmentOffset : 0; if (std::get<LEN>(*it) + alignmentFix <= length) { // We can use this space! 
buffOffset = std::get<OFFSET>(*it) + alignmentFix; // Update freeFrags if (alignmentFix != 0) { freeFragments.push_back(OffsetLen(std::get<OFFSET>(*it), alignmentFix)); } // Update nextFree if (std::get<LEN>(*it) == length) { // Remove freeFragments.erase(it); } else { // Shrink *it = { std::get<OFFSET>(*it) + length, std::get<LEN>(*it) - length }; } break; } } // Allocate buffer space, using nextFree if (buffOffset == MAX_BUFFER_SIZE) { const ptrdiff_t alignmentOffset = nextFree % typeSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? typeSize - alignmentOffset : 0; if (nextFree + alignmentFix + length < MAX_BUFFER_SIZE) { // Update freeFrags if (alignmentFix != 0) { freeFragments.push_back(OffsetLen(nextFree, alignmentFix)); } // We can use this space! nextFree += alignmentFix; buffOffset = nextFree; nextFree += length; } } if (buffOffset == MAX_BUFFER_SIZE) { // buffOffset hasn't changed from init value // defragment() and retry using nextFree defragment(detail::curve::Curve::getInstance()); const ptrdiff_t alignmentOffset = nextFree % typeSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? typeSize - alignmentOffset : 0; if (nextFree + alignmentFix + length < MAX_BUFFER_SIZE) { // Update freeFrags if (alignmentFix != 0) { freeFragments.push_back(OffsetLen(nextFree, alignmentFix)); } // We can use this space! nextFree += alignmentFix; buffOffset = nextFree; nextFree += length; } else { // Ran out of constant cache space! 
THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new property," "in EnvironmentManager::add()."); } } // Update FreeSpace m_freeSpace -= length; // Add to properties // printf("Constant '%s' created at offset: %llu, (%llu%%8), (%llu%%4)\n", name.c_str(), buffOffset, buffOffset % 8, buffOffset % 4); properties.emplace(name, EnvProp(buffOffset, length, isConst, elements, type)); // Store data memcpy(hc_buffer + buffOffset, ptr, length); // Register in cuRVE detail::curve::Curve::VariableHash cvh = toHash(name); const auto CURVE_RESULT = detail::curve::Curve::getInstance().registerVariableByHash(cvh, reinterpret_cast<void*>(buffOffset), typeSize, elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE" "in EnvironmentManager::add()."); } #ifdef _DEBUG if (CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", name.second.c_str()); } #endif addRTCOffset(name); setDeviceRequiresUpdateFlag(); } #ifdef _DEBUG void EnvironmentManager::defragment(detail::curve::Curve &curve, const DefragMap * mergeProperties, std::set<NamePair> newmaps, bool isPureRTC) { #else void EnvironmentManager::defragment(detail::curve::Curve & curve, const DefragMap * mergeProperties, std::set<NamePair> newmaps, bool) { #endif // Do not lock mutex here, do it in the calling method auto device_lock = std::unique_lock<std::shared_timed_mutex>(device_mutex); // Build a multimap to sort the elements (to create natural alignment in compact form) DefragMap orderedProperties; for (auto &i : properties) { size_t typeLen = i.second.length / i.second.elements; orderedProperties.emplace(std::make_pair(typeLen, i.first), DefragProp(i.second)); } // Include any merge elements if (mergeProperties) { orderedProperties.insert(mergeProperties->cbegin(), 
mergeProperties->cend()); } // Lock device mutex here, as we begin to mess with curve // Clear freefrags, so we can refill it with alignment junk freeFragments.clear(); size_t spareFrags = 0; // Rebuild properties map into temporary buffer std::unordered_map<NamePair, EnvProp, NamePairHash> t_properties; char t_buffer[MAX_BUFFER_SIZE]; ptrdiff_t buffOffset = 0; // Iterate largest vars first for (auto _i = orderedProperties.rbegin(); _i != orderedProperties.rend(); ++_i) { size_t typeSize = _i->first.first; const NamePair &name = _i->first.second; auto &i = _i->second; // Handle alignment const ptrdiff_t alignmentOffset = buffOffset % typeSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? typeSize - alignmentOffset : 0; if (alignmentOffset != 0) { freeFragments.push_back(OffsetLen(buffOffset, alignmentFix)); buffOffset += alignmentFix; spareFrags += alignmentFix; } if (buffOffset + i.length <= MAX_BUFFER_SIZE) { // Setup constant in new position memcpy(t_buffer + buffOffset, i.data, i.length); t_properties.emplace(name, EnvProp(buffOffset, i.length, i.isConst, i.elements, i.type, i.rtc_offset)); // Update cuRVE (There isn't an update, so unregister and reregister) // TODO: curveGetVariableHandle()? 
detail::curve::Curve::VariableHash cvh = toHash(name); // Only unregister variable if it's already registered if (!mergeProperties) { // Merge properties are only provided on 1st init, when vars can't be unregistered curve.unregisterVariableByHash(cvh); } else { // Can this var be found inside mergeProps auto range = mergeProperties->equal_range(_i->first); bool isFound = false; for (auto w = range.first; w != range.second; ++w) { if (w->first.second == name) { isFound = true; break; } } if (!isFound) { curve.unregisterVariableByHash(cvh); } } const auto CURVE_RESULT = curve.registerVariableByHash(cvh, reinterpret_cast<void*>(buffOffset), typeSize, i.elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE, " "in EnvironmentManager::defragment()."); } #ifdef _DEBUG if (!isPureRTC && CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", name.second.c_str()); } #endif // Increase buffer offset length that has been added buffOffset += i.length; } else { // Ran out of constant cache space! 
(this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties, " "in EnvironmentManager::defragment()."); } } // Replace stored properties with temp std::swap(properties, t_properties); // Replace buffer on host memcpy(hc_buffer, t_buffer, buffOffset); // Update m_freeSpace, nextFree nextFree = buffOffset; m_freeSpace = MAX_BUFFER_SIZE - buffOffset + spareFrags; // Update cub for any mapped properties for (auto &mp : mapped_properties) { // Generate hash for the subproperty name detail::curve::Curve::VariableHash cvh = toHash(mp.first); // Unregister the property if it's already been registered if (newmaps.find(mp.first) == newmaps.end()) { curve.unregisterVariableByHash(cvh); } // Find the location of the mappedproperty auto masterprop = properties.at(mp.second.masterProp); // Create the mapping const auto CURVE_RESULT = curve.registerVariableByHash(cvh, reinterpret_cast<void*>(masterprop.offset), masterprop.length / masterprop.elements, masterprop.elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE, " "in EnvironmentManager::defragment()."); } #ifdef _DEBUG if (!isPureRTC && CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", mp.first.second.c_str()); } #endif } setDeviceRequiresUpdateFlag(); } void EnvironmentManager::buildRTCOffsets(const unsigned int &instance_id, const unsigned int &master_instance_id, const DefragMap &orderedProperties) { // Do not lock mutex here, do it in the calling method // Actually begin if (instance_id == master_instance_id) { // Create a new cache std::shared_ptr<RTCEnvPropCache> cache = std::make_shared<RTCEnvPropCache>(); // Add the properties, they are already 
ordered so we can just enforce alignment // As we add each property, set its rtc_offset value in main properties map for (auto _i = orderedProperties.rbegin(); _i != orderedProperties.rend(); ++_i) { auto &i = _i->second; size_t alignmentSize = _i->first.first; const NamePair &name = _i->first.second; // Handle alignment const ptrdiff_t alignmentOffset = cache->nextFree % alignmentSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? alignmentSize - alignmentOffset : 0; cache->nextFree += alignmentFix; if (cache->nextFree + i.length <= MAX_BUFFER_SIZE) { // Setup constant in new position memcpy(cache->hc_buffer + cache->nextFree, i.data, i.length); properties.at(name).rtc_offset = cache->nextFree; // Increase buffer offset length that has been added cache->nextFree += i.length; } else { // Ran out of constant cache space! (this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() // This should never happen, it would be caught by defrag sooner THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties, " "in EnvironmentManager::buildRTCOffsets()."); } } // Cache is complete, add it to cache map rtc_caches.emplace(instance_id, cache); } else { // Find the master cache std::shared_ptr<RTCEnvPropCache> &cache = rtc_caches.at(master_instance_id); // Add the properties, they are already ordered so we can just enforce alignment // As we add each property, set its rtc_offset value in main properties map for (auto _i = orderedProperties.rbegin(); _i != orderedProperties.rend(); ++_i) { const NamePair &name = _i->first.second; auto &i = _i->second; auto mi_it = mapped_properties.find(name); if (mi_it == mapped_properties.end()) { // Property is not mapped, add it to cache size_t alignmentSize = _i->first.first; // Handle alignment const ptrdiff_t alignmentOffset = cache->nextFree % alignmentSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? 
alignmentSize - alignmentOffset : 0; cache->nextFree += alignmentFix; if (cache->nextFree + i.length <= MAX_BUFFER_SIZE) { // Setup constant in new position memcpy(cache->hc_buffer + cache->nextFree, i.data, i.length); properties.at(name).rtc_offset = cache->nextFree; // Increase buffer offset length that has been added cache->nextFree += i.length; } else { // Ran out of constant cache space! (this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() // This should never happen, it would be caught by defrag sooner THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties, " "in EnvironmentManager::buildRTCOffsets()."); } } } // Add a copy of cache for this instance_id to env manager rtc_caches.emplace(instance_id, cache); } } char * EnvironmentManager::getRTCCache(const unsigned int& instance_id) { auto it = rtc_caches.find(instance_id); if (it != rtc_caches.end()) return it->second->hc_buffer; THROW exception::UnknownInternalError("Instance with id '%u' not registered in EnvironmentManager for use with RTC in EnvironmentManager::getRTCCache", instance_id); } void EnvironmentManager::addRTCOffset(const NamePair &name) { // Do not lock mutex here, do it in the calling method auto mi_it = mapped_properties.find(name); // Property is not mapped (it's not even currently possible to add mapped properties after the fact) if (mi_it == mapped_properties.end()) { auto &cache = rtc_caches.at(name.first); auto &p = properties.at(name); size_t alignmentSize = p.length > 64 ? 64 : p.length; // This creates better alignment for small vectors // Handle alignment const ptrdiff_t alignmentOffset = cache->nextFree % alignmentSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? 
alignmentSize - alignmentOffset : 0; cache->nextFree += alignmentFix; if (cache->nextFree + p.length <= MAX_BUFFER_SIZE) { // Setup constant in new position memcpy(cache->hc_buffer + cache->nextFree, hc_buffer + p.offset, p.length); p.rtc_offset = cache->nextFree; // Increase buffer offset length that has been added cache->nextFree += p.length; } else { THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties, " "in EnvironmentManager::buildRTCOffsets()."); } } else { THROW exception::OutOfMemory("Support for mapped (sub) properties is not currently implemented, " "in EnvironmentManager::addRTCOffset()."); } } const CUDASimulation& EnvironmentManager::getCUDASimulation(const unsigned int &instance_id) { // Don't lock mutex here, lock it in the calling function auto res = cuda_agent_models.find(instance_id); if (res == cuda_agent_models.end()) { THROW exception::UnknownInternalError("Instance with id '%u' not registered in EnvironmentManager for use with RTC in EnvironmentManager::getCUDASimulation", instance_id); } return res->second; } void EnvironmentManager::updateRTCValue(const NamePair &name) { // Don't lock mutex here, lock it in the calling function // Grab the updated prop auto a = properties.find(name); if (a == properties.end()) { a = properties.find(mapped_properties.at(name).masterProp); } // Grab the main cache ptr for the prop void *main_ptr = hc_buffer + a->second.offset; // Grab the rtc cache ptr for the prop void *rtc_ptr = rtc_caches.at(name.first)->hc_buffer + a->second.rtc_offset; // Copy memcpy(rtc_ptr, main_ptr, a->second.length); // Now we must detect if the variable is mapped in any form // If this is the case, any rtc models which share the property must be flagged for update too { std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); // First check if it's the subvar auto mp_it = mapped_properties.find(name); const NamePair check = mp_it == mapped_properties.end() ? 
name : mp_it->second.masterProp; // Now check for any properties mapped to this variable for (auto mp : mapped_properties) { if (mp.second.masterProp == check) { // It's a hit, set flag to true deviceRequiresUpdate.at(check.first).rtc_update_required = true; } } deviceRequiresUpdate_lock.unlock(); } } void EnvironmentManager::removeProperty(const NamePair &name) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // Unregister in cuRVE detail::curve::Curve::VariableHash cvh = toHash(name); detail::curve::Curve::getInstance().unregisterVariableByHash(cvh); // Update free space // Remove from properties map auto realprop = properties.find(name); if (realprop!= properties.end()) { auto i = realprop->second; // Cast is safe, length would need to be gigabytes, we only have 64KB constant cache if (i.offset + static_cast<uint32_t>(i.length) == nextFree) { // Rollback nextFree nextFree = i.offset; } else { // Notify free fragments freeFragments.push_back(OffsetLen(i.offset, i.length)); } m_freeSpace += i.length; // Purge properties properties.erase(name); } else { mapped_properties.erase(name); } setDeviceRequiresUpdateFlag(name.first); } void EnvironmentManager::removeProperty(const unsigned int &instance_id, const std::string &var_name) { removeProperty({instance_id, var_name}); } void EnvironmentManager::resetModel(const unsigned int &instance_id, const EnvironmentDescription &desc) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // Todo: Might want to change this, so EnvManager holds a copy of the default at init time // For every property, in the named model, which is not a mapped property for (auto &d : desc.getPropertiesMap()) { if (mapped_properties.find({instance_id, d.first}) == mapped_properties.end()) { // Find the local property data auto &p = properties.at({instance_id, d.first}); // Set back to default value memcpy(hc_buffer + p.offset, d.second.data.ptr, d.second.data.length); // Do rtc too void *rtc_ptr = rtc_caches.at(instance_id)->hc_buffer 
+ p.rtc_offset; memcpy(rtc_ptr, d.second.data.ptr, d.second.data.length); assert(d.second.data.length == p.length); } } setDeviceRequiresUpdateFlag(instance_id); } void EnvironmentManager::setDeviceRequiresUpdateFlag(const unsigned int &instance_id) { std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); // Don't lock mutex here, lock it in the calling function // Increment host version if (instance_id == UINT_MAX) { // Set required version for all, we have defragged for (auto &a : deviceRequiresUpdate) { a.second.c_update_required = true; a.second.rtc_update_required = true; } } else { // Set individual auto &flags = deviceRequiresUpdate.at(instance_id); flags.c_update_required = true; flags.rtc_update_required = true; } } void EnvironmentManager::updateDevice(const unsigned int &instance_id) { // Lock shared mutex of mutex in calling method first!!! // Device must be init first assert(deviceInitialised); std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); NVTX_RANGE("EnvironmentManager::updateDevice()"); auto &flags = deviceRequiresUpdate.at(instance_id); auto &c_update_required = flags.c_update_required; auto &rtc_update_required = flags.rtc_update_required; auto &curve_registration_required = flags.curve_registration_required; if (c_update_required) { // Store data gpuErrchk(hipMemcpy(reinterpret_cast<void*>(const_cast<char*>(c_buffer)), reinterpret_cast<void*>(const_cast<char*>(hc_buffer)), MAX_BUFFER_SIZE, hipMemcpyHostToDevice)); // Update C update flag for all instances for (auto &a : deviceRequiresUpdate) { a.second.c_update_required = false; } } if (rtc_update_required) { // RTC is nolonger updated here, it's always updated before the CurveRTCHost is pushed to device. 
// Update instance's rtc update flag rtc_update_required = false; } if (curve_registration_required) { auto &curve = detail::curve::Curve::getInstance(); // Update cub for any not mapped properties for (auto &p : properties) { if (p.first.first == instance_id) { // Generate hash for the subproperty name detail::curve::Curve::VariableHash cvh = toHash(p.first); const auto &prop = p.second; // Create the mapping const auto CURVE_RESULT = curve.registerVariableByHash(cvh, reinterpret_cast<void*>(prop.offset), prop.length / prop.elements, prop.elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE, " "in EnvironmentManager::updateDevice()."); } #ifdef _DEBUG if (CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", p.first.second.c_str()); } #endif } } // Update cub for any mapped properties for (auto &mp : mapped_properties) { if (mp.first.first == instance_id) { // Generate hash for the subproperty name detail::curve::Curve::VariableHash cvh = toHash(mp.first); const auto &masterprop = properties.at(mp.second.masterProp); // Create the mapping const auto CURVE_RESULT = curve.registerVariableByHash(cvh, reinterpret_cast<void*>(masterprop.offset), masterprop.length / masterprop.elements, masterprop.elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE, " "in EnvironmentManager::updateDevice()."); } #ifdef _DEBUG if (CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", mp.first.second.c_str()); } #endif } } curve_registration_required = false; } } EnvironmentManager& 
EnvironmentManager::getInstance() { auto lock = std::unique_lock<std::mutex>(instance_mutex); // Mutex to protect from two threads triggering the static instantiation concurrently static std::map<int, std::unique_ptr<EnvironmentManager>> instances = {}; // Instantiated on first use. int device_id = -1; gpuErrchk(hipGetDevice(&device_id)); // Can't use operator[] here, constructor is private const auto f = instances.find(device_id); if (f != instances.end()) return *f->second; return *(instances.emplace(device_id, std::unique_ptr<EnvironmentManager>(new EnvironmentManager())).first->second); } util::Any EnvironmentManager::getPropertyAny(const unsigned int &instance_id, const std::string &var_name) const { std::shared_lock<std::shared_timed_mutex> lock(mutex); const NamePair name = toName(instance_id, var_name); auto a = properties.find(name); if (a != properties.end()) return util::Any(hc_buffer + a->second.offset, a->second.length, a->second.type, a->second.elements); const auto b = mapped_properties.find(name); if (b != mapped_properties.end()) { a = properties.find(b->second.masterProp); if (a != properties.end()) return util::Any(hc_buffer + a->second.offset, a->second.length, a->second.type, a->second.elements); THROW exception::InvalidEnvProperty("Mapped environmental property with name '%u:%s' maps to missing property with name '%u:%s', " "in EnvironmentManager::getPropertyAny().", name.first, name.second.c_str(), b->second.masterProp.first, b->second.masterProp.second.c_str()); } THROW exception::InvalidEnvProperty("Environmental property with name '%u:%s' does not exist, " "in EnvironmentManager::getPropertyAny().", name.first, name.second.c_str()); } } // namespace flamegpu
f9aa2598b644f4edfdd18063b0006c109446913a.cu
#include "flamegpu/runtime/utility/EnvironmentManager.cuh" #include <cassert> #include <memory> #include "flamegpu/gpu/detail/CUDAErrorChecking.cuh" #include "flamegpu/runtime/utility/DeviceEnvironment.cuh" #include "flamegpu/model/EnvironmentDescription.h" #include "flamegpu/model/SubEnvironmentData.h" #include "flamegpu/gpu/CUDASimulation.h" #include "flamegpu/util/nvtx.h" namespace flamegpu { /** * Internal namespace to hide __constant__ declarations from modeller */ namespace detail { /** * Managed by HostEnvironment, holds all environment properties */ __constant__ char c_envPropBuffer[EnvironmentManager::MAX_BUFFER_SIZE]; } // namespace detail std::mutex EnvironmentManager::instance_mutex; const char EnvironmentManager::CURVE_NAMESPACE_STRING[23] = "ENVIRONMENT_PROPERTIES"; EnvironmentManager::EnvironmentManager() : CURVE_NAMESPACE_HASH(detail::curve::Curve::variableRuntimeHash(CURVE_NAMESPACE_STRING)), nextFree(0), m_freeSpace(EnvironmentManager::MAX_BUFFER_SIZE), freeFragments(), deviceInitialised(false) { } void EnvironmentManager::purge() { std::unique_lock<std::shared_timed_mutex> lock(mutex); std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); deviceInitialised = false; for (auto &a : deviceRequiresUpdate) { a.second.c_update_required = true; a.second.rtc_update_required = true; a.second.curve_registration_required = true; } deviceRequiresUpdate_lock.unlock(); // We are now able to only purge the device stuff after device reset? 
// freeFragments.clear(); // m_freeSpace = EnvironmentManager::MAX_BUFFER_SIZE; // nextFree = 0; // cuda_agent_models.clear(); // properties.clear(); // mapped_properties.clear(); // rtc_caches.clear(); initialiseDevice(); } void EnvironmentManager::init(const unsigned int &instance_id, const EnvironmentDescription &desc, bool isPureRTC) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // Error if reinit for (auto &&i : properties) { if (i.first.first == instance_id) { THROW exception::EnvDescriptionAlreadyLoaded("Environment description with same instance id '%u' is already loaded, " "in EnvironmentManager::init().", instance_id); } } // Add to device requires update map std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); deviceRequiresUpdate.emplace(instance_id, EnvUpdateFlags()); deviceRequiresUpdate_lock.unlock(); // Build a DefragMap to send to defragger method DefragMap orderedProperties; size_t newSize = 0; for (auto _i = desc.properties.begin(); _i != desc.properties.end(); ++_i) { const auto &i = _i->second; NamePair name = toName(instance_id, _i->first); DefragProp prop = DefragProp(i.data.ptr, i.data.length, i.isConst, i.data.elements, i.data.type); const size_t typeSize = i.data.length / i.data.elements; orderedProperties.emplace(std::make_pair(typeSize, name), prop); newSize += i.data.length; } if (newSize > m_freeSpace) { // Ran out of constant cache space! 
(this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties," "in EnvironmentManager::init()."); } // Defragment to rebuild it properly defragment(detail::curve::Curve::getInstance(), &orderedProperties, {}, isPureRTC); // Setup RTC version buildRTCOffsets(instance_id, instance_id, orderedProperties); } void EnvironmentManager::init(const unsigned int &instance_id, const EnvironmentDescription &desc, bool isPureRTC, const unsigned int &master_instance_id, const SubEnvironmentData &mapping) { assert(deviceRequiresUpdate.size()); // submodel init should never be called first, requires parent init first for mapping std::unique_lock<std::shared_timed_mutex> lock(mutex); // Add to device requires update map std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); deviceRequiresUpdate.emplace(instance_id, EnvUpdateFlags()); deviceRequiresUpdate_lock.unlock(); // Error if reinit for (auto &&i : properties) { if (i.first.first == instance_id) { THROW exception::EnvDescriptionAlreadyLoaded("Environment description with same instance id '%u' is already loaded, " "in EnvironmentManager::init().", instance_id); } } // Build a DefragMap of to send to defragger method DefragMap orderedProperties; size_t newSize = 0; std::set<NamePair> new_mapped_props; for (auto _i = desc.properties.begin(); _i != desc.properties.end(); ++_i) { auto prop_mapping = mapping.properties.find(_i->first); const auto &i = _i->second; NamePair name = toName(instance_id, _i->first); if (prop_mapping == mapping.properties.end()) { // Property is not mapped, so add to defrag map DefragProp prop = DefragProp(i.data.ptr, i.data.length, i.isConst, i.data.elements, i.data.type); const size_t typeSize = i.data.length / i.data.elements; orderedProperties.emplace(std::make_pair(typeSize, name), prop); newSize += i.data.length; } else { // 
Property is mapped, follow it's mapping upwards until we find the highest parent NamePair ultimateParent = toName(master_instance_id, prop_mapping->second); auto propFind = mapped_properties.find(ultimateParent); while (propFind != mapped_properties.end()) { ultimateParent = propFind->second.masterProp; propFind = mapped_properties.find(ultimateParent); } // Add to mapping list MappedProp mp = MappedProp(ultimateParent, i.isConst); mapped_properties.emplace(name, mp); new_mapped_props.emplace(name); } } if (newSize > m_freeSpace) { // Ran out of constant cache space! (this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties," "in EnvironmentManager::init()."); } // Defragment to rebuild it properly defragment(detail::curve::Curve::getInstance(), &orderedProperties, new_mapped_props, isPureRTC); // Setup RTC version buildRTCOffsets(instance_id, master_instance_id, orderedProperties); } void EnvironmentManager::initRTC(const CUDASimulation& cudaSimulation) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // check to ensure that model name is not already registered auto res = cuda_agent_models.find(cudaSimulation.getInstanceID()); if (res != cuda_agent_models.end()) { THROW exception::UnknownInternalError("Agent model name '%s' already registered in initRTC()", cudaSimulation.getModelDescription().name.c_str()); } // register model name cuda_agent_models.emplace(cudaSimulation.getInstanceID(), cudaSimulation); } void EnvironmentManager::initialiseDevice() { // Caller must lock mutex if (!deviceInitialised) { void *t_c_buffer = nullptr; gpuErrchk(cudaGetSymbolAddress(&t_c_buffer, detail::c_envPropBuffer)); c_buffer = reinterpret_cast<char*>(t_c_buffer); // printf("Env Prop Constant Cache Buffer: %p - %p\n", c_buffer, c_buffer + MAX_BUFFER_SIZE); assert(CURVE_NAMESPACE_HASH == DeviceEnvironment::CURVE_NAMESPACE_HASH()); // Host and 
Device namespace const's do not match deviceInitialised = true; } } void EnvironmentManager::free(detail::curve::Curve &curve, const unsigned int &instance_id) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // Release regular properties for (auto &&i = properties.begin(); i != properties.end();) { if (i->first.first == instance_id) { // Release from CURVE detail::curve::Curve::VariableHash cvh = toHash(i->first); curve.unregisterVariableByHash(cvh); // Drop from properties map i = properties.erase(i); } else { ++i; } } // Release mapped properties for (auto &&i = mapped_properties.begin(); i != mapped_properties.end();) { if (i->first.first == instance_id) { // Release from CURVE detail::curve::Curve::VariableHash cvh = toHash(i->first); curve.unregisterVariableByHash(cvh); // Drop from properties map i = mapped_properties.erase(i); } else { ++i; } } // Defragment to clear up all the buffer items we didn't handle here defragment(curve); // Remove reference to cuda agent model used by RTC // This may not exist if the CUDAgent model has not been created (e.g. 
some tests which do not run the model) auto cam = cuda_agent_models.find(instance_id); if (cam != cuda_agent_models.end()) { cuda_agent_models.erase(cam); } // Sample applies to requires update map std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); auto dru = deviceRequiresUpdate.find(instance_id); if (dru != deviceRequiresUpdate.end()) { deviceRequiresUpdate.erase(dru); } deviceRequiresUpdate_lock.unlock(); // Sample applies to rtc_caches auto rtcc = rtc_caches.find(instance_id); if (rtcc != rtc_caches.end()) { rtc_caches.erase(rtcc); } } EnvironmentManager::NamePair EnvironmentManager::toName(const unsigned int &instance_id, const std::string &var_name) { return std::make_pair(instance_id, var_name); } /** * @note Not static, because eventually we might need to use curve singleton */ detail::curve::Curve::VariableHash EnvironmentManager::toHash(const NamePair &name) const { detail::curve::Curve::VariableHash var_cvh = detail::curve::Curve::variableRuntimeHash(name.second.c_str()); return CURVE_NAMESPACE_HASH + name.first + var_cvh; } void EnvironmentManager::newProperty(const NamePair &name, const char *ptr, const size_t &length, const bool &isConst, const size_type &elements, const std::type_index &type) { std::unique_lock<std::shared_timed_mutex> lock(mutex); assert(elements > 0); const size_t typeSize = (length / elements); ptrdiff_t buffOffset = MAX_BUFFER_SIZE; // Allocate buffer space, using a free fragment for (auto it = freeFragments.begin(); it != freeFragments.end(); ++it) { const ptrdiff_t alignmentOffset = std::get<OFFSET>(*it) % typeSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? typeSize - alignmentOffset : 0; if (std::get<LEN>(*it) + alignmentFix <= length) { // We can use this space! 
buffOffset = std::get<OFFSET>(*it) + alignmentFix; // Update freeFrags if (alignmentFix != 0) { freeFragments.push_back(OffsetLen(std::get<OFFSET>(*it), alignmentFix)); } // Update nextFree if (std::get<LEN>(*it) == length) { // Remove freeFragments.erase(it); } else { // Shrink *it = { std::get<OFFSET>(*it) + length, std::get<LEN>(*it) - length }; } break; } } // Allocate buffer space, using nextFree if (buffOffset == MAX_BUFFER_SIZE) { const ptrdiff_t alignmentOffset = nextFree % typeSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? typeSize - alignmentOffset : 0; if (nextFree + alignmentFix + length < MAX_BUFFER_SIZE) { // Update freeFrags if (alignmentFix != 0) { freeFragments.push_back(OffsetLen(nextFree, alignmentFix)); } // We can use this space! nextFree += alignmentFix; buffOffset = nextFree; nextFree += length; } } if (buffOffset == MAX_BUFFER_SIZE) { // buffOffset hasn't changed from init value // defragment() and retry using nextFree defragment(detail::curve::Curve::getInstance()); const ptrdiff_t alignmentOffset = nextFree % typeSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? typeSize - alignmentOffset : 0; if (nextFree + alignmentFix + length < MAX_BUFFER_SIZE) { // Update freeFrags if (alignmentFix != 0) { freeFragments.push_back(OffsetLen(nextFree, alignmentFix)); } // We can use this space! nextFree += alignmentFix; buffOffset = nextFree; nextFree += length; } else { // Ran out of constant cache space! 
THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new property," "in EnvironmentManager::add()."); } } // Update FreeSpace m_freeSpace -= length; // Add to properties // printf("Constant '%s' created at offset: %llu, (%llu%%8), (%llu%%4)\n", name.c_str(), buffOffset, buffOffset % 8, buffOffset % 4); properties.emplace(name, EnvProp(buffOffset, length, isConst, elements, type)); // Store data memcpy(hc_buffer + buffOffset, ptr, length); // Register in cuRVE detail::curve::Curve::VariableHash cvh = toHash(name); const auto CURVE_RESULT = detail::curve::Curve::getInstance().registerVariableByHash(cvh, reinterpret_cast<void*>(buffOffset), typeSize, elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE" "in EnvironmentManager::add()."); } #ifdef _DEBUG if (CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", name.second.c_str()); } #endif addRTCOffset(name); setDeviceRequiresUpdateFlag(); } #ifdef _DEBUG void EnvironmentManager::defragment(detail::curve::Curve &curve, const DefragMap * mergeProperties, std::set<NamePair> newmaps, bool isPureRTC) { #else void EnvironmentManager::defragment(detail::curve::Curve & curve, const DefragMap * mergeProperties, std::set<NamePair> newmaps, bool) { #endif // Do not lock mutex here, do it in the calling method auto device_lock = std::unique_lock<std::shared_timed_mutex>(device_mutex); // Build a multimap to sort the elements (to create natural alignment in compact form) DefragMap orderedProperties; for (auto &i : properties) { size_t typeLen = i.second.length / i.second.elements; orderedProperties.emplace(std::make_pair(typeLen, i.first), DefragProp(i.second)); } // Include any merge elements if (mergeProperties) { orderedProperties.insert(mergeProperties->cbegin(), 
mergeProperties->cend()); } // Lock device mutex here, as we begin to mess with curve // Clear freefrags, so we can refill it with alignment junk freeFragments.clear(); size_t spareFrags = 0; // Rebuild properties map into temporary buffer std::unordered_map<NamePair, EnvProp, NamePairHash> t_properties; char t_buffer[MAX_BUFFER_SIZE]; ptrdiff_t buffOffset = 0; // Iterate largest vars first for (auto _i = orderedProperties.rbegin(); _i != orderedProperties.rend(); ++_i) { size_t typeSize = _i->first.first; const NamePair &name = _i->first.second; auto &i = _i->second; // Handle alignment const ptrdiff_t alignmentOffset = buffOffset % typeSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? typeSize - alignmentOffset : 0; if (alignmentOffset != 0) { freeFragments.push_back(OffsetLen(buffOffset, alignmentFix)); buffOffset += alignmentFix; spareFrags += alignmentFix; } if (buffOffset + i.length <= MAX_BUFFER_SIZE) { // Setup constant in new position memcpy(t_buffer + buffOffset, i.data, i.length); t_properties.emplace(name, EnvProp(buffOffset, i.length, i.isConst, i.elements, i.type, i.rtc_offset)); // Update cuRVE (There isn't an update, so unregister and reregister) // TODO: curveGetVariableHandle()? 
detail::curve::Curve::VariableHash cvh = toHash(name); // Only unregister variable if it's already registered if (!mergeProperties) { // Merge properties are only provided on 1st init, when vars can't be unregistered curve.unregisterVariableByHash(cvh); } else { // Can this var be found inside mergeProps auto range = mergeProperties->equal_range(_i->first); bool isFound = false; for (auto w = range.first; w != range.second; ++w) { if (w->first.second == name) { isFound = true; break; } } if (!isFound) { curve.unregisterVariableByHash(cvh); } } const auto CURVE_RESULT = curve.registerVariableByHash(cvh, reinterpret_cast<void*>(buffOffset), typeSize, i.elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE, " "in EnvironmentManager::defragment()."); } #ifdef _DEBUG if (!isPureRTC && CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", name.second.c_str()); } #endif // Increase buffer offset length that has been added buffOffset += i.length; } else { // Ran out of constant cache space! 
(this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties, " "in EnvironmentManager::defragment()."); } } // Replace stored properties with temp std::swap(properties, t_properties); // Replace buffer on host memcpy(hc_buffer, t_buffer, buffOffset); // Update m_freeSpace, nextFree nextFree = buffOffset; m_freeSpace = MAX_BUFFER_SIZE - buffOffset + spareFrags; // Update cub for any mapped properties for (auto &mp : mapped_properties) { // Generate hash for the subproperty name detail::curve::Curve::VariableHash cvh = toHash(mp.first); // Unregister the property if it's already been registered if (newmaps.find(mp.first) == newmaps.end()) { curve.unregisterVariableByHash(cvh); } // Find the location of the mappedproperty auto masterprop = properties.at(mp.second.masterProp); // Create the mapping const auto CURVE_RESULT = curve.registerVariableByHash(cvh, reinterpret_cast<void*>(masterprop.offset), masterprop.length / masterprop.elements, masterprop.elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE, " "in EnvironmentManager::defragment()."); } #ifdef _DEBUG if (!isPureRTC && CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", mp.first.second.c_str()); } #endif } setDeviceRequiresUpdateFlag(); } void EnvironmentManager::buildRTCOffsets(const unsigned int &instance_id, const unsigned int &master_instance_id, const DefragMap &orderedProperties) { // Do not lock mutex here, do it in the calling method // Actually begin if (instance_id == master_instance_id) { // Create a new cache std::shared_ptr<RTCEnvPropCache> cache = std::make_shared<RTCEnvPropCache>(); // Add the properties, they are already 
ordered so we can just enforce alignment // As we add each property, set its rtc_offset value in main properties map for (auto _i = orderedProperties.rbegin(); _i != orderedProperties.rend(); ++_i) { auto &i = _i->second; size_t alignmentSize = _i->first.first; const NamePair &name = _i->first.second; // Handle alignment const ptrdiff_t alignmentOffset = cache->nextFree % alignmentSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? alignmentSize - alignmentOffset : 0; cache->nextFree += alignmentFix; if (cache->nextFree + i.length <= MAX_BUFFER_SIZE) { // Setup constant in new position memcpy(cache->hc_buffer + cache->nextFree, i.data, i.length); properties.at(name).rtc_offset = cache->nextFree; // Increase buffer offset length that has been added cache->nextFree += i.length; } else { // Ran out of constant cache space! (this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() // This should never happen, it would be caught by defrag sooner THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties, " "in EnvironmentManager::buildRTCOffsets()."); } } // Cache is complete, add it to cache map rtc_caches.emplace(instance_id, cache); } else { // Find the master cache std::shared_ptr<RTCEnvPropCache> &cache = rtc_caches.at(master_instance_id); // Add the properties, they are already ordered so we can just enforce alignment // As we add each property, set its rtc_offset value in main properties map for (auto _i = orderedProperties.rbegin(); _i != orderedProperties.rend(); ++_i) { const NamePair &name = _i->first.second; auto &i = _i->second; auto mi_it = mapped_properties.find(name); if (mi_it == mapped_properties.end()) { // Property is not mapped, add it to cache size_t alignmentSize = _i->first.first; // Handle alignment const ptrdiff_t alignmentOffset = cache->nextFree % alignmentSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? 
alignmentSize - alignmentOffset : 0; cache->nextFree += alignmentFix; if (cache->nextFree + i.length <= MAX_BUFFER_SIZE) { // Setup constant in new position memcpy(cache->hc_buffer + cache->nextFree, i.data, i.length); properties.at(name).rtc_offset = cache->nextFree; // Increase buffer offset length that has been added cache->nextFree += i.length; } else { // Ran out of constant cache space! (this can only trigger when a DefragMap is passed) // Arguably this check should be performed by init() // This should never happen, it would be caught by defrag sooner THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties, " "in EnvironmentManager::buildRTCOffsets()."); } } } // Add a copy of cache for this instance_id to env manager rtc_caches.emplace(instance_id, cache); } } char * EnvironmentManager::getRTCCache(const unsigned int& instance_id) { auto it = rtc_caches.find(instance_id); if (it != rtc_caches.end()) return it->second->hc_buffer; THROW exception::UnknownInternalError("Instance with id '%u' not registered in EnvironmentManager for use with RTC in EnvironmentManager::getRTCCache", instance_id); } void EnvironmentManager::addRTCOffset(const NamePair &name) { // Do not lock mutex here, do it in the calling method auto mi_it = mapped_properties.find(name); // Property is not mapped (it's not even currently possible to add mapped properties after the fact) if (mi_it == mapped_properties.end()) { auto &cache = rtc_caches.at(name.first); auto &p = properties.at(name); size_t alignmentSize = p.length > 64 ? 64 : p.length; // This creates better alignment for small vectors // Handle alignment const ptrdiff_t alignmentOffset = cache->nextFree % alignmentSize; const ptrdiff_t alignmentFix = alignmentOffset != 0 ? 
alignmentSize - alignmentOffset : 0; cache->nextFree += alignmentFix; if (cache->nextFree + p.length <= MAX_BUFFER_SIZE) { // Setup constant in new position memcpy(cache->hc_buffer + cache->nextFree, hc_buffer + p.offset, p.length); p.rtc_offset = cache->nextFree; // Increase buffer offset length that has been added cache->nextFree += p.length; } else { THROW exception::OutOfMemory("Insufficient EnvProperty memory to create new properties, " "in EnvironmentManager::buildRTCOffsets()."); } } else { THROW exception::OutOfMemory("Support for mapped (sub) properties is not currently implemented, " "in EnvironmentManager::addRTCOffset()."); } } const CUDASimulation& EnvironmentManager::getCUDASimulation(const unsigned int &instance_id) { // Don't lock mutex here, lock it in the calling function auto res = cuda_agent_models.find(instance_id); if (res == cuda_agent_models.end()) { THROW exception::UnknownInternalError("Instance with id '%u' not registered in EnvironmentManager for use with RTC in EnvironmentManager::getCUDASimulation", instance_id); } return res->second; } void EnvironmentManager::updateRTCValue(const NamePair &name) { // Don't lock mutex here, lock it in the calling function // Grab the updated prop auto a = properties.find(name); if (a == properties.end()) { a = properties.find(mapped_properties.at(name).masterProp); } // Grab the main cache ptr for the prop void *main_ptr = hc_buffer + a->second.offset; // Grab the rtc cache ptr for the prop void *rtc_ptr = rtc_caches.at(name.first)->hc_buffer + a->second.rtc_offset; // Copy memcpy(rtc_ptr, main_ptr, a->second.length); // Now we must detect if the variable is mapped in any form // If this is the case, any rtc models which share the property must be flagged for update too { std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); // First check if it's the subvar auto mp_it = mapped_properties.find(name); const NamePair check = mp_it == mapped_properties.end() ? 
name : mp_it->second.masterProp; // Now check for any properties mapped to this variable for (auto mp : mapped_properties) { if (mp.second.masterProp == check) { // It's a hit, set flag to true deviceRequiresUpdate.at(check.first).rtc_update_required = true; } } deviceRequiresUpdate_lock.unlock(); } } void EnvironmentManager::removeProperty(const NamePair &name) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // Unregister in cuRVE detail::curve::Curve::VariableHash cvh = toHash(name); detail::curve::Curve::getInstance().unregisterVariableByHash(cvh); // Update free space // Remove from properties map auto realprop = properties.find(name); if (realprop!= properties.end()) { auto i = realprop->second; // Cast is safe, length would need to be gigabytes, we only have 64KB constant cache if (i.offset + static_cast<uint32_t>(i.length) == nextFree) { // Rollback nextFree nextFree = i.offset; } else { // Notify free fragments freeFragments.push_back(OffsetLen(i.offset, i.length)); } m_freeSpace += i.length; // Purge properties properties.erase(name); } else { mapped_properties.erase(name); } setDeviceRequiresUpdateFlag(name.first); } void EnvironmentManager::removeProperty(const unsigned int &instance_id, const std::string &var_name) { removeProperty({instance_id, var_name}); } void EnvironmentManager::resetModel(const unsigned int &instance_id, const EnvironmentDescription &desc) { std::unique_lock<std::shared_timed_mutex> lock(mutex); // Todo: Might want to change this, so EnvManager holds a copy of the default at init time // For every property, in the named model, which is not a mapped property for (auto &d : desc.getPropertiesMap()) { if (mapped_properties.find({instance_id, d.first}) == mapped_properties.end()) { // Find the local property data auto &p = properties.at({instance_id, d.first}); // Set back to default value memcpy(hc_buffer + p.offset, d.second.data.ptr, d.second.data.length); // Do rtc too void *rtc_ptr = rtc_caches.at(instance_id)->hc_buffer 
+ p.rtc_offset; memcpy(rtc_ptr, d.second.data.ptr, d.second.data.length); assert(d.second.data.length == p.length); } } setDeviceRequiresUpdateFlag(instance_id); } void EnvironmentManager::setDeviceRequiresUpdateFlag(const unsigned int &instance_id) { std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); // Don't lock mutex here, lock it in the calling function // Increment host version if (instance_id == UINT_MAX) { // Set required version for all, we have defragged for (auto &a : deviceRequiresUpdate) { a.second.c_update_required = true; a.second.rtc_update_required = true; } } else { // Set individual auto &flags = deviceRequiresUpdate.at(instance_id); flags.c_update_required = true; flags.rtc_update_required = true; } } void EnvironmentManager::updateDevice(const unsigned int &instance_id) { // Lock shared mutex of mutex in calling method first!!! // Device must be init first assert(deviceInitialised); std::unique_lock<std::shared_timed_mutex> deviceRequiresUpdate_lock(deviceRequiresUpdate_mutex); NVTX_RANGE("EnvironmentManager::updateDevice()"); auto &flags = deviceRequiresUpdate.at(instance_id); auto &c_update_required = flags.c_update_required; auto &rtc_update_required = flags.rtc_update_required; auto &curve_registration_required = flags.curve_registration_required; if (c_update_required) { // Store data gpuErrchk(cudaMemcpy(reinterpret_cast<void*>(const_cast<char*>(c_buffer)), reinterpret_cast<void*>(const_cast<char*>(hc_buffer)), MAX_BUFFER_SIZE, cudaMemcpyHostToDevice)); // Update C update flag for all instances for (auto &a : deviceRequiresUpdate) { a.second.c_update_required = false; } } if (rtc_update_required) { // RTC is nolonger updated here, it's always updated before the CurveRTCHost is pushed to device. 
// Update instance's rtc update flag rtc_update_required = false; } if (curve_registration_required) { auto &curve = detail::curve::Curve::getInstance(); // Update cub for any not mapped properties for (auto &p : properties) { if (p.first.first == instance_id) { // Generate hash for the subproperty name detail::curve::Curve::VariableHash cvh = toHash(p.first); const auto &prop = p.second; // Create the mapping const auto CURVE_RESULT = curve.registerVariableByHash(cvh, reinterpret_cast<void*>(prop.offset), prop.length / prop.elements, prop.elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE, " "in EnvironmentManager::updateDevice()."); } #ifdef _DEBUG if (CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", p.first.second.c_str()); } #endif } } // Update cub for any mapped properties for (auto &mp : mapped_properties) { if (mp.first.first == instance_id) { // Generate hash for the subproperty name detail::curve::Curve::VariableHash cvh = toHash(mp.first); const auto &masterprop = properties.at(mp.second.masterProp); // Create the mapping const auto CURVE_RESULT = curve.registerVariableByHash(cvh, reinterpret_cast<void*>(masterprop.offset), masterprop.length / masterprop.elements, masterprop.elements); if (CURVE_RESULT == detail::curve::Curve::UNKNOWN_VARIABLE) { THROW exception::CurveException("curveRegisterVariableByHash() returned UNKNOWN_CURVE_VARIABLE, " "in EnvironmentManager::updateDevice()."); } #ifdef _DEBUG if (CURVE_RESULT != static_cast<int>(cvh%detail::curve::Curve::MAX_VARIABLES)) { fprintf(stderr, "Curve Warning: Environment Property '%s' has a collision and may work improperly.\n", mp.first.second.c_str()); } #endif } } curve_registration_required = false; } } EnvironmentManager& 
EnvironmentManager::getInstance() { auto lock = std::unique_lock<std::mutex>(instance_mutex); // Mutex to protect from two threads triggering the static instantiation concurrently static std::map<int, std::unique_ptr<EnvironmentManager>> instances = {}; // Instantiated on first use. int device_id = -1; gpuErrchk(cudaGetDevice(&device_id)); // Can't use operator[] here, constructor is private const auto f = instances.find(device_id); if (f != instances.end()) return *f->second; return *(instances.emplace(device_id, std::unique_ptr<EnvironmentManager>(new EnvironmentManager())).first->second); } util::Any EnvironmentManager::getPropertyAny(const unsigned int &instance_id, const std::string &var_name) const { std::shared_lock<std::shared_timed_mutex> lock(mutex); const NamePair name = toName(instance_id, var_name); auto a = properties.find(name); if (a != properties.end()) return util::Any(hc_buffer + a->second.offset, a->second.length, a->second.type, a->second.elements); const auto b = mapped_properties.find(name); if (b != mapped_properties.end()) { a = properties.find(b->second.masterProp); if (a != properties.end()) return util::Any(hc_buffer + a->second.offset, a->second.length, a->second.type, a->second.elements); THROW exception::InvalidEnvProperty("Mapped environmental property with name '%u:%s' maps to missing property with name '%u:%s', " "in EnvironmentManager::getPropertyAny().", name.first, name.second.c_str(), b->second.masterProp.first, b->second.masterProp.second.c_str()); } THROW exception::InvalidEnvProperty("Environmental property with name '%u:%s' does not exist, " "in EnvironmentManager::getPropertyAny().", name.first, name.second.c_str()); } } // namespace flamegpu
CellListGPU.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

// Maintainer: joaander

#include "CellListGPU.cuh"
#include "hoomd/extern/util/mgpucontext.h"
#include "hoomd/extern/kernels/localitysort.cuh"

/*! \file CellListGPU.cu
    \brief Defines GPU kernel code for cell list generation on the GPU
*/

//! Kernel that computes the cell list on the GPU
/*! One thread per (real or ghost) particle: the thread computes the particle's
    cell from its position and appends the particle's data to that cell using an
    atomic per-cell counter.

    \param d_cell_size Number of particles in each cell
    \param d_xyzf Cell XYZF data array
    \param d_tdb Cell TDB data array
    \param d_cell_orientation Particle orientation in cell list
    \param d_cell_idx Particle index in cell list
    \param d_conditions Conditions flags for detecting overflow and other error conditions
    \param d_pos Particle position array
    \param d_orientation Particle orientation array
    \param d_charge Particle charge array
    \param d_diameter Particle diameter array
    \param d_body Particle body array
    \param N Number of particles
    \param n_ghost Number of ghost particles
    \param Nmax Maximum number of particles that can be placed in a single cell
    \param flag_charge Set to true to store charge in the flag position in \a d_xyzf
    \param flag_type Set to true to store type in the flag position in \a d_xyzf
    \param box Box dimensions
    \param ci Indexer to compute cell id from cell grid coords
    \param cli Indexer to index into \a d_xyzf and \a d_tdb
    \param ghost_width Width of ghost layer

    \note Optimized for Fermi
*/
__global__ void gpu_compute_cell_list_kernel(unsigned int *d_cell_size,
                                             Scalar4 *d_xyzf,
                                             Scalar4 *d_tdb,
                                             Scalar4 *d_cell_orientation,
                                             unsigned int *d_cell_idx,
                                             uint3 *d_conditions,
                                             const Scalar4 *d_pos,
                                             const Scalar4 *d_orientation,
                                             const Scalar *d_charge,
                                             const Scalar *d_diameter,
                                             const unsigned int *d_body,
                                             const unsigned int N,
                                             const unsigned int n_ghost,
                                             const unsigned int Nmax,
                                             const bool flag_charge,
                                             const bool flag_type,
                                             const BoxDim box,
                                             const Index3D ci,
                                             const Index2D cli,
                                             const Scalar3 ghost_width)
    {
    // read in the particle that belongs to this thread
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N + n_ghost)
        return;

    Scalar4 postype = d_pos[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);

    Scalar flag = 0;
    Scalar diameter = 0;
    Scalar body = 0;
    Scalar type = postype.w;
    Scalar4 orientation = make_scalar4(0, 0, 0, 0);

    // only fetch optional per-particle data when the corresponding output exists
    if (d_tdb != NULL)
        {
        diameter = d_diameter[idx];
        body = __int_as_scalar(d_body[idx]);
        }
    if (d_cell_orientation != NULL)
        {
        orientation = d_orientation[idx];
        }

    // select what is stored in the flag (w) component of d_xyzf
    if (flag_charge)
        flag = d_charge[idx];
    else if (flag_type)
        flag = type;
    else
        flag = __int_as_scalar(idx);

    // check for nan pos
    if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z))
        {
        (*d_conditions).y = idx + 1;
        return;
        }

    uchar3 periodic = box.getPeriodic();
    Scalar3 f = box.makeFraction(pos, ghost_width);

    // check if the particle is inside the unit cell + ghost layer in all dimensions
    if ((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
        (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
        (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001)))
        {
        // if a ghost particle is out of bounds, silently ignore it
        if (idx < N)
            (*d_conditions).z = idx + 1;
        return;
        }

    // find the bin each particle belongs in
    int ib = (int)(f.x * ci.getW());
    int jb = (int)(f.y * ci.getH());
    int kb = (int)(f.z * ci.getD());

    // need to handle the case where the particle is exactly at the box hi
    if (ib == ci.getW() && periodic.x)
        ib = 0;
    if (jb == ci.getH() && periodic.y)
        jb = 0;
    if (kb == ci.getD() && periodic.z)
        kb = 0;

    // all particles should be in a valid cell: validate the grid coordinates
    // *before* folding them into a flat index with ci()
    if (ib < 0 || ib >= (int)ci.getW() ||
        jb < 0 || jb >= (int)ci.getH() ||
        kb < 0 || kb >= (int)ci.getD())
        {
        // but ghost particles that are out of range should not produce an error
        if (idx < N)
            (*d_conditions).z = idx + 1;
        return;
        }

    unsigned int bin = ci(ib, jb, kb);

    // reserve a slot in the cell; atomicInc returns the previous cell size
    unsigned int size = atomicInc(&d_cell_size[bin], 0xffffffff);
    if (size < Nmax)
        {
        unsigned int write_pos = cli(size, bin);
        d_xyzf[write_pos] = make_scalar4(pos.x, pos.y, pos.z, flag);
        if (d_tdb != NULL)
            d_tdb[write_pos] = make_scalar4(type, diameter, body, 0);
        if (d_cell_orientation != NULL)
            d_cell_orientation[write_pos] = orientation;
        if (d_cell_idx != NULL)
            d_cell_idx[write_pos] = idx;
        }
    else
        {
        // handle overflow: record the largest size seen so the caller can resize
        atomicMax(&(*d_conditions).x, size + 1);
        }
    }

//! Host driver: zeroes the cell sizes and launches gpu_compute_cell_list_kernel()
/*! The kernel's maximum supported block size is queried once and cached; the
    requested \a block_size is clamped to it. Returns the error from the memset,
    otherwise hipSuccess (kernel launch errors surface at the next sync).
*/
hipError_t gpu_compute_cell_list(unsigned int *d_cell_size,
                                 Scalar4 *d_xyzf,
                                 Scalar4 *d_tdb,
                                 Scalar4 *d_cell_orientation,
                                 unsigned int *d_cell_idx,
                                 uint3 *d_conditions,
                                 const Scalar4 *d_pos,
                                 const Scalar4 *d_orientation,
                                 const Scalar *d_charge,
                                 const Scalar *d_diameter,
                                 const unsigned int *d_body,
                                 const unsigned int N,
                                 const unsigned int n_ghost,
                                 const unsigned int Nmax,
                                 const bool flag_charge,
                                 const bool flag_type,
                                 const BoxDim& box,
                                 const Index3D& ci,
                                 const Index2D& cli,
                                 const Scalar3& ghost_width,
                                 const unsigned int block_size)
    {
    hipError_t err;

    // reset all cell counters before binning
    err = hipMemsetAsync(d_cell_size, 0, sizeof(unsigned int) * ci.getNumElements(), 0);
    if (err != hipSuccess)
        return err;

    // query the kernel's maximum block size once and cache it
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_compute_cell_list_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    unsigned int run_block_size = min(block_size, max_block_size);
    int n_blocks = (N + n_ghost) / run_block_size + 1;

    hipLaunchKernelGGL(gpu_compute_cell_list_kernel,
                       dim3(n_blocks),
                       dim3(run_block_size),
                       0,
                       0,
                       d_cell_size,
                       d_xyzf,
                       d_tdb,
                       d_cell_orientation,
                       d_cell_idx,
                       d_conditions,
                       d_pos,
                       d_orientation,
                       d_charge,
                       d_diameter,
                       d_body,
                       N,
                       n_ghost,
                       Nmax,
                       flag_charge,
                       flag_type,
                       box,
                       ci,
                       cli,
                       ghost_width);

    return hipSuccess;
    }

// ********************* Following are helper functions, structs, etc for the 1x optimized cell list build

//! \internal
/*! \param a First element
    \param b Second element

    The two elements are swapped
*/
template<class T>
__device__ inline void swap(T& a, T& b)
    {
    T tmp = a;
    a = b;
    b = tmp;
    }

//! \internal
/*! In-place bitonic sort of one block's worth of data in shared memory.
    \param shared Pointer to shared memory to bitonic sort
    \tparam block_size Number of elements (and threads); must be a power of two
*/
template<class T, unsigned int block_size>
__device__ inline void bitonic_sort(T *shared)
    {
    unsigned int tid = threadIdx.x;

    // Parallel bitonic sort.
    for (int k = 2; k <= block_size; k *= 2)
        {
        // Bitonic merge:
        for (int j = k / 2; j > 0; j /= 2)
            {
            int ixj = tid ^ j;

            if (ixj > tid)
                {
                if ((tid & k) == 0)
                    {
                    // ascending subsequence
                    if (shared[tid] > shared[ixj])
                        swap(shared[tid], shared[ixj]);
                    }
                else
                    {
                    // descending subsequence
                    if (shared[tid] < shared[ixj])
                        swap(shared[tid], shared[ixj]);
                    }
                }

            __syncthreads();
            }
        }
    }

//! \internal
/*! \brief Pair a particle and its assigned bin together for sorting
*/
struct bin_id_pair
    {
    unsigned int bin;          //!< Cell index
    unsigned int id;           //!< Particle id
    unsigned int start_offset; //!< Write offset
    };

//! \internal
/*! \param bin Cell index
    \param id Particle id
*/
__device__ inline bin_id_pair make_bin_id_pair(unsigned int bin, unsigned int id)
    {
    bin_id_pair res;
    res.bin = bin;
    res.id = id;
    res.start_offset = 0;
    return res;
    }

//! \internal
/*! Lexicographic less-than: first by bin, ties broken by particle id.
    \param a First element
    \param b Second element
*/
__device__ inline bool operator<(const bin_id_pair& a, const bin_id_pair& b)
    {
    if (a.bin == b.bin)
        return (a.id < b.id);
    else
        return (a.bin < b.bin);
    }

//! \internal
/*! Lexicographic greater-than: first by bin, ties broken by particle id.
    \param a First element
    \param b Second element
*/
__device__ inline bool operator>(const bin_id_pair& a, const bin_id_pair& b)
    {
    if (a.bin == b.bin)
        return (a.id > b.id);
    else
        return (a.bin > b.bin);
    }

//! \internal
/*! Naive double-buffered inclusive prefix sum over one block.
    \param temp Temporary array in shared memory to scan (2 * block_size elements)
*/
template<class T, unsigned int block_size>
__device__ inline void scan_naive(T *temp)
    {
    int thid = threadIdx.x;

    int pout = 0;
    int pin = 1;

    for (int offset = 1; offset < block_size; offset *= 2)
        {
        // ping-pong the two buffers
        pout = 1 - pout;
        pin = 1 - pout;
        __syncthreads();

        temp[pout * block_size + thid] = temp[pin * block_size + thid];

        if (thid >= offset)
            temp[pout * block_size + thid] += temp[pin * block_size + thid - offset];
        }

    __syncthreads();

    // bring the data back to the initial array
    if (pout == 1)
        {
        pout = 1 - pout;
        pin = 1 - pout;
        temp[pout * block_size + thid] = temp[pin * block_size + thid];
        __syncthreads();
        }
    }

//! Fill the (cell, particle) key table and the identity permutation for sorting
/*! One thread per cell-list slot. Empty slots get particle index UINT_MAX so
    they sort after occupied slots of the same cell.
*/
__global__ void gpu_fill_indices_kernel(unsigned int cl_size,
                                        uint2 *d_idx,
                                        unsigned int *d_sort_permutation,
                                        unsigned int *d_cell_idx,
                                        unsigned int *d_cell_size,
                                        Index3D ci,
                                        Index2D cli)
    {
    unsigned int cell_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (cell_idx >= cl_size)
        return;

    unsigned int icell = cell_idx / cli.getW();
    unsigned int pidx = UINT_MAX;

    if (icell < ci.getNumElements())
        {
        unsigned int my_cell_size = d_cell_size[icell];
        unsigned int ilocal = cell_idx % cli.getW();
        if (ilocal < my_cell_size)
            {
            pidx = d_cell_idx[cell_idx];
            }
        }

    // pack cell idx and particle idx into uint2
    uint2 result;
    result.x = icell;
    result.y = pidx;

    // write out result
    d_idx[cell_idx] = result;

    // write identity permutation
    d_sort_permutation[cell_idx] = cell_idx;
    }

//! Lexicographic comparison operator on uint2
struct comp_less_uint2
    {
    __device__ bool operator()(const uint2 a, const uint2 b)
        {
        return a.x < b.x || (a.x == b.x && a.y < b.y);
        }
    };

//! Gather cell-list entries into the sorted order given by the permutation
/*! One thread per cell-list slot; each optional array is only permuted when
    both the source and destination pointers are non-NULL.
*/
__global__ void gpu_apply_sorted_cell_list_order(unsigned int cl_size,
                                                 unsigned int *d_cell_idx,
                                                 unsigned int *d_cell_idx_new,
                                                 Scalar4 *d_xyzf,
                                                 Scalar4 *d_xyzf_new,
                                                 Scalar4 *d_tdb,
                                                 Scalar4 *d_tdb_new,
                                                 Scalar4 *d_cell_orientation,
                                                 Scalar4 *d_cell_orientation_new,
                                                 unsigned int *d_sort_permutation,
                                                 Index2D cli)
    {
    unsigned int cell_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (cell_idx >= cl_size)
        return;

    unsigned int perm_idx = d_sort_permutation[cell_idx];

    d_xyzf_new[cell_idx] = d_xyzf[perm_idx];
    if (d_cell_idx)
        d_cell_idx_new[cell_idx] = d_cell_idx[perm_idx];
    if (d_tdb)
        d_tdb_new[cell_idx] = d_tdb[perm_idx];
    if (d_cell_orientation)
        d_cell_orientation_new[cell_idx] = d_cell_orientation[perm_idx];
    }

/*! Driver function to sort the cell list on the GPU

   This applies lexicographical order to cell idx, particle idx pairs
   \param d_cell_size List of cell sizes
   \param d_xyzf List of coordinates and flag
   \param d_tdb List type diameter and body index
   \param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
   \param d_sort_permutation Temporary array for storing the permuted cell list indices
   \param ci Cell indexer
   \param cli Cell list indexer
   \param mgpu_context ModernGPU context

   \returns the first hipMemcpy error encountered, or hipSuccess
 */
hipError_t gpu_sort_cell_list(unsigned int *d_cell_size,
                              Scalar4 *d_xyzf,
                              Scalar4 *d_xyzf_new,
                              Scalar4 *d_tdb,
                              Scalar4 *d_tdb_new,
                              Scalar4 *d_cell_orientation,
                              Scalar4 *d_cell_orientation_new,
                              unsigned int *d_cell_idx,
                              unsigned int *d_cell_idx_new,
                              uint2 *d_sort_idx,
                              unsigned int *d_sort_permutation,
                              const Index3D ci,
                              const Index2D cli,
                              mgpu::ContextPtr mgpu_context)
    {
    unsigned int block_size = 256;

    // fill indices table with cell idx/particle idx pairs
    dim3 threads(block_size);
    dim3 grid(cli.getNumElements() / block_size + 1);

    hipLaunchKernelGGL(gpu_fill_indices_kernel,
                       grid,
                       threads,
                       0,
                       0,
                       cli.getNumElements(),
                       d_sort_idx,
                       d_sort_permutation,
                       d_cell_idx,
                       d_cell_size,
                       ci,
                       cli);

    // locality sort on those pairs
    mgpu::LocalitySortPairs(d_sort_idx,
                            d_sort_permutation,
                            cli.getNumElements(),
                            *mgpu_context,
                            comp_less_uint2());

    // apply sorted order
    hipLaunchKernelGGL(gpu_apply_sorted_cell_list_order,
                       grid,
                       threads,
                       0,
                       0,
                       cli.getNumElements(),
                       d_cell_idx,
                       d_cell_idx_new,
                       d_xyzf,
                       d_xyzf_new,
                       d_tdb,
                       d_tdb_new,
                       d_cell_orientation,
                       d_cell_orientation_new,
                       d_sort_permutation,
                       cli);

    // copy back permuted arrays to original ones, propagating any copy errors
    // (previously the return codes of these copies were silently discarded)
    hipError_t err;
    err = hipMemcpy(d_xyzf,
                    d_xyzf_new,
                    sizeof(Scalar4) * cli.getNumElements(),
                    hipMemcpyDeviceToDevice);
    if (err != hipSuccess)
        return err;

    err = hipMemcpy(d_cell_idx,
                    d_cell_idx_new,
                    sizeof(unsigned int) * cli.getNumElements(),
                    hipMemcpyDeviceToDevice);
    if (err != hipSuccess)
        return err;

    if (d_tdb)
        {
        err = hipMemcpy(d_tdb,
                        d_tdb_new,
                        sizeof(Scalar4) * cli.getNumElements(),
                        hipMemcpyDeviceToDevice);
        if (err != hipSuccess)
            return err;
        }

    if (d_cell_orientation)
        {
        err = hipMemcpy(d_cell_orientation,
                        d_cell_orientation_new,
                        sizeof(Scalar4) * cli.getNumElements(),
                        hipMemcpyDeviceToDevice);
        if (err != hipSuccess)
            return err;
        }

    return hipSuccess;
    }
CellListGPU.cu
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

// Maintainer: joaander

#include "CellListGPU.cuh"
#include "hoomd/extern/util/mgpucontext.h"
#include "hoomd/extern/kernels/localitysort.cuh"

/*! \file CellListGPU.cu
    \brief Defines GPU kernel code for cell list generation on the GPU
*/

//! Kernel that computes the cell list on the GPU
/*! One thread per (real or ghost) particle: the thread computes the particle's
    cell from its position and appends the particle's data to that cell using an
    atomic per-cell counter.

    \param d_cell_size Number of particles in each cell
    \param d_xyzf Cell XYZF data array
    \param d_tdb Cell TDB data array
    \param d_cell_orientation Particle orientation in cell list
    \param d_cell_idx Particle index in cell list
    \param d_conditions Conditions flags for detecting overflow and other error conditions
    \param d_pos Particle position array
    \param d_orientation Particle orientation array
    \param d_charge Particle charge array
    \param d_diameter Particle diameter array
    \param d_body Particle body array
    \param N Number of particles
    \param n_ghost Number of ghost particles
    \param Nmax Maximum number of particles that can be placed in a single cell
    \param flag_charge Set to true to store charge in the flag position in \a d_xyzf
    \param flag_type Set to true to store type in the flag position in \a d_xyzf
    \param box Box dimensions
    \param ci Indexer to compute cell id from cell grid coords
    \param cli Indexer to index into \a d_xyzf and \a d_tdb
    \param ghost_width Width of ghost layer

    \note Optimized for Fermi
*/
__global__ void gpu_compute_cell_list_kernel(unsigned int *d_cell_size,
                                             Scalar4 *d_xyzf,
                                             Scalar4 *d_tdb,
                                             Scalar4 *d_cell_orientation,
                                             unsigned int *d_cell_idx,
                                             uint3 *d_conditions,
                                             const Scalar4 *d_pos,
                                             const Scalar4 *d_orientation,
                                             const Scalar *d_charge,
                                             const Scalar *d_diameter,
                                             const unsigned int *d_body,
                                             const unsigned int N,
                                             const unsigned int n_ghost,
                                             const unsigned int Nmax,
                                             const bool flag_charge,
                                             const bool flag_type,
                                             const BoxDim box,
                                             const Index3D ci,
                                             const Index2D cli,
                                             const Scalar3 ghost_width)
    {
    // read in the particle that belongs to this thread
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N + n_ghost)
        return;

    Scalar4 postype = d_pos[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);

    Scalar flag = 0;
    Scalar diameter = 0;
    Scalar body = 0;
    Scalar type = postype.w;
    Scalar4 orientation = make_scalar4(0, 0, 0, 0);

    // only fetch optional per-particle data when the corresponding output exists
    if (d_tdb != NULL)
        {
        diameter = d_diameter[idx];
        body = __int_as_scalar(d_body[idx]);
        }
    if (d_cell_orientation != NULL)
        {
        orientation = d_orientation[idx];
        }

    // select what is stored in the flag (w) component of d_xyzf
    if (flag_charge)
        flag = d_charge[idx];
    else if (flag_type)
        flag = type;
    else
        flag = __int_as_scalar(idx);

    // check for nan pos
    if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z))
        {
        (*d_conditions).y = idx + 1;
        return;
        }

    uchar3 periodic = box.getPeriodic();
    Scalar3 f = box.makeFraction(pos, ghost_width);

    // check if the particle is inside the unit cell + ghost layer in all dimensions
    if ((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
        (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
        (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001)))
        {
        // if a ghost particle is out of bounds, silently ignore it
        if (idx < N)
            (*d_conditions).z = idx + 1;
        return;
        }

    // find the bin each particle belongs in
    int ib = (int)(f.x * ci.getW());
    int jb = (int)(f.y * ci.getH());
    int kb = (int)(f.z * ci.getD());

    // need to handle the case where the particle is exactly at the box hi
    if (ib == ci.getW() && periodic.x)
        ib = 0;
    if (jb == ci.getH() && periodic.y)
        jb = 0;
    if (kb == ci.getD() && periodic.z)
        kb = 0;

    // all particles should be in a valid cell: validate the grid coordinates
    // *before* folding them into a flat index with ci()
    if (ib < 0 || ib >= (int)ci.getW() ||
        jb < 0 || jb >= (int)ci.getH() ||
        kb < 0 || kb >= (int)ci.getD())
        {
        // but ghost particles that are out of range should not produce an error
        if (idx < N)
            (*d_conditions).z = idx + 1;
        return;
        }

    unsigned int bin = ci(ib, jb, kb);

    // reserve a slot in the cell; atomicInc returns the previous cell size
    unsigned int size = atomicInc(&d_cell_size[bin], 0xffffffff);
    if (size < Nmax)
        {
        unsigned int write_pos = cli(size, bin);
        d_xyzf[write_pos] = make_scalar4(pos.x, pos.y, pos.z, flag);
        if (d_tdb != NULL)
            d_tdb[write_pos] = make_scalar4(type, diameter, body, 0);
        if (d_cell_orientation != NULL)
            d_cell_orientation[write_pos] = orientation;
        if (d_cell_idx != NULL)
            d_cell_idx[write_pos] = idx;
        }
    else
        {
        // handle overflow: record the largest size seen so the caller can resize
        atomicMax(&(*d_conditions).x, size + 1);
        }
    }

//! Host driver: zeroes the cell sizes and launches gpu_compute_cell_list_kernel()
/*! The kernel's maximum supported block size is queried once and cached; the
    requested \a block_size is clamped to it. Returns the error from the memset,
    otherwise cudaSuccess (kernel launch errors surface at the next sync).
*/
cudaError_t gpu_compute_cell_list(unsigned int *d_cell_size,
                                  Scalar4 *d_xyzf,
                                  Scalar4 *d_tdb,
                                  Scalar4 *d_cell_orientation,
                                  unsigned int *d_cell_idx,
                                  uint3 *d_conditions,
                                  const Scalar4 *d_pos,
                                  const Scalar4 *d_orientation,
                                  const Scalar *d_charge,
                                  const Scalar *d_diameter,
                                  const unsigned int *d_body,
                                  const unsigned int N,
                                  const unsigned int n_ghost,
                                  const unsigned int Nmax,
                                  const bool flag_charge,
                                  const bool flag_type,
                                  const BoxDim& box,
                                  const Index3D& ci,
                                  const Index2D& cli,
                                  const Scalar3& ghost_width,
                                  const unsigned int block_size)
    {
    cudaError_t err;

    // reset all cell counters before binning
    err = cudaMemsetAsync(d_cell_size, 0, sizeof(unsigned int) * ci.getNumElements(), 0);
    if (err != cudaSuccess)
        return err;

    // query the kernel's maximum block size once and cache it
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void *)gpu_compute_cell_list_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    unsigned int run_block_size = min(block_size, max_block_size);
    int n_blocks = (N + n_ghost) / run_block_size + 1;

    gpu_compute_cell_list_kernel<<<n_blocks, run_block_size>>>(d_cell_size,
                                                               d_xyzf,
                                                               d_tdb,
                                                               d_cell_orientation,
                                                               d_cell_idx,
                                                               d_conditions,
                                                               d_pos,
                                                               d_orientation,
                                                               d_charge,
                                                               d_diameter,
                                                               d_body,
                                                               N,
                                                               n_ghost,
                                                               Nmax,
                                                               flag_charge,
                                                               flag_type,
                                                               box,
                                                               ci,
                                                               cli,
                                                               ghost_width);

    return cudaSuccess;
    }

// ********************* Following are helper functions, structs, etc for the 1x optimized cell list build

//! \internal
/*! \param a First element
    \param b Second element

    The two elements are swapped
*/
template<class T>
__device__ inline void swap(T& a, T& b)
    {
    T tmp = a;
    a = b;
    b = tmp;
    }

//! \internal
/*! In-place bitonic sort of one block's worth of data in shared memory.
    \param shared Pointer to shared memory to bitonic sort
    \tparam block_size Number of elements (and threads); must be a power of two
*/
template<class T, unsigned int block_size>
__device__ inline void bitonic_sort(T *shared)
    {
    unsigned int tid = threadIdx.x;

    // Parallel bitonic sort.
    for (int k = 2; k <= block_size; k *= 2)
        {
        // Bitonic merge:
        for (int j = k / 2; j > 0; j /= 2)
            {
            int ixj = tid ^ j;

            if (ixj > tid)
                {
                if ((tid & k) == 0)
                    {
                    // ascending subsequence
                    if (shared[tid] > shared[ixj])
                        swap(shared[tid], shared[ixj]);
                    }
                else
                    {
                    // descending subsequence
                    if (shared[tid] < shared[ixj])
                        swap(shared[tid], shared[ixj]);
                    }
                }

            __syncthreads();
            }
        }
    }

//! \internal
/*! \brief Pair a particle and its assigned bin together for sorting
*/
struct bin_id_pair
    {
    unsigned int bin;          //!< Cell index
    unsigned int id;           //!< Particle id
    unsigned int start_offset; //!< Write offset
    };

//! \internal
/*! \param bin Cell index
    \param id Particle id
*/
__device__ inline bin_id_pair make_bin_id_pair(unsigned int bin, unsigned int id)
    {
    bin_id_pair res;
    res.bin = bin;
    res.id = id;
    res.start_offset = 0;
    return res;
    }

//! \internal
/*! Lexicographic less-than: first by bin, ties broken by particle id.
    \param a First element
    \param b Second element
*/
__device__ inline bool operator<(const bin_id_pair& a, const bin_id_pair& b)
    {
    if (a.bin == b.bin)
        return (a.id < b.id);
    else
        return (a.bin < b.bin);
    }

//! \internal
/*! Lexicographic greater-than: first by bin, ties broken by particle id.
    \param a First element
    \param b Second element
*/
__device__ inline bool operator>(const bin_id_pair& a, const bin_id_pair& b)
    {
    if (a.bin == b.bin)
        return (a.id > b.id);
    else
        return (a.bin > b.bin);
    }

//! \internal
/*! Naive double-buffered inclusive prefix sum over one block.
    \param temp Temporary array in shared memory to scan (2 * block_size elements)
*/
template<class T, unsigned int block_size>
__device__ inline void scan_naive(T *temp)
    {
    int thid = threadIdx.x;

    int pout = 0;
    int pin = 1;

    for (int offset = 1; offset < block_size; offset *= 2)
        {
        // ping-pong the two buffers
        pout = 1 - pout;
        pin = 1 - pout;
        __syncthreads();

        temp[pout * block_size + thid] = temp[pin * block_size + thid];

        if (thid >= offset)
            temp[pout * block_size + thid] += temp[pin * block_size + thid - offset];
        }

    __syncthreads();

    // bring the data back to the initial array
    if (pout == 1)
        {
        pout = 1 - pout;
        pin = 1 - pout;
        temp[pout * block_size + thid] = temp[pin * block_size + thid];
        __syncthreads();
        }
    }

//! Fill the (cell, particle) key table and the identity permutation for sorting
/*! One thread per cell-list slot. Empty slots get particle index UINT_MAX so
    they sort after occupied slots of the same cell.
*/
__global__ void gpu_fill_indices_kernel(unsigned int cl_size,
                                        uint2 *d_idx,
                                        unsigned int *d_sort_permutation,
                                        unsigned int *d_cell_idx,
                                        unsigned int *d_cell_size,
                                        Index3D ci,
                                        Index2D cli)
    {
    unsigned int cell_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (cell_idx >= cl_size)
        return;

    unsigned int icell = cell_idx / cli.getW();
    unsigned int pidx = UINT_MAX;

    if (icell < ci.getNumElements())
        {
        unsigned int my_cell_size = d_cell_size[icell];
        unsigned int ilocal = cell_idx % cli.getW();
        if (ilocal < my_cell_size)
            {
            pidx = d_cell_idx[cell_idx];
            }
        }

    // pack cell idx and particle idx into uint2
    uint2 result;
    result.x = icell;
    result.y = pidx;

    // write out result
    d_idx[cell_idx] = result;

    // write identity permutation
    d_sort_permutation[cell_idx] = cell_idx;
    }

//! Lexicographic comparison operator on uint2
struct comp_less_uint2
    {
    __device__ bool operator()(const uint2 a, const uint2 b)
        {
        return a.x < b.x || (a.x == b.x && a.y < b.y);
        }
    };

//! Gather cell-list entries into the sorted order given by the permutation
/*! One thread per cell-list slot; each optional array is only permuted when
    both the source and destination pointers are non-NULL.
*/
__global__ void gpu_apply_sorted_cell_list_order(unsigned int cl_size,
                                                 unsigned int *d_cell_idx,
                                                 unsigned int *d_cell_idx_new,
                                                 Scalar4 *d_xyzf,
                                                 Scalar4 *d_xyzf_new,
                                                 Scalar4 *d_tdb,
                                                 Scalar4 *d_tdb_new,
                                                 Scalar4 *d_cell_orientation,
                                                 Scalar4 *d_cell_orientation_new,
                                                 unsigned int *d_sort_permutation,
                                                 Index2D cli)
    {
    unsigned int cell_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (cell_idx >= cl_size)
        return;

    unsigned int perm_idx = d_sort_permutation[cell_idx];

    d_xyzf_new[cell_idx] = d_xyzf[perm_idx];
    if (d_cell_idx)
        d_cell_idx_new[cell_idx] = d_cell_idx[perm_idx];
    if (d_tdb)
        d_tdb_new[cell_idx] = d_tdb[perm_idx];
    if (d_cell_orientation)
        d_cell_orientation_new[cell_idx] = d_cell_orientation[perm_idx];
    }

/*! Driver function to sort the cell list on the GPU

   This applies lexicographical order to cell idx, particle idx pairs
   \param d_cell_size List of cell sizes
   \param d_xyzf List of coordinates and flag
   \param d_tdb List type diameter and body index
   \param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
   \param d_sort_permutation Temporary array for storing the permuted cell list indices
   \param ci Cell indexer
   \param cli Cell list indexer
   \param mgpu_context ModernGPU context

   \returns the first cudaMemcpy error encountered, or cudaSuccess
 */
cudaError_t gpu_sort_cell_list(unsigned int *d_cell_size,
                               Scalar4 *d_xyzf,
                               Scalar4 *d_xyzf_new,
                               Scalar4 *d_tdb,
                               Scalar4 *d_tdb_new,
                               Scalar4 *d_cell_orientation,
                               Scalar4 *d_cell_orientation_new,
                               unsigned int *d_cell_idx,
                               unsigned int *d_cell_idx_new,
                               uint2 *d_sort_idx,
                               unsigned int *d_sort_permutation,
                               const Index3D ci,
                               const Index2D cli,
                               mgpu::ContextPtr mgpu_context)
    {
    unsigned int block_size = 256;

    // fill indices table with cell idx/particle idx pairs
    dim3 threads(block_size);
    dim3 grid(cli.getNumElements() / block_size + 1);

    gpu_fill_indices_kernel<<<grid, threads>>>(cli.getNumElements(),
                                               d_sort_idx,
                                               d_sort_permutation,
                                               d_cell_idx,
                                               d_cell_size,
                                               ci,
                                               cli);

    // locality sort on those pairs
    mgpu::LocalitySortPairs(d_sort_idx,
                            d_sort_permutation,
                            cli.getNumElements(),
                            *mgpu_context,
                            comp_less_uint2());

    // apply sorted order
    gpu_apply_sorted_cell_list_order<<<grid, threads>>>(cli.getNumElements(),
                                                        d_cell_idx,
                                                        d_cell_idx_new,
                                                        d_xyzf,
                                                        d_xyzf_new,
                                                        d_tdb,
                                                        d_tdb_new,
                                                        d_cell_orientation,
                                                        d_cell_orientation_new,
                                                        d_sort_permutation,
                                                        cli);

    // copy back permuted arrays to original ones, propagating any copy errors
    // (previously the return codes of these copies were silently discarded)
    cudaError_t err;
    err = cudaMemcpy(d_xyzf,
                     d_xyzf_new,
                     sizeof(Scalar4) * cli.getNumElements(),
                     cudaMemcpyDeviceToDevice);
    if (err != cudaSuccess)
        return err;

    err = cudaMemcpy(d_cell_idx,
                     d_cell_idx_new,
                     sizeof(unsigned int) * cli.getNumElements(),
                     cudaMemcpyDeviceToDevice);
    if (err != cudaSuccess)
        return err;

    if (d_tdb)
        {
        err = cudaMemcpy(d_tdb,
                         d_tdb_new,
                         sizeof(Scalar4) * cli.getNumElements(),
                         cudaMemcpyDeviceToDevice);
        if (err != cudaSuccess)
            return err;
        }

    if (d_cell_orientation)
        {
        err = cudaMemcpy(d_cell_orientation,
                         d_cell_orientation_new,
                         sizeof(Scalar4) * cli.getNumElements(),
                         cudaMemcpyDeviceToDevice);
        if (err != cudaSuccess)
            return err;
        }

    return cudaSuccess;
    }
CellListGPU.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

// Maintainer: joaander

#include "CellListGPU.cuh"
#include "hoomd/extern/util/mgpucontext.h"
#include "hoomd/extern/kernels/localitysort.cuh"

/*! \file CellListGPU.cu
    \brief Defines GPU kernel code for cell list generation on the GPU
*/

//! Kernel that computes the cell list on the GPU
/*! One thread per (real or ghost) particle: the thread computes the particle's
    cell from its position and appends the particle's data to that cell using an
    atomic per-cell counter.

    \param d_cell_size Number of particles in each cell
    \param d_xyzf Cell XYZF data array
    \param d_tdb Cell TDB data array
    \param d_cell_orientation Particle orientation in cell list
    \param d_cell_idx Particle index in cell list
    \param d_conditions Conditions flags for detecting overflow and other error conditions
    \param d_pos Particle position array
    \param d_orientation Particle orientation array
    \param d_charge Particle charge array
    \param d_diameter Particle diameter array
    \param d_body Particle body array
    \param N Number of particles
    \param n_ghost Number of ghost particles
    \param Nmax Maximum number of particles that can be placed in a single cell
    \param flag_charge Set to true to store charge in the flag position in \a d_xyzf
    \param flag_type Set to true to store type in the flag position in \a d_xyzf
    \param box Box dimensions
    \param ci Indexer to compute cell id from cell grid coords
    \param cli Indexer to index into \a d_xyzf and \a d_tdb
    \param ghost_width Width of ghost layer

    \note Optimized for Fermi
*/
__global__ void gpu_compute_cell_list_kernel(unsigned int *d_cell_size,
                                             Scalar4 *d_xyzf,
                                             Scalar4 *d_tdb,
                                             Scalar4 *d_cell_orientation,
                                             unsigned int *d_cell_idx,
                                             uint3 *d_conditions,
                                             const Scalar4 *d_pos,
                                             const Scalar4 *d_orientation,
                                             const Scalar *d_charge,
                                             const Scalar *d_diameter,
                                             const unsigned int *d_body,
                                             const unsigned int N,
                                             const unsigned int n_ghost,
                                             const unsigned int Nmax,
                                             const bool flag_charge,
                                             const bool flag_type,
                                             const BoxDim box,
                                             const Index3D ci,
                                             const Index2D cli,
                                             const Scalar3 ghost_width)
    {
    // read in the particle that belongs to this thread
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N + n_ghost)
        return;

    Scalar4 postype = d_pos[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);

    Scalar flag = 0;
    Scalar diameter = 0;
    Scalar body = 0;
    Scalar type = postype.w;
    Scalar4 orientation = make_scalar4(0, 0, 0, 0);

    // only fetch optional per-particle data when the corresponding output exists
    if (d_tdb != NULL)
        {
        diameter = d_diameter[idx];
        body = __int_as_scalar(d_body[idx]);
        }
    if (d_cell_orientation != NULL)
        {
        orientation = d_orientation[idx];
        }

    // select what is stored in the flag (w) component of d_xyzf
    if (flag_charge)
        flag = d_charge[idx];
    else if (flag_type)
        flag = type;
    else
        flag = __int_as_scalar(idx);

    // check for nan pos
    if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z))
        {
        (*d_conditions).y = idx + 1;
        return;
        }

    uchar3 periodic = box.getPeriodic();
    Scalar3 f = box.makeFraction(pos, ghost_width);

    // check if the particle is inside the unit cell + ghost layer in all dimensions
    if ((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
        (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
        (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001)))
        {
        // if a ghost particle is out of bounds, silently ignore it
        if (idx < N)
            (*d_conditions).z = idx + 1;
        return;
        }

    // find the bin each particle belongs in
    int ib = (int)(f.x * ci.getW());
    int jb = (int)(f.y * ci.getH());
    int kb = (int)(f.z * ci.getD());

    // need to handle the case where the particle is exactly at the box hi
    if (ib == ci.getW() && periodic.x)
        ib = 0;
    if (jb == ci.getH() && periodic.y)
        jb = 0;
    if (kb == ci.getD() && periodic.z)
        kb = 0;

    // all particles should be in a valid cell: validate the grid coordinates
    // *before* folding them into a flat index with ci()
    if (ib < 0 || ib >= (int)ci.getW() ||
        jb < 0 || jb >= (int)ci.getH() ||
        kb < 0 || kb >= (int)ci.getD())
        {
        // but ghost particles that are out of range should not produce an error
        if (idx < N)
            (*d_conditions).z = idx + 1;
        return;
        }

    unsigned int bin = ci(ib, jb, kb);

    // reserve a slot in the cell; atomicInc returns the previous cell size
    unsigned int size = atomicInc(&d_cell_size[bin], 0xffffffff);
    if (size < Nmax)
        {
        unsigned int write_pos = cli(size, bin);
        d_xyzf[write_pos] = make_scalar4(pos.x, pos.y, pos.z, flag);
        if (d_tdb != NULL)
            d_tdb[write_pos] = make_scalar4(type, diameter, body, 0);
        if (d_cell_orientation != NULL)
            d_cell_orientation[write_pos] = orientation;
        if (d_cell_idx != NULL)
            d_cell_idx[write_pos] = idx;
        }
    else
        {
        // handle overflow: record the largest size seen so the caller can resize
        atomicMax(&(*d_conditions).x, size + 1);
        }
    }

//! Host driver: zeroes the cell sizes and launches gpu_compute_cell_list_kernel()
/*! The kernel's maximum supported block size is queried once and cached; the
    requested \a block_size is clamped to it. Returns the error from the memset,
    otherwise hipSuccess (kernel launch errors surface at the next sync).
*/
hipError_t gpu_compute_cell_list(unsigned int *d_cell_size,
                                 Scalar4 *d_xyzf,
                                 Scalar4 *d_tdb,
                                 Scalar4 *d_cell_orientation,
                                 unsigned int *d_cell_idx,
                                 uint3 *d_conditions,
                                 const Scalar4 *d_pos,
                                 const Scalar4 *d_orientation,
                                 const Scalar *d_charge,
                                 const Scalar *d_diameter,
                                 const unsigned int *d_body,
                                 const unsigned int N,
                                 const unsigned int n_ghost,
                                 const unsigned int Nmax,
                                 const bool flag_charge,
                                 const bool flag_type,
                                 const BoxDim& box,
                                 const Index3D& ci,
                                 const Index2D& cli,
                                 const Scalar3& ghost_width,
                                 const unsigned int block_size)
    {
    hipError_t err;

    // reset all cell counters before binning
    err = hipMemsetAsync(d_cell_size, 0, sizeof(unsigned int) * ci.getNumElements(), 0);
    if (err != hipSuccess)
        return err;

    // query the kernel's maximum block size once and cache it
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_compute_cell_list_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    unsigned int run_block_size = min(block_size, max_block_size);
    int n_blocks = (N + n_ghost) / run_block_size + 1;

    hipLaunchKernelGGL(gpu_compute_cell_list_kernel,
                       dim3(n_blocks),
                       dim3(run_block_size),
                       0,
                       0,
                       d_cell_size,
                       d_xyzf,
                       d_tdb,
                       d_cell_orientation,
                       d_cell_idx,
                       d_conditions,
                       d_pos,
                       d_orientation,
                       d_charge,
                       d_diameter,
                       d_body,
                       N,
                       n_ghost,
                       Nmax,
                       flag_charge,
                       flag_type,
                       box,
                       ci,
                       cli,
                       ghost_width);

    return hipSuccess;
    }

// ********************* Following are helper functions, structs, etc for the 1x optimized cell list build

//! \internal
/*! \param a First element
    \param b Second element

    The two elements are swapped
*/
template<class T>
__device__ inline void swap(T& a, T& b)
    {
    T tmp = a;
    a = b;
    b = tmp;
    }

//! \internal
/*! In-place bitonic sort of one block's worth of data in shared memory.
    \param shared Pointer to shared memory to bitonic sort
    \tparam block_size Number of elements (and threads); must be a power of two
*/
template<class T, unsigned int block_size>
__device__ inline void bitonic_sort(T *shared)
    {
    unsigned int tid = threadIdx.x;

    // Parallel bitonic sort.
    for (int k = 2; k <= block_size; k *= 2)
        {
        // Bitonic merge:
        for (int j = k / 2; j > 0; j /= 2)
            {
            int ixj = tid ^ j;

            if (ixj > tid)
                {
                if ((tid & k) == 0)
                    {
                    // ascending subsequence
                    if (shared[tid] > shared[ixj])
                        swap(shared[tid], shared[ixj]);
                    }
                else
                    {
                    // descending subsequence
                    if (shared[tid] < shared[ixj])
                        swap(shared[tid], shared[ixj]);
                    }
                }

            __syncthreads();
            }
        }
    }

//! \internal
/*! \brief Pair a particle and its assigned bin together for sorting
*/
struct bin_id_pair
    {
    unsigned int bin;          //!< Cell index
    unsigned int id;           //!< Particle id
    unsigned int start_offset; //!< Write offset
    };

//! \internal
/*! \param bin Cell index
    \param id Particle id
*/
__device__ inline bin_id_pair make_bin_id_pair(unsigned int bin, unsigned int id)
    {
    bin_id_pair res;
    res.bin = bin;
    res.id = id;
    res.start_offset = 0;
    return res;
    }

//! \internal
/*! Lexicographic less-than: first by bin, ties broken by particle id.
    \param a First element
    \param b Second element
*/
__device__ inline bool operator<(const bin_id_pair& a, const bin_id_pair& b)
    {
    if (a.bin == b.bin)
        return (a.id < b.id);
    else
        return (a.bin < b.bin);
    }

//! \internal
/*! Lexicographic greater-than: first by bin, ties broken by particle id.
    \param a First element
    \param b Second element
*/
__device__ inline bool operator>(const bin_id_pair& a, const bin_id_pair& b)
    {
    if (a.bin == b.bin)
        return (a.id > b.id);
    else
        return (a.bin > b.bin);
    }

//! \internal
/*! Naive double-buffered inclusive prefix sum over one block.
    \param temp Temporary array in shared memory to scan (2 * block_size elements)
*/
template<class T, unsigned int block_size>
__device__ inline void scan_naive(T *temp)
    {
    int thid = threadIdx.x;

    int pout = 0;
    int pin = 1;

    for (int offset = 1; offset < block_size; offset *= 2)
        {
        // ping-pong the two buffers
        pout = 1 - pout;
        pin = 1 - pout;
        __syncthreads();

        temp[pout * block_size + thid] = temp[pin * block_size + thid];

        if (thid >= offset)
            temp[pout * block_size + thid] += temp[pin * block_size + thid - offset];
        }

    __syncthreads();

    // bring the data back to the initial array
    if (pout == 1)
        {
        pout = 1 - pout;
        pin = 1 - pout;
        temp[pout * block_size + thid] = temp[pin * block_size + thid];
        __syncthreads();
        }
    }

//! Fill the (cell, particle) key table and the identity permutation for sorting
/*! One thread per cell-list slot. Empty slots get particle index UINT_MAX so
    they sort after occupied slots of the same cell.
*/
__global__ void gpu_fill_indices_kernel(unsigned int cl_size,
                                        uint2 *d_idx,
                                        unsigned int *d_sort_permutation,
                                        unsigned int *d_cell_idx,
                                        unsigned int *d_cell_size,
                                        Index3D ci,
                                        Index2D cli)
    {
    unsigned int cell_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (cell_idx >= cl_size)
        return;

    unsigned int icell = cell_idx / cli.getW();
    unsigned int pidx = UINT_MAX;

    if (icell < ci.getNumElements())
        {
        unsigned int my_cell_size = d_cell_size[icell];
        unsigned int ilocal = cell_idx % cli.getW();
        if (ilocal < my_cell_size)
            {
            pidx = d_cell_idx[cell_idx];
            }
        }

    // pack cell idx and particle idx into uint2
    uint2 result;
    result.x = icell;
    result.y = pidx;

    // write out result
    d_idx[cell_idx] = result;

    // write identity permutation
    d_sort_permutation[cell_idx] = cell_idx;
    }

//! Lexicographic comparison operator on uint2
struct comp_less_uint2
    {
    __device__ bool operator()(const uint2 a, const uint2 b)
        {
        return a.x < b.x || (a.x == b.x && a.y < b.y);
        }
    };

//! Gather cell-list entries into the sorted order given by the permutation
/*! One thread per cell-list slot; each optional array is only permuted when
    both the source and destination pointers are non-NULL.
*/
__global__ void gpu_apply_sorted_cell_list_order(unsigned int cl_size,
                                                 unsigned int *d_cell_idx,
                                                 unsigned int *d_cell_idx_new,
                                                 Scalar4 *d_xyzf,
                                                 Scalar4 *d_xyzf_new,
                                                 Scalar4 *d_tdb,
                                                 Scalar4 *d_tdb_new,
                                                 Scalar4 *d_cell_orientation,
                                                 Scalar4 *d_cell_orientation_new,
                                                 unsigned int *d_sort_permutation,
                                                 Index2D cli)
    {
    unsigned int cell_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (cell_idx >= cl_size)
        return;

    unsigned int perm_idx = d_sort_permutation[cell_idx];

    d_xyzf_new[cell_idx] = d_xyzf[perm_idx];
    if (d_cell_idx)
        d_cell_idx_new[cell_idx] = d_cell_idx[perm_idx];
    if (d_tdb)
        d_tdb_new[cell_idx] = d_tdb[perm_idx];
    if (d_cell_orientation)
        d_cell_orientation_new[cell_idx] = d_cell_orientation[perm_idx];
    }

/*! Driver function to sort the cell list on the GPU

   This applies lexicographical order to cell idx, particle idx pairs
   \param d_cell_size List of cell sizes
   \param d_xyzf List of coordinates and flag
   \param d_tdb List type diameter and body index
   \param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
   \param d_sort_permutation Temporary array for storing the permuted cell list indices
   \param ci Cell indexer
   \param cli Cell list indexer
   \param mgpu_context ModernGPU context

   \returns the first hipMemcpy error encountered, or hipSuccess
 */
hipError_t gpu_sort_cell_list(unsigned int *d_cell_size,
                              Scalar4 *d_xyzf,
                              Scalar4 *d_xyzf_new,
                              Scalar4 *d_tdb,
                              Scalar4 *d_tdb_new,
                              Scalar4 *d_cell_orientation,
                              Scalar4 *d_cell_orientation_new,
                              unsigned int *d_cell_idx,
                              unsigned int *d_cell_idx_new,
                              uint2 *d_sort_idx,
                              unsigned int *d_sort_permutation,
                              const Index3D ci,
                              const Index2D cli,
                              mgpu::ContextPtr mgpu_context)
    {
    unsigned int block_size = 256;

    // fill indices table with cell idx/particle idx pairs
    dim3 threads(block_size);
    dim3 grid(cli.getNumElements() / block_size + 1);

    hipLaunchKernelGGL(gpu_fill_indices_kernel,
                       grid,
                       threads,
                       0,
                       0,
                       cli.getNumElements(),
                       d_sort_idx,
                       d_sort_permutation,
                       d_cell_idx,
                       d_cell_size,
                       ci,
                       cli);

    // locality sort on those pairs
    mgpu::LocalitySortPairs(d_sort_idx,
                            d_sort_permutation,
                            cli.getNumElements(),
                            *mgpu_context,
                            comp_less_uint2());

    // apply sorted order
    hipLaunchKernelGGL(gpu_apply_sorted_cell_list_order,
                       grid,
                       threads,
                       0,
                       0,
                       cli.getNumElements(),
                       d_cell_idx,
                       d_cell_idx_new,
                       d_xyzf,
                       d_xyzf_new,
                       d_tdb,
                       d_tdb_new,
                       d_cell_orientation,
                       d_cell_orientation_new,
                       d_sort_permutation,
                       cli);

    // copy back permuted arrays to original ones, propagating any copy errors
    // (previously the return codes of these copies were silently discarded)
    hipError_t err;
    err = hipMemcpy(d_xyzf,
                    d_xyzf_new,
                    sizeof(Scalar4) * cli.getNumElements(),
                    hipMemcpyDeviceToDevice);
    if (err != hipSuccess)
        return err;

    err = hipMemcpy(d_cell_idx,
                    d_cell_idx_new,
                    sizeof(unsigned int) * cli.getNumElements(),
                    hipMemcpyDeviceToDevice);
    if (err != hipSuccess)
        return err;

    if (d_tdb)
        {
        err = hipMemcpy(d_tdb,
                        d_tdb_new,
                        sizeof(Scalar4) * cli.getNumElements(),
                        hipMemcpyDeviceToDevice);
        if (err != hipSuccess)
            return err;
        }

    if (d_cell_orientation)
        {
        err = hipMemcpy(d_cell_orientation,
                        d_cell_orientation_new,
                        sizeof(Scalar4) * cli.getNumElements(),
                        hipMemcpyDeviceToDevice);
        if (err != hipSuccess)
            return err;
        }

    return hipSuccess;
    }
CellListGPU.cuh
// Copyright (c) 2009-2023 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #ifndef __CELLLISTGPU_CUH__ #define __CELLLISTGPU_CUH__ #if defined(ENABLE_HIP) #include <hip/hip_runtime.h> #endif #include "GPUPartition.cuh" #include "HOOMDMath.h" #include "Index1D.h" #include "ParticleData.cuh" /*! \file CellListGPU.cuh \brief Declares GPU kernel code for cell list generation on the GPU */ namespace hoomd { //! Kernel driver for gpu_compute_cell_list_kernel() void gpu_compute_cell_list(unsigned int* d_cell_size, Scalar4* d_xyzf, uint2* d_type_body, Scalar4* d_cell_orientation, unsigned int* d_cell_idx, uint3* d_conditions, const Scalar4* d_pos, const Scalar4* d_orientation, const Scalar* d_charge, const Scalar* d_diameter, const unsigned int* d_body, const unsigned int N, const unsigned int n_ghost, const unsigned int Nmax, const bool flag_charge, const bool flag_type, const BoxDim& box, const Index3D& ci, const Index2D& cli, const Scalar3& ghost_width, const unsigned int block_size, const GPUPartition& gpu_partition); //! 
Driver function to combine the cell lists from different GPUs into one hipError_t gpu_combine_cell_lists(const unsigned int* d_cell_size_scratch, unsigned int* d_cell_size, const unsigned int* d_idx_scratch, unsigned int* d_idx, const Scalar4* d_xyzf_scratch, Scalar4* d_xyzf, const uint2* d_type_body_scratch, uint2* d_type_body, const Scalar4* d_cell_orientation_scratch, Scalar4* d_cell_orientation, const Index2D cli, unsigned int ngpu, const unsigned int block_size, const unsigned int Nmax, uint3* d_conditions, const GPUPartition& gpu_partition); hipError_t gpu_sort_cell_list(unsigned int* d_cell_size, Scalar4* d_xyzf, Scalar4* d_xyzf_new, uint2* d_type_body, uint2* d_type_body_new, Scalar4* d_cell_orientation, Scalar4* d_cell_orientation_new, unsigned int* d_cell_idx, unsigned int* d_cell_idx_new, uint2* d_sort_idx, unsigned int* d_sort_permutation, const Index3D ci, const Index2D cli); } // end namespace hoomd #endif
ff0a71e86f3bc0da983e758e99c32063edf79f58.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stddef.h> #include <limits.h> #include<string.h> __global__ void fun(int *z){ int x = 5; int a[-x]; //printf("%d\n", a[-x]); } int main(void) { int z; int *dev_z; hipMalloc((void**)&dev_z, sizeof(int)); hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_z); hipMemcpy(&z, dev_z, sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_z); return 0; }
ff0a71e86f3bc0da983e758e99c32063edf79f58.cu
#include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stddef.h> #include <limits.h> #include<string.h> __global__ void fun(int *z){ int x = 5; int a[-x]; //printf("%d\n", a[-x]); } int main(void) { int z; int *dev_z; cudaMalloc((void**)&dev_z, sizeof(int)); fun<<<1,1>>>(dev_z); cudaMemcpy(&z, dev_z, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_z); return 0; }
728163045f391d1d04f27f4f0bf94a256674f2e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) ICG. All rights reserved. * * Institute for Computer Graphics and Vision * Graz University of Technology / Austria * * * This software is distributed WITHOUT ANY WARRANTY; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the above copyright notices for more information. * * * Project : ImageUtilities * Module : Math * Class : none * Language : C++ * Description : Implementation of Cuda wrappers for statistics functions * * Author : Manuel Werlberger * EMail : werlberger@icg.tugraz.at * */ #ifndef IUMATH_STATISTICS_CU #define IUMATH_STATISTICS_CU #include <iucore/copy.h> #include <iucore/setvalue.h> #include <iucutil.h> #include <iucore/iutextures.cuh> #include "statistics.cuh" #ifdef CUDA_NO_SM12_ATOMIC_INTRINSICS #error Compilation target does not support shared-memory atomics #endif namespace iuprivate { //////////////////////////////////////////////////////////////////////////////// __device__ inline void histogramAtomicAdd(float* address, float value) { float old = value; while ((old = atomicExch(address, atomicExch(address, 0.0f)+old))!=0.0f); } /****************************************************************************** CUDA KERNELS *******************************************************************************/ /* KERNELS FOR MIN/MAX */ // kernel; find min/max; 8u_C1 __global__ void cuMinMaxXKernel_8u_C1(unsigned char* min, unsigned char* max, int xoff, int yoff, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float xx = x+xoff+0.5f; float yy = y+yoff+0.5f; unsigned char cur_min = tex2D(tex1_8u_C1__, xx, yy); unsigned char cur_max = tex2D(tex1_8u_C1__, xx, yy); // find minima of columns if (x<width) { unsigned char val; for(int y = 1; y < height; ++y) { yy = y+yoff+0.5f; val = tex2D(tex1_8u_C1__, xx, yy); if(val < cur_min) 
cur_min = val; if(val > cur_max) cur_max = val; } min[x] = cur_min; max[x] = cur_max; } } __global__ void cuMinMaxXKernel2_8u_C1(unsigned char* min, unsigned char* max, int xoff, int yoff, int width, int height, const unsigned char* img, size_t stride) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned char cur_min = img[y*stride+x]; unsigned char cur_max = cur_min; // find minima of columns if (x<width) { unsigned char val; for(int y = 1; y < height; ++y) { val = img[y*stride+x]; if(val < cur_min) cur_min = val; if(val > cur_max) cur_max = val; } min[x] = cur_min; max[x] = cur_max; } } // kernel; find min/max; 8u_C4 __global__ void cuMinMaxXKernel_8u_C4(uchar4* min, uchar4* max, int xoff, int yoff, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float xx = x+xoff+0.5f; float yy = y+yoff+0.5f; uchar4 cur_min = tex2D(tex1_8u_C4__, xx, yy); uchar4 cur_max = tex2D(tex1_8u_C4__, xx, yy); // find minima of columns if (x<width) { uchar4 val; for(int y = 1; y < height; ++y) { yy = y+yoff+0.5f; val = tex2D(tex1_8u_C4__, xx, yy); if(val.x < cur_min.x) cur_min.x = val.x; if(val.y < cur_min.y) cur_min.y = val.y; if(val.z < cur_min.z) cur_min.z = val.z; if(val.w < cur_min.w) cur_min.w = val.w; if(val.x > cur_max.x) cur_max.x = val.x; if(val.y > cur_max.y) cur_max.y = val.y; if(val.z > cur_max.z) cur_max.z = val.z; if(val.w > cur_max.w) cur_max.w = val.w; } min[x] = cur_min; max[x] = cur_max; } } // kernel; find min/max; 32f_C1 __global__ void cuMinMaxXKernel_32f_C1(float* min, float* max, int xoff, int yoff, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; // find minima of columns if (x<width) { float xx = x+xoff+0.5f; float yy = y+yoff+0.5f; float cur_min = tex2D(tex1_32f_C1__, xx, yy); float cur_max = tex2D(tex1_32f_C1__, xx, yy); float val; for(int y = 1; y 
< height; ++y) { yy = y+yoff+0.5f; val = tex2D(tex1_32f_C1__, xx, yy); if(val < cur_min) cur_min = val; if(val > cur_max) cur_max = val; } min[x] = cur_min; max[x] = cur_max; } } // kernel; find min/max; 32f_C2 __global__ void cuMinMaxXKernel_32f_C2(float2* min, float2* max, int xoff, int yoff, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float xx = x+xoff+0.5f; float yy = y+yoff+0.5f; float2 cur_min = tex2D(tex1_32f_C2__, xx, yy); float2 cur_max = tex2D(tex1_32f_C2__, xx, yy); // find minima of columns if (x<width) { float2 val; for(int y = 1; y < height; ++y) { yy = y+yoff+0.5f; val = tex2D(tex1_32f_C2__, xx, yy); if(val.x < cur_min.x) cur_min.x = val.x; if(val.y < cur_min.y) cur_min.y = val.y; if(val.x > cur_max.x) cur_max.x = val.x; if(val.y > cur_max.y) cur_max.y = val.y; } min[x] = cur_min; max[x] = cur_max; } } // kernel; find min/max; 32f_C4 __global__ void cuMinMaxXKernel_32f_C4(float4* min, float4* max, int xoff, int yoff, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float xx = x+xoff+0.5f; float yy = y+yoff+0.5f; float4 cur_min = tex2D(tex1_32f_C4__, xx, yy); float4 cur_max = tex2D(tex1_32f_C4__, xx, yy); // find minima of columns if (x<width) { float4 val; for(int y = 0; y < height; ++y) { yy = y+yoff+0.5f; val = tex2D(tex1_32f_C4__, xx, yy); if(val.x < cur_min.x) cur_min.x = val.x; if(val.y < cur_min.y) cur_min.y = val.y; if(val.z < cur_min.z) cur_min.z = val.z; if(val.w < cur_min.w) cur_min.w = val.w; if(val.x > cur_max.x) cur_max.x = val.x; if(val.y > cur_max.y) cur_max.y = val.y; if(val.z > cur_max.z) cur_max.z = val.z; if(val.w > cur_max.w) cur_max.w = val.w; } min[x] = cur_min; max[x] = cur_max; } } /* KERNELS FOR min + min COORDS */ // kernel; find min + min idx; 32f_C1 __global__ void cuMinXKernel_32f_C1(float* min, unsigned short* min_col_idx, int xoff, int yoff, int width, int 
height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; // find minima of columns if (x<width) { float xx = x+xoff+0.5f; float yy = y+yoff+0.5f; unsigned short cur_min_col_idx = 0; float cur_min = tex2D(tex1_32f_C1__, xx, yy); float val; for(unsigned short y = 1; y < height; ++y) { yy = y+yoff+0.5f; val = tex2D(tex1_32f_C1__, xx, yy); if(val < cur_min) { cur_min_col_idx = y; cur_min = val; } } min_col_idx[x] = cur_min_col_idx; min[x] = cur_min; } } /* KERNELS FOR MAX + MAX COORDS */ // kernel; find max + max idx; 32f_C1 __global__ void cuMaxXKernel_32f_C1(float* max, float* max_col_idx, int xoff, int yoff, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float xx = x+xoff+0.5f; float yy = y+yoff+0.5f; float cur_max_col_idx = 0.0f; float cur_max = tex2D(tex1_32f_C1__, xx, yy); // find minima of columns if (x<width) { float val; for(int y = 0; y < height; ++y) { yy = y+yoff+0.5f; val = tex2D(tex1_32f_C1__, xx, yy); if(val > cur_max) { cur_max_col_idx = (float)y; cur_max = val; } } max_col_idx[x] = cur_max_col_idx; max[x] = cur_max; } } /* KERNELS FOR SUM */ // kernel; compute sum; 8u_C1 __global__ void cuSumColKernel_8u_C1(unsigned char* sum, int xoff, int yoff, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float xx = x+xoff+0.5f; float yy = y+yoff+0.5f; float cur_sum = 0.0f; // compute sum of each column if (xx<width+0.5f) { for(int y = yoff; y < height; ++y) { yy = y+0.5f; cur_sum += tex2D(tex1_8u_C1__, xx, yy); } sum[x] = cur_sum; } } // kernel; compute sum; 32f_C1 __global__ void cuSumColKernel_32f_C1(float* sum, int xoff, int yoff, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float xx = x+xoff+0.5f; float yy = y+yoff+0.5f; float cur_sum = 0.0f; // compute sum of each 
column if ((x+xoff)<width) { for(int y = yoff; y < height; ++y) { yy = y+0.5f; cur_sum += tex2D(tex1_32f_C1__, xx, yy); } sum[x] = cur_sum; } } /* KERNELS for NORM OF DIFFERENCES */ // kernel: compute L1 norm; |image1-image2|; __global__ void cuNormDiffL1Kernel_32f_C1(float* dst, size_t stride, int xoff, int yoff, int width, int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = fabs(tex2D(tex1_32f_C1__, xx, yy) - tex2D(tex2_32f_C1__, xx, yy)); } } // kernel: compute L1 norm; |image-value|; __global__ void cuNormDiffValueL1Kernel_32f_C1(float value, float* dst, size_t stride, int xoff, int yoff, int width, int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = fabs(tex2D(tex1_32f_C1__, xx, yy) - value); } } // kernel: compute L2 norm; ||image1-image2||; __global__ void cuNormDiffL2Kernel_32f_C1(float* dst, size_t stride, int xoff, int yoff, int width, int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = iu::sqr(tex2D(tex1_32f_C1__, xx, yy) - tex2D(tex2_32f_C1__, xx, yy)); } } // kernel: compute L2 norm; ||image-value||; __global__ void cuNormDiffValueL2Kernel_32f_C1(float value, float* dst, size_t stride, int xoff, int yoff, int width, int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { 
dst[oc] = iu::sqr(tex2D(tex1_32f_C1__, xx, yy) - value); } } /****************************************************************************** CUDA INTERFACES *******************************************************************************/ /* WRAPPERS FOR MIN/MAX */ // wrapper: find min/max; 8u_C1 IuStatus cuMinMax(const iu::ImageGpu_8u_C1 *src, const IuRect &roi, unsigned char& min_C1, unsigned char& max_C1) { // prepare and bind texture tex1_8u_C1__.filterMode = hipFilterModePoint; tex1_8u_C1__.addressMode[0] = hipAddressModeClamp; tex1_8u_C1__.addressMode[1] = hipAddressModeClamp; tex1_8u_C1__.normalized = false; hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar1>(); // printf("bind texture\n"); hipBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation const unsigned int block_width = 512; dim3 dimBlock(block_width, 1, 1); dim3 dimGridX(iu::divUp(roi.width, block_width), 1); // temporary memory for row sums on the host int num_row_sums = roi.width; iu::LinearDeviceMemory_8u_C1 row_mins(num_row_sums); iu::LinearDeviceMemory_8u_C1 row_maxs(num_row_sums); #if 1 hipLaunchKernelGGL(( cuMinMaxXKernel_8u_C1) , dim3(dimGridX), dim3(dimBlock) , 0, 0, row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height); #else hipLaunchKernelGGL(( cuMinMaxXKernel2_8u_C1) , dim3(dimGridX), dim3(dimBlock) , 0, 0, row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height, src->data(), src->stride()); #endif iu::LinearHostMemory_8u_C1 h_row_mins(num_row_sums); iu::LinearHostMemory_8u_C1 h_row_maxs(num_row_sums); iuprivate::copy(&row_mins, &h_row_mins); iuprivate::copy(&row_maxs, &h_row_maxs); min_C1 = *h_row_mins.data(); max_C1 = *h_row_maxs.data(); for (int i = 0; i < num_row_sums; ++i) { // printf("#%d: %d / %d\n", i, h_row_mins.data(i)[0], h_row_maxs.data(i)[0]); min_C1 = IUMIN(min_C1, *h_row_mins.data(i)); max_C1 = IUMAX(max_C1, *h_row_maxs.data(i)); } 
hipUnbindTexture(&tex1_8u_C1__); // printf("min/max=%d/%d\n", min_C1, max_C1); return iu::checkCudaErrorState(); } // wrapper: find min/max; 8u_C4 IuStatus cuMinMax(const iu::ImageGpu_8u_C4 *src, const IuRect &roi, uchar4& min_C4, uchar4& max_C4) { // prepare and bind texture tex1_8u_C4__.filterMode = hipFilterModePoint; tex1_8u_C4__.addressMode[0] = hipAddressModeClamp; tex1_8u_C4__.addressMode[1] = hipAddressModeClamp; tex1_8u_C4__.normalized = false; hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar4>(); hipBindTexture2D(0, &tex1_8u_C4__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation const unsigned int block_width = 512; dim3 dimBlock(block_width, 1, 1); dim3 dimGridX(iu::divUp(roi.width, block_width), 1); // temporary memory for row sums on the host int num_row_sums = roi.width; iu::LinearDeviceMemory_8u_C4 row_mins(num_row_sums); iu::LinearDeviceMemory_8u_C4 row_maxs(num_row_sums); hipLaunchKernelGGL(( cuMinMaxXKernel_8u_C4) , dim3(dimGridX), dim3(dimBlock) , 0, 0, row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height); iu::LinearHostMemory_8u_C4 h_row_mins(num_row_sums); iu::LinearHostMemory_8u_C4 h_row_maxs(num_row_sums); iuprivate::copy(&row_mins, &h_row_mins); iuprivate::copy(&row_maxs, &h_row_maxs); min_C4 = *h_row_mins.data(0); max_C4 = *h_row_maxs.data(0); for (int i = 1; i < num_row_sums; ++i) { min_C4.x = IUMIN(min_C4.x, h_row_mins.data(i)->x); min_C4.y = IUMIN(min_C4.y, h_row_mins.data(i)->y); min_C4.z = IUMIN(min_C4.z, h_row_mins.data(i)->z); min_C4.w = IUMIN(min_C4.w, h_row_mins.data(i)->w); max_C4.x = IUMAX(max_C4.x, h_row_maxs.data(i)->x); max_C4.y = IUMAX(max_C4.y, h_row_maxs.data(i)->y); max_C4.z = IUMAX(max_C4.z, h_row_maxs.data(i)->z); max_C4.w = IUMAX(max_C4.w, h_row_maxs.data(i)->w); } hipUnbindTexture(&tex1_8u_C4__); return iu::checkCudaErrorState(); } // wrapper: find min/max; 32f_C1 IuStatus cuMinMax(const iu::ImageGpu_32f_C1 *src, const IuRect &roi, float& min_C1, 
float& max_C1) { // prepare and bind texture tex1_32f_C1__.filterMode = hipFilterModePoint; tex1_32f_C1__.addressMode[0] = hipAddressModeClamp; tex1_32f_C1__.addressMode[1] = hipAddressModeClamp; tex1_32f_C1__.normalized = false; hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>(); hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation const unsigned int block_width = 512; dim3 dimBlock(block_width, 1, 1); dim3 dimGridX(iu::divUp(roi.width, block_width), 1); // temporary memory for row sums on the host int num_row_sums = roi.width; iu::LinearDeviceMemory_32f_C1 row_mins(num_row_sums); iu::LinearDeviceMemory_32f_C1 row_maxs(num_row_sums); hipLaunchKernelGGL(( cuMinMaxXKernel_32f_C1) , dim3(dimGridX), dim3(dimBlock) , 0, 0, row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height); iu::LinearHostMemory_32f_C1 h_row_mins(num_row_sums); iu::LinearHostMemory_32f_C1 h_row_maxs(num_row_sums); iuprivate::copy(&row_mins, &h_row_mins); iuprivate::copy(&row_maxs, &h_row_maxs); min_C1 = *h_row_mins.data(0); max_C1 = *h_row_maxs.data(0); for (int i = 1; i < num_row_sums; ++i) { min_C1 = IUMIN(min_C1, *h_row_mins.data(i)); max_C1 = IUMAX(max_C1, *h_row_maxs.data(i)); } hipUnbindTexture(&tex1_32f_C1__); return iu::checkCudaErrorState(); } // kernel; find min/max; 32f_C1 __global__ void cuMinMaxXYKernel_32f_C1(float* minim, float* maxim, int width, int height, int m_stride, float* data, int depth, int d_stride, int d_slice_stride) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; // find minima of columns if (x<width && y<height) { float cur_min = data[y*d_stride + x]; float cur_max = cur_min; float val; for(int z = 1; z < depth; z++) { val = data[z*d_slice_stride + y*d_stride + x]; if(val < cur_min) cur_min = val; if(val > cur_max) cur_max = val; } minim[y*m_stride + x] = cur_min; maxim[y*m_stride + x] = cur_max; } } // kernel; 
find min; 32f_C1 __global__ void cuMinXKernel_32f_C1(float* minim, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; // find minima of columns if (x<width) { float xx = x+0.5f; float yy = y+0.5f; float cur_min = tex2D(tex1_32f_C1__, xx, yy); float val; for(int y = 1; y < height; ++y) { yy = y+0.5f; val = tex2D(tex1_32f_C1__, xx, yy); if(val < cur_min) cur_min = val; } minim[x] = cur_min; } } // kernel; find max; 32f_C1 __global__ void cuMaxXKernel_32f_C1(float* maxim, int width, int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; // find minima of columns if (x<width) { float xx = x+0.5f; float yy = y+0.5f; float cur_max = tex2D(tex1_32f_C1__, xx, yy); float val; for(int y = 1; y < height; ++y) { yy = y+0.5f; val = tex2D(tex1_32f_C1__, xx, yy); if(val > cur_max) cur_max = val; } maxim[x] = cur_max; } } // wrapper: find min/max; 32f_C1 IuStatus cuMinMax(iu::VolumeGpu_32f_C1 *src, float& min_C1, float& max_C1) { iu::ImageGpu_32f_C1 minim(src->width(), src->height()); iu::ImageGpu_32f_C1 maxim(src->width(), src->height()); const unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(src->width(), dimBlock.x), iu::divUp(src->height(), dimBlock.y)); hipLaunchKernelGGL(( cuMinMaxXYKernel_32f_C1), dim3(dimGrid),dim3(dimBlock) , 0, 0, minim.data(), maxim.data(), minim.width(), minim.height(), minim.stride(), src->data(), src->depth(), src->stride(), src->slice_stride()); // prepare and bind texture tex1_32f_C1__.filterMode = hipFilterModePoint; tex1_32f_C1__.addressMode[0] = hipAddressModeClamp; tex1_32f_C1__.addressMode[1] = hipAddressModeClamp; tex1_32f_C1__.normalized = false; hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>(); // fragmentation const unsigned int block_width = 512; dim3 dimBlockX(block_width, 1, 1); dim3 dimGridX(iu::divUp(minim.width(), block_width), 1); // find 
minimum hipBindTexture2D(0, &tex1_32f_C1__, minim.data(), &channel_desc, minim.width(), minim.height(), minim.pitch()); int num_row_sums = minim.width(); iu::LinearDeviceMemory_32f_C1 row_mins(num_row_sums); hipLaunchKernelGGL(( cuMinXKernel_32f_C1), dim3(dimGridX), dim3(dimBlockX), 0, 0, row_mins.data(), minim.width(), minim.height()); iu::LinearHostMemory_32f_C1 h_row_mins(num_row_sums); iuprivate::copy(&row_mins, &h_row_mins); min_C1 = *h_row_mins.data(0); for (int i = 1; i < num_row_sums; ++i) min_C1 = IUMIN(min_C1, *h_row_mins.data(i)); hipUnbindTexture(&tex1_32f_C1__); // find maximum hipBindTexture2D(0, &tex1_32f_C1__, maxim.data(), &channel_desc, maxim.width(), maxim.height(), maxim.pitch()); iu::LinearDeviceMemory_32f_C1 row_maxs(num_row_sums); hipLaunchKernelGGL(( cuMaxXKernel_32f_C1), dim3(dimGridX), dim3(dimBlockX), 0, 0, row_maxs.data(), maxim.width(), maxim.height()); iu::LinearHostMemory_32f_C1 h_row_maxs(num_row_sums); iuprivate::copy(&row_maxs, &h_row_maxs); max_C1 = *h_row_maxs.data(0); for (int i = 1; i < num_row_sums; ++i) max_C1 = IUMAX(max_C1, *h_row_maxs.data(i)); hipUnbindTexture(&tex1_32f_C1__); return iu::checkCudaErrorState(); } // wrapper: find min/max; 32f_C2 IuStatus cuMinMax(const iu::ImageGpu_32f_C2 *src, const IuRect &roi, float2& min_C2, float2& max_C2) { // prepare and bind texture tex1_32f_C2__.filterMode = hipFilterModePoint; tex1_32f_C2__.addressMode[0] = hipAddressModeClamp; tex1_32f_C2__.addressMode[1] = hipAddressModeClamp; tex1_32f_C2__.normalized = false; hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float2>(); hipBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation const unsigned int block_width = 512; dim3 dimBlock(block_width, 1, 1); dim3 dimGridX(iu::divUp(roi.width, block_width), 1); // temporary memory for row sums on the host int num_row_sums = roi.width; iu::LinearDeviceMemory_32f_C2 row_mins(num_row_sums); iu::LinearDeviceMemory_32f_C2 
row_maxs(num_row_sums); hipLaunchKernelGGL(( cuMinMaxXKernel_32f_C2) , dim3(dimGridX), dim3(dimBlock) , 0, 0, row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height); iu::LinearHostMemory_32f_C2 h_row_mins(num_row_sums); iu::LinearHostMemory_32f_C2 h_row_maxs(num_row_sums); iuprivate::copy(&row_mins, &h_row_mins); iuprivate::copy(&row_maxs, &h_row_maxs); min_C2 = *h_row_mins.data(0); max_C2 = *h_row_maxs.data(0); for (int i = 1; i < num_row_sums; ++i) { min_C2.x = IUMIN(min_C2.x, h_row_mins.data(i)->x); min_C2.y = IUMIN(min_C2.y, h_row_mins.data(i)->y); max_C2.x = IUMAX(max_C2.x, h_row_maxs.data(i)->x); max_C2.y = IUMAX(max_C2.y, h_row_maxs.data(i)->y); } hipUnbindTexture(&tex1_32f_C2__); return iu::checkCudaErrorState(); } // wrapper: find min/max; 32f_C4 IuStatus cuMinMax(const iu::ImageGpu_32f_C4 *src, const IuRect &roi, float4& min_C4, float4& max_C4) { // prepare and bind texture tex1_32f_C4__.filterMode = hipFilterModePoint; tex1_32f_C4__.addressMode[0] = hipAddressModeClamp; tex1_32f_C4__.addressMode[1] = hipAddressModeClamp; tex1_32f_C4__.normalized = false; hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float4>(); hipBindTexture2D(0, &tex1_32f_C4__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation const unsigned int block_width = 512; dim3 dimBlock(block_width, 1, 1); dim3 dimGridX(iu::divUp(roi.width, block_width), 1); // temporary memory for row sums on the host int num_cols = roi.width; iu::LinearDeviceMemory_32f_C4 row_mins(num_cols); iu::LinearDeviceMemory_32f_C4 row_maxs(num_cols); hipLaunchKernelGGL(( cuMinMaxXKernel_32f_C4) , dim3(dimGridX), dim3(dimBlock) , 0, 0, row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height); iu::LinearHostMemory_32f_C4 h_row_mins(num_cols); iu::LinearHostMemory_32f_C4 h_row_maxs(num_cols); iuprivate::copy(&row_mins, &h_row_mins); iuprivate::copy(&row_maxs, &h_row_maxs); min_C4 = *h_row_mins.data(0); max_C4 = *h_row_maxs.data(0); for (int i = 
1; i < num_cols; ++i) { min_C4.x = IUMIN(min_C4.x, h_row_mins.data(i)->x); min_C4.y = IUMIN(min_C4.y, h_row_mins.data(i)->y); min_C4.z = IUMIN(min_C4.z, h_row_mins.data(i)->z); min_C4.w = IUMIN(min_C4.w, h_row_mins.data(i)->w); max_C4.x = IUMAX(max_C4.x, h_row_maxs.data(i)->x); max_C4.y = IUMAX(max_C4.y, h_row_maxs.data(i)->y); max_C4.z = IUMAX(max_C4.z, h_row_maxs.data(i)->z); max_C4.w = IUMAX(max_C4.w, h_row_maxs.data(i)->w); } hipUnbindTexture(&tex1_32f_C4__); return iu::checkCudaErrorState(); } /* WRAPPERS FOR MIN + MIN COORDINATES */ // wrapper: find min + min idx; 32f_C1 IuStatus cuMin(const iu::ImageGpu_32f_C1 *src, const IuRect &roi, float& min, int& min_x, int& min_y) { // prepare and bind texture tex1_32f_C1__.filterMode = hipFilterModePoint; tex1_32f_C1__.addressMode[0] = hipAddressModeClamp; tex1_32f_C1__.addressMode[1] = hipAddressModeClamp; tex1_32f_C1__.normalized = false; hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>(); hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation const unsigned int block_width = 512; dim3 dimBlock(block_width, 1, 1); dim3 dimGridX(iu::divUp(roi.width, block_width), 1); // temporary memory for row sums on the host int num_cols = roi.width; iu::LinearDeviceMemory_32f_C1 col_mins(num_cols); iu::LinearDeviceMemory_16u_C1 col_min_idxs(num_cols); hipLaunchKernelGGL(( cuMinXKernel_32f_C1) , dim3(dimGridX), dim3(dimBlock) , 0, 0, col_mins.data(), col_min_idxs.data(), roi.x, roi.y, roi.width, roi.height); iu::LinearHostMemory_32f_C1 h_col_mins(num_cols); iuprivate::copy(&col_mins, &h_col_mins); iu::LinearHostMemory_16u_C1 h_col_min_idxs(num_cols); iuprivate::copy(&col_min_idxs, &h_col_min_idxs); min_x = roi.x; min_y = (int)(roi.y + *h_col_min_idxs.data(0)); min = *h_col_mins.data(0); for (int i = 1; i < num_cols; ++i) { if(min > *h_col_mins.data(i)) { min = *h_col_mins.data(i); min_x = roi.x + i; min_y = (int)(roi.y + *h_col_min_idxs.data(i)); 
} } hipUnbindTexture(&tex1_32f_C1__); return iu::checkCudaErrorState(); } /* WRAPPERS FOR MAX + MAX COORDINATES */ // wrapper: find max + max idx; 32f_C1 IuStatus cuMax(const iu::ImageGpu_32f_C1 *src, const IuRect &roi, float& max, int& max_x, int& max_y) { // prepare and bind texture tex1_32f_C1__.filterMode = hipFilterModePoint; tex1_32f_C1__.addressMode[0] = hipAddressModeClamp; tex1_32f_C1__.addressMode[1] = hipAddressModeClamp; tex1_32f_C1__.normalized = false; hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>(); hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation const unsigned int block_width = 512; dim3 dimBlock(block_width, 1, 1); dim3 dimGridX(iu::divUp(roi.width, block_width), 1); // temporary memory for row sums on the host int num_cols = roi.width; iu::LinearDeviceMemory_32f_C1 col_maxs(num_cols); iu::LinearDeviceMemory_32f_C1 col_max_idxs(num_cols); hipLaunchKernelGGL(( cuMaxXKernel_32f_C1) , dim3(dimGridX), dim3(dimBlock) , 0, 0, col_maxs.data(), col_max_idxs.data(), roi.x, roi.y, roi.width, roi.height); iu::LinearHostMemory_32f_C1 h_col_max_idxs(num_cols); iu::LinearHostMemory_32f_C1 h_col_maxs(num_cols); iuprivate::copy(&col_max_idxs, &h_col_max_idxs); iuprivate::copy(&col_maxs, &h_col_maxs); max_y = (int)(roi.y + *h_col_max_idxs.data(0)); max = *h_col_maxs.data(0); for (int i = 1; i < num_cols; ++i) { if(max < *h_col_maxs.data(i)) { max = *h_col_maxs.data(i); max_x = roi.x + i; max_y = (int)(roi.y + *h_col_max_idxs.data(i)); } } hipUnbindTexture(&tex1_32f_C1__); return iu::checkCudaErrorState(); } /* WRAPPERS FOR SUM */ // wrapper: compute sum; 8u_C1 IuStatus cuSummation(const iu::ImageGpu_8u_C1 *src, const IuRect &roi, long& sum) { // prepare and bind texture tex1_8u_C1__.filterMode = hipFilterModePoint; tex1_8u_C1__.addressMode[0] = hipAddressModeClamp; tex1_8u_C1__.addressMode[1] = hipAddressModeClamp; tex1_8u_C1__.normalized = false; hipChannelFormatDesc 
channel_desc = hipCreateChannelDesc<uchar1>(); hipBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation const unsigned int block_width = 512; dim3 dimBlock(block_width, 1, 1); dim3 dimGridX(iu::divUp(roi.width, block_width), 1); // temporary memory for row sums on the host int num_col_sums = roi.width; iu::LinearDeviceMemory_8u_C1 col_sums(num_col_sums); hipLaunchKernelGGL(( cuSumColKernel_8u_C1) , dim3(dimGridX), dim3(dimBlock) , 0, 0, col_sums.data(), roi.x, roi.y, roi.width, roi.height); // :TODO: 32f vs 32u? iu::LinearHostMemory_8u_C1 h_col_sums(num_col_sums); iuprivate::copy(&col_sums, &h_col_sums); sum = 0; for (int i = 0; i < num_col_sums; ++i) { sum += static_cast<unsigned int>(*h_col_sums.data(i)); } hipUnbindTexture(&tex1_8u_C1__); return iu::checkCudaErrorState(); } // wrapper: compute sum; 32f_C1 IuStatus cuSummation(const iu::ImageGpu_32f_C1 *src, const IuRect &roi, double& sum) { // prepare and bind texture tex1_32f_C1__.filterMode = hipFilterModePoint; tex1_32f_C1__.addressMode[0] = hipAddressModeClamp; tex1_32f_C1__.addressMode[1] = hipAddressModeClamp; tex1_32f_C1__.normalized = false; hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>(); hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation const unsigned int block_width = 512; dim3 dimBlock(block_width, 1, 1); dim3 dimGridX(iu::divUp(roi.width, block_width), 1); // temporary memory for row sums on the host int num_col_sums = roi.width; iu::LinearDeviceMemory_32f_C1 col_sums(num_col_sums); hipLaunchKernelGGL(( cuSumColKernel_32f_C1) , dim3(dimGridX), dim3(dimBlock) , 0, 0, col_sums.data(), roi.x, roi.y, roi.width, roi.height); iu::LinearHostMemory_32f_C1 h_col_sums(num_col_sums); iuprivate::copy(&col_sums, &h_col_sums); sum = 0.0; for (int i = 0; i < num_col_sums; ++i) { sum += *h_col_sums.data(i); } hipUnbindTexture(&tex1_32f_C1__); return 
iu::checkCudaErrorState();
}

/* WRAPPERS for NORM OF DIFFERENCES */

// wrapper: compute L1 norm; |image1-image2|;
// Binds both source images to 2D point-sampled textures, writes the per-pixel
// absolute difference into a temporary ROI-sized image, sums it on the device
// and returns sqrt of that sum in 'norm'.
// NOTE(review): sqrtf() over a sum of absolute values is unusual for an L1
// norm (the kernel does no squaring) -- kept as-is to preserve the library's
// existing behavior; confirm intent against callers.
IuStatus cuNormDiffL1(const iu::ImageGpu_32f_C1* src1, const iu::ImageGpu_32f_C1* src2,
                      const IuRect& roi, double& norm)
{
  // prepare and bind texture for the first image
  tex1_32f_C1__.filterMode = hipFilterModePoint;
  tex1_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>();
  hipBindTexture2D(0, &tex1_32f_C1__, src1->data(), &channel_desc,
                   src1->width(), src1->height(), src1->pitch());

  // prepare and bind texture for the second image
  tex2_32f_C1__.filterMode = hipFilterModePoint;
  tex2_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex2_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex2_32f_C1__.normalized = false;
  hipBindTexture2D(0, &tex2_32f_C1__, src2->data(), &channel_desc,
                   src2->width(), src2->height(), src2->pitch());

  // fragmentation: 16x16 thread blocks covering the ROI
  const unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x),
               iu::divUp(roi.height - roi.y, dimBlock.y));

  // temporary image holding the per-pixel |a-b| values
  iu::ImageGpu_32f_C1 squared_deviances(roi.width, roi.height);

  hipLaunchKernelGGL(( cuNormDiffL1Kernel_32f_C1) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
      squared_deviances.data(roi.x, roi.y), squared_deviances.stride(),
      roi.x, roi.y, roi.width, roi.height);

  double sum_squared = 0.0;
  iuprivate::cuSummation(&squared_deviances, roi, sum_squared);
  norm = sqrtf(sum_squared);

  return iu::checkCudaErrorState();
}

// wrapper: compute L1 norm; |image1-value|;
// Same scheme as above but against a scalar reference value.
IuStatus cuNormDiffL1(const iu::ImageGpu_32f_C1* src, const float& value,
                      const IuRect& roi, double& norm)
{
  // prepare and bind texture
  tex1_32f_C1__.filterMode = hipFilterModePoint;
  tex1_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>();
  hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc,
                   src->width(), src->height(), src->pitch());

  const unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x),
               iu::divUp(roi.height - roi.y, dimBlock.y));

  iu::ImageGpu_32f_C1 squared_deviances(roi.width, roi.height);

  hipLaunchKernelGGL(( cuNormDiffValueL1Kernel_32f_C1) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
      value, squared_deviances.data(roi.x, roi.y), squared_deviances.stride(),
      roi.x, roi.y, roi.width, roi.height);

  double sum_squared = 0.0;
  iuprivate::cuSummation(&squared_deviances, roi, sum_squared);
  norm = sqrtf(sum_squared);

  return iu::checkCudaErrorState();
}

// wrapper: compute L2 norm; ||image1-image2||;
// Sums the per-pixel squared differences and returns sqrt of the sum.
IuStatus cuNormDiffL2(const iu::ImageGpu_32f_C1* src1, const iu::ImageGpu_32f_C1* src2,
                      const IuRect& roi, double& norm)
{
  // prepare and bind texture for the first image
  tex1_32f_C1__.filterMode = hipFilterModePoint;
  tex1_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>();
  hipBindTexture2D(0, &tex1_32f_C1__, src1->data(), &channel_desc,
                   src1->width(), src1->height(), src1->pitch());

  // prepare and bind texture for the second image
  tex2_32f_C1__.filterMode = hipFilterModePoint;
  tex2_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex2_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex2_32f_C1__.normalized = false;
  hipBindTexture2D(0, &tex2_32f_C1__, src2->data(), &channel_desc,
                   src2->width(), src2->height(), src2->pitch());

  const unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x),
               iu::divUp(roi.height - roi.y, dimBlock.y));

  iu::ImageGpu_32f_C1 squared_deviances(roi.width, roi.height);

  hipLaunchKernelGGL(( cuNormDiffL2Kernel_32f_C1) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
      squared_deviances.data(roi.x, roi.y), squared_deviances.stride(),
      roi.x, roi.y, roi.width, roi.height);

  double sum_squared = 0.0;
  iuprivate::cuSummation(&squared_deviances, roi, sum_squared);
  norm = sqrtf(sum_squared);

  return iu::checkCudaErrorState();
}

// wrapper: compute L2 norm; ||image1-value||;
IuStatus cuNormDiffL2(const iu::ImageGpu_32f_C1* src, const float& value,
                      const IuRect& roi, double& norm)
{
  // prepare and bind texture
  tex1_32f_C1__.filterMode = hipFilterModePoint;
  tex1_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>();
  hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc,
                   src->width(), src->height(), src->pitch());

  const unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x),
               iu::divUp(roi.height - roi.y, dimBlock.y));

  iu::ImageGpu_32f_C1 squared_deviances(roi.width, roi.height);

  hipLaunchKernelGGL(( cuNormDiffValueL2Kernel_32f_C1) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
      value, squared_deviances.data(roi.x, roi.y), squared_deviances.stride(),
      roi.x, roi.y, roi.width, roi.height);

  double sum_squared = 0.0;
  iuprivate::cuSummation(&squared_deviances, roi, sum_squared);
  norm = sqrtf(sum_squared);

  return iu::checkCudaErrorState();
}

/* WRAPPERS for ERROR MEASUREMENTS */

// kernel: compute MSE
// Writes the squared difference of the two bound textures into dst.
// One thread per ROI pixel; 'stride' is in elements, not bytes.
__global__ void cuMseKernel(float* dst, size_t stride, int xoff, int yoff,
                            int width, int height)
{
  // calculate absolute texture coordinates
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  if ((x < width) && (y < height))
  {
    float diff = tex2D(tex1_32f_C1__, x+xoff+0.5f, y+yoff+0.5f) -
                 tex2D(tex2_32f_C1__, x+xoff+0.5f, y+yoff+0.5f);
    dst[y*stride + x] = diff*diff;
  }
}

// wrapper: compute MSE
// mse = sum((src - reference)^2) / (roi.width * roi.height)
IuStatus cuMse(const iu::ImageGpu_32f_C1* src, const iu::ImageGpu_32f_C1* reference,
               const IuRect& roi, double& mse)
{
  tex1_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex1_32f_C1__.filterMode = hipFilterModeLinear;
  tex1_32f_C1__.normalized = false;

  tex2_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex2_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex2_32f_C1__.filterMode = hipFilterModeLinear;
  tex2_32f_C1__.normalized = false;

  hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>();
  hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc,
                   src->width(), src->height(), src->pitch());
  hipBindTexture2D(0, &tex2_32f_C1__, reference->data(), &channel_desc,
                   reference->width(), reference->height(), reference->pitch());

  const unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x),
               iu::divUp(roi.height - roi.y, dimBlock.y));

  iu::ImageGpu_32f_C1 tmp(roi.width, roi.height);
  iuprivate::setValue(0.0f, &tmp, tmp.roi());

  hipLaunchKernelGGL(( cuMseKernel) , dim3(dimGrid),dim3(dimBlock) , 0, 0,
      tmp.data(), tmp.stride(), roi.x, roi.y, roi.width, roi.height);

  double sum = 0.0;
  cuSummation(&tmp, tmp.roi(), sum);
  mse = sum/(static_cast<float>(roi.width*roi.height));

  return iu::checkCudaErrorState();
}

// kernel: compute SSIM
// Per-pixel structural similarity over a 9x9 window (dx,dy in [-4,4]).
// c1/c2 are the usual SSIM stabilization constants.
__global__ void cuSsimKernel(float c1, float c2, float* dst, size_t stride,
                             int xoff, int yoff, int width, int height)
{
  // calculate absolute texture coordinates
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  if ((x < width) && (y < height))
  {
    int hkl = -4;
    int hkr = 4;

    // Calc means over the window (clamped texture addressing handles borders)
    float mu_in = 0.0f;
    float mu_ref = 0.0f;
    float n = 0.0f;
    for (int dx=hkl; dx<=hkr; dx++)
    {
      for (int dy=hkl; dy<=hkr; dy++)
      {
        mu_in  += tex2D(tex1_32f_C1__, x+dx+0.5f, y+dy+0.5f);
        mu_ref += tex2D(tex2_32f_C1__, x+dx+0.5f, y+dy+0.5f);
        n++;
      }
    }
    mu_in /= n;
    mu_ref /= n;

    // Calc variance and covariance (unbiased, divide by n-1)
    float sigma_in = 0.0f;
    float sigma_ref = 0.0f;
    float cov = 0.0f;
    for (int dx=hkl; dx<=hkr; dx++)
    {
      for (int dy=hkl; dy<=hkr; dy++)
      {
        float in  = tex2D(tex1_32f_C1__, x+dx+0.5f, y+dy+0.5f) - mu_in;
        float ref = tex2D(tex2_32f_C1__, x+dx+0.5f, y+dy+0.5f) - mu_ref;
        sigma_in  += in*in;
        sigma_ref += ref*ref;
        cov       += in*ref;
      }
    }
    sigma_in  /= n-1.0f;
    sigma_ref /= n-1.0f;
    cov       /= n-1.0f;

    // Calculate Structural similarity
    dst[y*stride + x] = (2.0f*mu_in*mu_ref + c1)*(2.0f*cov + c2) /
        ((mu_in*mu_in + mu_ref*mu_ref + c1)*(sigma_in + sigma_ref + c2));
  }
}

// wrapper: compute SSIM
// Mean SSIM over the ROI, assuming a dynamic range of 1.0.
IuStatus cuSsim(const iu::ImageGpu_32f_C1* src, const iu::ImageGpu_32f_C1* reference,
                const IuRect& roi, double& ssim)
{
  tex1_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex1_32f_C1__.filterMode = hipFilterModeLinear;
  tex1_32f_C1__.normalized = false;

  tex2_32f_C1__.addressMode[0] = hipAddressModeClamp;
  tex2_32f_C1__.addressMode[1] = hipAddressModeClamp;
  tex2_32f_C1__.filterMode = hipFilterModeLinear;
  tex2_32f_C1__.normalized = false;

  hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float1>();
  hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc,
                   src->width(), src->height(), src->pitch());
  hipBindTexture2D(0, &tex2_32f_C1__, reference->data(), &channel_desc,
                   reference->width(), reference->height(), reference->pitch());

  const unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x),
               iu::divUp(roi.height - roi.y, dimBlock.y));

  iu::ImageGpu_32f_C1 tmp(roi.width, roi.height);
  iuprivate::setValue(0.0f, &tmp, tmp.roi());

  // standard SSIM constants for dynamic range 1.0
  float k1 = 0.01f;
  float k2 = 0.03f;
  float dynamic_range = 1.0f;
  float c1 = (k1*dynamic_range)*(k1*dynamic_range);
  float c2 = (k2*dynamic_range)*(k2*dynamic_range);

  hipLaunchKernelGGL(( cuSsimKernel) , dim3(dimGrid),dim3(dimBlock) , 0, 0,
      c1, c2, tmp.data(), tmp.stride(), roi.x, roi.y, roi.width, roi.height);

  double sum = 0.0;
  cuSummation(&tmp, tmp.roi(), sum);
  // BUGFIX: previously divided the uninitialized output parameter ('ssim')
  // by the pixel count instead of the computed 'sum' (cf. cuMse above).
  ssim = sum/(static_cast<float>(roi.width*roi.height));

  return iu::checkCudaErrorState();
}

// kernel: color histogram
// Each pixel whose mask value equals 'mask_val' increments the 3D histogram
// bin addressed by the (x,y,z) bin indices stored in the bound uchar4 texture.
// Uses hardware float atomicAdd on sm_20+, the atomicExch-based emulation on
// sm_12..sm_13, and is a no-op (with a compile-time warning) below that.
__global__ void cuColorHistogramKernel(float* hist, int width, int height,
                                       int hstrideX, int hstrideXY,
                                       unsigned char mask_val)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;

  if(x<width && y<height)
  {
    if (tex2D(tex1_8u_C1__, x+0.5f, y+0.5f) == mask_val)
    {
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 200
      uchar4 bins = tex2D(tex1_8u_C4__, x+0.5f, y+0.5f);
      int hc = bins.x + bins.y*hstrideX + bins.z*hstrideXY;
      atomicAdd(&hist[hc], 1.0f);
#else
#if __CUDA_ARCH__ >= 120
      uchar4 bins = tex2D(tex1_8u_C4__, x+0.5f, y+0.5f);
      int hc = bins.x + bins.y*hstrideX + bins.z*hstrideXY;
      histogramAtomicAdd(&hist[hc], 1.0f);
#else
#if !WIN32
#warning Color Histograms will not work: >= sm_12 needed!
#endif
#endif
#endif
    }
  }
}

// wrapper: color histogram
// Zeroes 'hist' and accumulates one count per masked pixel of 'binned_image'.
IuStatus cuColorHistogram(const iu::ImageGpu_8u_C4* binned_image,
                          const iu::ImageGpu_8u_C1* mask,
                          iu::VolumeGpu_32f_C1* hist, unsigned char mask_val)
{
  tex1_8u_C4__.addressMode[0] = hipAddressModeClamp;
  tex1_8u_C4__.addressMode[1] = hipAddressModeClamp;
  tex1_8u_C4__.filterMode = hipFilterModePoint;
  tex1_8u_C4__.normalized = false;

  tex1_8u_C1__.addressMode[0] = hipAddressModeClamp;
  tex1_8u_C1__.addressMode[1] = hipAddressModeClamp;
  tex1_8u_C1__.filterMode = hipFilterModePoint;
  tex1_8u_C1__.normalized = false;

  // histogram must start at zero since the kernel only increments
  setValue(0.0f, hist, hist->roi());

  hipChannelFormatDesc channel_desc_c4 = hipCreateChannelDesc<uchar4>();
  hipBindTexture2D(0, &tex1_8u_C4__, binned_image->data(), &channel_desc_c4,
                   binned_image->width(), binned_image->height(), binned_image->pitch());
  hipChannelFormatDesc channel_desc_c1 = hipCreateChannelDesc<unsigned char>();
  hipBindTexture2D(0, &tex1_8u_C1__, mask->data(), &channel_desc_c1,
                   mask->width(), mask->height(), mask->pitch());

  const unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGrid(iu::divUp(binned_image->width(), dimBlock.x),
               iu::divUp(binned_image->height(), dimBlock.y));

  hipLaunchKernelGGL(( cuColorHistogramKernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
      hist->data(), binned_image->width(), binned_image->height(),
      hist->stride(), hist->slice_stride(), mask_val);

  return iu::checkCudaErrorState();
}

} // namespace iuprivate

#endif // IUMATH_STATISTICS_CU
728163045f391d1d04f27f4f0bf94a256674f2e0.cu
/*
 * Copyright (c) ICG. All rights reserved.
 *
 * Institute for Computer Graphics and Vision
 * Graz University of Technology / Austria
 *
 *
 * This software is distributed WITHOUT ANY WARRANTY; without even
 * the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the above copyright notices for more information.
 *
 *
 * Project     : ImageUtilities
 * Module      : Math
 * Class       : none
 * Language    : C++
 * Description : Implementation of Cuda wrappers for statistics functions
 *
 * Author      : Manuel Werlberger
 * EMail       : werlberger@icg.tugraz.at
 *
 */

#ifndef IUMATH_STATISTICS_CU
#define IUMATH_STATISTICS_CU

#include <iucore/copy.h>
#include <iucore/setvalue.h>
#include <iucutil.h>
#include <iucore/iutextures.cuh>
#include "statistics.cuh"

#ifdef CUDA_NO_SM12_ATOMIC_INTRINSICS
#error Compilation target does not support shared-memory atomics
#endif

namespace iuprivate {

////////////////////////////////////////////////////////////////////////////////

// Emulated float atomic add for targets without hardware float atomicAdd.
// Repeatedly swaps the slot with 0.0f, adds the displaced value to the local
// remainder, and swaps the remainder back in; loops until the displaced value
// is 0.0f, i.e. until this thread's contribution has been fully folded in.
__device__ inline void histogramAtomicAdd(float* address, float value)
{
  float old = value;
  while ((old = atomicExch(address, atomicExch(address, 0.0f)+old))!=0.0f);
}

/******************************************************************************
    CUDA KERNELS
*******************************************************************************/

/* KERNELS FOR MIN/MAX */

// kernel; find min/max; 8u_C1
// One thread per column x: scans down the column of the bound tex1_8u_C1__
// texture and writes the column min/max to min[x]/max[x].
// NOTE(review): the initial texture reads happen before the x<width guard;
// clamped addressing makes the out-of-range reads harmless, and the wrappers
// launch this with a single-row grid so the outer y is 0 -- confirm if reused
// with a different launch configuration.
__global__ void cuMinMaxXKernel_8u_C1(unsigned char* min, unsigned char* max,
                                      int xoff, int yoff, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  float xx = x+xoff+0.5f;
  float yy = y+yoff+0.5f;

  unsigned char cur_min = tex2D(tex1_8u_C1__, xx, yy);
  unsigned char cur_max = tex2D(tex1_8u_C1__, xx, yy);

  // find minima of columns
  if (x<width)
  {
    unsigned char val;
    // loop variable shadows the outer y on purpose: walks rows 1..height-1
    for(int y = 1; y < height; ++y)
    {
      yy = y+yoff+0.5f;
      val = tex2D(tex1_8u_C1__, xx, yy);
      if(val < cur_min) cur_min = val;
      if(val > cur_max) cur_max = val;
    }
    min[x] = cur_min;
    max[x] = cur_max;
  }
}
// kernel; find min/max; 8u_C1 (variant reading global memory directly
// instead of a texture; 'stride' is in elements).
// NOTE(review): xoff/yoff are accepted but unused here -- the scan always
// starts at row 0 / column x of 'img'; confirm callers pass a full image.
__global__ void cuMinMaxXKernel2_8u_C1(unsigned char* min, unsigned char* max,
                                       int xoff, int yoff, int width, int height,
                                       const unsigned char* img, size_t stride)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  unsigned char cur_min = img[y*stride+x];
  unsigned char cur_max = cur_min;

  // find minima of columns
  if (x<width)
  {
    unsigned char val;
    // loop variable shadows the outer y: walks rows 1..height-1
    for(int y = 1; y < height; ++y)
    {
      val = img[y*stride+x];
      if(val < cur_min) cur_min = val;
      if(val > cur_max) cur_max = val;
    }
    min[x] = cur_min;
    max[x] = cur_max;
  }
}

// kernel; find min/max; 8u_C4
// Component-wise column min/max of the bound tex1_8u_C4__ texture.
__global__ void cuMinMaxXKernel_8u_C4(uchar4* min, uchar4* max,
                                      int xoff, int yoff, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  float xx = x+xoff+0.5f;
  float yy = y+yoff+0.5f;

  uchar4 cur_min = tex2D(tex1_8u_C4__, xx, yy);
  uchar4 cur_max = tex2D(tex1_8u_C4__, xx, yy);

  // find minima of columns
  if (x<width)
  {
    uchar4 val;
    for(int y = 1; y < height; ++y)
    {
      yy = y+yoff+0.5f;
      val = tex2D(tex1_8u_C4__, xx, yy);
      if(val.x < cur_min.x) cur_min.x = val.x;
      if(val.y < cur_min.y) cur_min.y = val.y;
      if(val.z < cur_min.z) cur_min.z = val.z;
      if(val.w < cur_min.w) cur_min.w = val.w;
      if(val.x > cur_max.x) cur_max.x = val.x;
      if(val.y > cur_max.y) cur_max.y = val.y;
      if(val.z > cur_max.z) cur_max.z = val.z;
      if(val.w > cur_max.w) cur_max.w = val.w;
    }
    min[x] = cur_min;
    max[x] = cur_max;
  }
}

// kernel; find min/max; 32f_C1
// Column min/max of the bound tex1_32f_C1__ texture; all work is inside the
// x<width guard here (unlike the 8u variants above).
__global__ void cuMinMaxXKernel_32f_C1(float* min, float* max,
                                       int xoff, int yoff, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  // find minima of columns
  if (x<width)
  {
    float xx = x+xoff+0.5f;
    float yy = y+yoff+0.5f;

    float cur_min = tex2D(tex1_32f_C1__, xx, yy);
    float cur_max = tex2D(tex1_32f_C1__, xx, yy);

    float val;
    for(int y = 1; y < height; ++y)
    {
      yy = y+yoff+0.5f;
      val = tex2D(tex1_32f_C1__, xx, yy);
      if(val < cur_min) cur_min = val;
      if(val > cur_max) cur_max = val;
    }
    min[x] = cur_min;
    max[x] = cur_max;
  }
}

// kernel; find min/max; 32f_C2
// Component-wise column min/max of the bound tex1_32f_C2__ texture.
__global__ void cuMinMaxXKernel_32f_C2(float2* min, float2* max,
                                       int xoff, int yoff, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  float xx = x+xoff+0.5f;
  float yy = y+yoff+0.5f;

  float2 cur_min = tex2D(tex1_32f_C2__, xx, yy);
  float2 cur_max = tex2D(tex1_32f_C2__, xx, yy);

  // find minima of columns
  if (x<width)
  {
    float2 val;
    for(int y = 1; y < height; ++y)
    {
      yy = y+yoff+0.5f;
      val = tex2D(tex1_32f_C2__, xx, yy);
      if(val.x < cur_min.x) cur_min.x = val.x;
      if(val.y < cur_min.y) cur_min.y = val.y;
      if(val.x > cur_max.x) cur_max.x = val.x;
      if(val.y > cur_max.y) cur_max.y = val.y;
    }
    min[x] = cur_min;
    max[x] = cur_max;
  }
}

// kernel; find min/max; 32f_C4
// Component-wise column min/max of the bound tex1_32f_C4__ texture.
// NOTE(review): the loop starts at y=0, so the first row is compared against
// itself once -- redundant but harmless (other variants start at y=1).
__global__ void cuMinMaxXKernel_32f_C4(float4* min, float4* max,
                                       int xoff, int yoff, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  float xx = x+xoff+0.5f;
  float yy = y+yoff+0.5f;

  float4 cur_min = tex2D(tex1_32f_C4__, xx, yy);
  float4 cur_max = tex2D(tex1_32f_C4__, xx, yy);

  // find minima of columns
  if (x<width)
  {
    float4 val;
    for(int y = 0; y < height; ++y)
    {
      yy = y+yoff+0.5f;
      val = tex2D(tex1_32f_C4__, xx, yy);
      if(val.x < cur_min.x) cur_min.x = val.x;
      if(val.y < cur_min.y) cur_min.y = val.y;
      if(val.z < cur_min.z) cur_min.z = val.z;
      if(val.w < cur_min.w) cur_min.w = val.w;
      if(val.x > cur_max.x) cur_max.x = val.x;
      if(val.y > cur_max.y) cur_max.y = val.y;
      if(val.z > cur_max.z) cur_max.z = val.z;
      if(val.w > cur_max.w) cur_max.w = val.w;
    }
    min[x] = cur_min;
    max[x] = cur_max;
  }
}

/* KERNELS FOR min + min COORDS */

// kernel; find min + min idx; 32f_C1
// Column minimum plus the (ROI-relative) row index where it occurs.
__global__ void cuMinXKernel_32f_C1(float* min, unsigned short* min_col_idx,
                                    int xoff, int yoff, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  // find minima of columns
  if (x<width)
  {
    float xx = x+xoff+0.5f;
    float yy = y+yoff+0.5f;

    unsigned short cur_min_col_idx = 0;
    float cur_min = tex2D(tex1_32f_C1__, xx, yy);

    float val;
    for(unsigned short y = 1; y < height; ++y)
    {
      yy = y+yoff+0.5f;
      val = tex2D(tex1_32f_C1__, xx, yy);
      if(val < cur_min)
      {
        cur_min_col_idx = y;
        cur_min = val;
      }
    }
    min_col_idx[x] = cur_min_col_idx;
    min[x] = cur_min;
  }
}

/* KERNELS FOR MAX + MAX COORDS */

// kernel; find max + max idx; 32f_C1
// Column maximum plus the (ROI-relative) row index where it occurs; the index
// is returned as float (matches the 32f_C1 buffer used by the cuMax wrapper).
// NOTE(review): initial texture read happens before the x<width guard, and the
// loop starts at y=0 (redundant self-compare of row 0) -- harmless with
// clamped addressing but inconsistent with the min variant above.
__global__ void cuMaxXKernel_32f_C1(float* max, float* max_col_idx,
                                    int xoff, int yoff, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  float xx = x+xoff+0.5f;
  float yy = y+yoff+0.5f;

  float cur_max_col_idx = 0.0f;
  float cur_max = tex2D(tex1_32f_C1__, xx, yy);

  // find minima of columns
  if (x<width)
  {
    float val;
    for(int y = 0; y < height; ++y)
    {
      yy = y+yoff+0.5f;
      val = tex2D(tex1_32f_C1__, xx, yy);
      if(val > cur_max)
      {
        cur_max_col_idx = (float)y;
        cur_max = val;
      }
    }
    max_col_idx[x] = cur_max_col_idx;
    max[x] = cur_max;
  }
}

/* KERNELS FOR SUM */

// kernel; compute sum; 8u_C1
// Column sums of the bound tex1_8u_C1__ texture, accumulated in float.
// NOTE(review): the result is stored into an unsigned char* buffer, so any
// column sum above 255 is truncated -- confirm intended (the :TODO: in the
// cuSummation wrapper hints this is known).
__global__ void cuSumColKernel_8u_C1(unsigned char* sum, int xoff, int yoff,
                                     int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  float xx = x+xoff+0.5f;
  float yy = y+yoff+0.5f;

  float cur_sum = 0.0f;

  // compute sum of each column
  if (xx<width+0.5f)
  {
    for(int y = yoff; y < height; ++y)
    {
      yy = y+0.5f;
      cur_sum += tex2D(tex1_8u_C1__, xx, yy);
    }
    sum[x] = cur_sum;
  }
}

// kernel; compute sum; 32f_C1
// Column sums of the bound tex1_32f_C1__ texture.
// NOTE(review): 'height' is treated as an absolute bottom row (loop runs from
// yoff to height), i.e. the ROI is [yoff, height) -- confirm against callers.
__global__ void cuSumColKernel_32f_C1(float* sum, int xoff, int yoff,
                                      int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  float xx = x+xoff+0.5f;
  float yy = y+yoff+0.5f;

  float cur_sum = 0.0f;

  // compute sum of each column
  if ((x+xoff)<width)
  {
    for(int y = yoff; y < height; ++y)
    {
      yy = y+0.5f;
      cur_sum += tex2D(tex1_32f_C1__, xx, yy);
    }
    sum[x] = cur_sum;
  }
}

/* KERNELS for NORM OF DIFFERENCES */

// kernel: compute L1 norm; |image1-image2|;
// Output offset 'oc' is computed from the ROI-relative (x,y) BEFORE the
// xoff/yoff shift; the bounds test uses absolute coordinates against
// absolute width/height.
__global__ void cuNormDiffL1Kernel_32f_C1(float* dst, size_t stride,
                                          int xoff, int yoff, int width, int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  const unsigned int oc = y*stride+x;

  x += xoff;
  y += yoff;

  float xx = x+0.5f;
  float yy = y+0.5f;

  if(x>=0 && y>= 0 && x<width && y<height)
  {
    dst[oc] = fabs(tex2D(tex1_32f_C1__, xx, yy) - tex2D(tex2_32f_C1__, xx, yy));
  }
}

// kernel: compute L1 norm; |image-value|;
__global__ void cuNormDiffValueL1Kernel_32f_C1(float value, float* dst, size_t stride,
                                               int xoff, int yoff, int width, int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  const unsigned int oc = y*stride+x;

  x += xoff;
  y += yoff;

  float xx = x+0.5f;
  float yy = y+0.5f;

  if(x>=0 && y>= 0 && x<width && y<height)
  {
    dst[oc] = fabs(tex2D(tex1_32f_C1__, xx, yy) - value);
  }
}

// kernel: compute L2 norm; ||image1-image2||;
// Writes the squared difference; the wrapper takes sqrt of the sum.
__global__ void cuNormDiffL2Kernel_32f_C1(float* dst, size_t stride,
                                          int xoff, int yoff, int width, int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  const unsigned int oc = y*stride+x;

  x += xoff;
  y += yoff;

  float xx = x+0.5f;
  float yy = y+0.5f;

  if(x>=0 && y>= 0 && x<width && y<height)
  {
    dst[oc] = iu::sqr(tex2D(tex1_32f_C1__, xx, yy) - tex2D(tex2_32f_C1__, xx, yy));
  }
}

// kernel: compute L2 norm; ||image-value||;
__global__ void cuNormDiffValueL2Kernel_32f_C1(float value, float* dst, size_t stride,
                                               int xoff, int yoff, int width, int height)
{
  int x = blockIdx.x*blockDim.x + threadIdx.x;
  int y = blockIdx.y*blockDim.y + threadIdx.y;
  const unsigned int oc = y*stride+x;

  x += xoff;
  y += yoff;

  float xx = x+0.5f;
  float yy = y+0.5f;

  if(x>=0 && y>= 0 && x<width && y<height)
  {
    dst[oc] = iu::sqr(tex2D(tex1_32f_C1__, xx, yy) - value);
  }
}
/******************************************************************************
    CUDA INTERFACES
*******************************************************************************/

/* WRAPPERS FOR MIN/MAX */

// wrapper: find min/max; 8u_C1
// Reduces the image column-wise on the device (cuMinMaxXKernel_8u_C1), copies
// the per-column results to the host and finishes the reduction on the CPU.
IuStatus cuMinMax(const iu::ImageGpu_8u_C1 *src, const IuRect &roi,
                  unsigned char& min_C1, unsigned char& max_C1)
{
  // prepare and bind texture
  tex1_8u_C1__.filterMode = cudaFilterModePoint;
  tex1_8u_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_8u_C1__.addressMode[1] = cudaAddressModeClamp;
  tex1_8u_C1__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar1>();
  //  printf("bind texture\n");
  cudaBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc,
                    src->width(), src->height(), src->pitch());

  // fragmentation: one thread per column, single-row grid
  const unsigned int block_width = 512;
  dim3 dimBlock(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(roi.width, block_width), 1);

  // temporary memory for per-column results (reduced on the host below)
  int num_row_sums = roi.width;
  iu::LinearDeviceMemory_8u_C1 row_mins(num_row_sums);
  iu::LinearDeviceMemory_8u_C1 row_maxs(num_row_sums);

  // the #else branch is a kept-around variant that reads global memory
  // directly instead of going through the texture
#if 1
  cuMinMaxXKernel_8u_C1 <<< dimGridX, dimBlock >>> (
      row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height);
#else
  cuMinMaxXKernel2_8u_C1 <<< dimGridX, dimBlock >>> (
      row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height,
      src->data(), src->stride());
#endif

  iu::LinearHostMemory_8u_C1 h_row_mins(num_row_sums);
  iu::LinearHostMemory_8u_C1 h_row_maxs(num_row_sums);
  iuprivate::copy(&row_mins, &h_row_mins);
  iuprivate::copy(&row_maxs, &h_row_maxs);

  // final reduction over the per-column results on the host
  min_C1 = *h_row_mins.data();
  max_C1 = *h_row_maxs.data();
  for (int i = 0; i < num_row_sums; ++i)
  {
    //    printf("#%d: %d / %d\n", i, h_row_mins.data(i)[0], h_row_maxs.data(i)[0]);
    min_C1 = IUMIN(min_C1, *h_row_mins.data(i));
    max_C1 = IUMAX(max_C1, *h_row_maxs.data(i));
  }

  cudaUnbindTexture(&tex1_8u_C1__);
  //  printf("min/max=%d/%d\n", min_C1, max_C1);
  return iu::checkCudaErrorState();
}

// wrapper: find min/max; 8u_C4
// wrapper: find min/max; 8u_C4 (see 8u_C1 variant above: device column
// reduction followed by a host reduction over the per-column results).
IuStatus cuMinMax(const iu::ImageGpu_8u_C4 *src, const IuRect &roi,
                  uchar4& min_C4, uchar4& max_C4)
{
  // prepare and bind texture
  tex1_8u_C4__.filterMode = cudaFilterModePoint;
  tex1_8u_C4__.addressMode[0] = cudaAddressModeClamp;
  tex1_8u_C4__.addressMode[1] = cudaAddressModeClamp;
  tex1_8u_C4__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar4>();
  cudaBindTexture2D(0, &tex1_8u_C4__, src->data(), &channel_desc,
                    src->width(), src->height(), src->pitch());

  // fragmentation: one thread per column, single-row grid
  const unsigned int block_width = 512;
  dim3 dimBlock(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(roi.width, block_width), 1);

  // temporary memory for per-column results
  int num_row_sums = roi.width;
  iu::LinearDeviceMemory_8u_C4 row_mins(num_row_sums);
  iu::LinearDeviceMemory_8u_C4 row_maxs(num_row_sums);

  cuMinMaxXKernel_8u_C4 <<< dimGridX, dimBlock >>> (
      row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height);

  iu::LinearHostMemory_8u_C4 h_row_mins(num_row_sums);
  iu::LinearHostMemory_8u_C4 h_row_maxs(num_row_sums);
  iuprivate::copy(&row_mins, &h_row_mins);
  iuprivate::copy(&row_maxs, &h_row_maxs);

  // component-wise host reduction
  min_C4 = *h_row_mins.data(0);
  max_C4 = *h_row_maxs.data(0);
  for (int i = 1; i < num_row_sums; ++i)
  {
    min_C4.x = IUMIN(min_C4.x, h_row_mins.data(i)->x);
    min_C4.y = IUMIN(min_C4.y, h_row_mins.data(i)->y);
    min_C4.z = IUMIN(min_C4.z, h_row_mins.data(i)->z);
    min_C4.w = IUMIN(min_C4.w, h_row_mins.data(i)->w);
    max_C4.x = IUMAX(max_C4.x, h_row_maxs.data(i)->x);
    max_C4.y = IUMAX(max_C4.y, h_row_maxs.data(i)->y);
    max_C4.z = IUMAX(max_C4.z, h_row_maxs.data(i)->z);
    max_C4.w = IUMAX(max_C4.w, h_row_maxs.data(i)->w);
  }

  cudaUnbindTexture(&tex1_8u_C4__);
  return iu::checkCudaErrorState();
}

// wrapper: find min/max; 32f_C1
IuStatus cuMinMax(const iu::ImageGpu_32f_C1 *src, const IuRect &roi,
                  float& min_C1, float& max_C1)
{
  // prepare and bind texture
  tex1_32f_C1__.filterMode = cudaFilterModePoint;
  tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>();
  cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc,
                    src->width(), src->height(), src->pitch());

  // fragmentation
  const unsigned int block_width = 512;
  dim3 dimBlock(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(roi.width, block_width), 1);

  // temporary memory for per-column results
  int num_row_sums = roi.width;
  iu::LinearDeviceMemory_32f_C1 row_mins(num_row_sums);
  iu::LinearDeviceMemory_32f_C1 row_maxs(num_row_sums);

  cuMinMaxXKernel_32f_C1 <<< dimGridX, dimBlock >>> (
      row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height);

  iu::LinearHostMemory_32f_C1 h_row_mins(num_row_sums);
  iu::LinearHostMemory_32f_C1 h_row_maxs(num_row_sums);
  iuprivate::copy(&row_mins, &h_row_mins);
  iuprivate::copy(&row_maxs, &h_row_maxs);

  min_C1 = *h_row_mins.data(0);
  max_C1 = *h_row_maxs.data(0);
  for (int i = 1; i < num_row_sums; ++i)
  {
    min_C1 = IUMIN(min_C1, *h_row_mins.data(i));
    max_C1 = IUMAX(max_C1, *h_row_maxs.data(i));
  }

  cudaUnbindTexture(&tex1_32f_C1__);
  return iu::checkCudaErrorState();
}

// kernel; find min/max; 32f_C1
// Reduces a volume along z: for every (x,y) writes the min/max over all
// slices into the 2D minim/maxim images. Strides are in elements.
__global__ void cuMinMaxXYKernel_32f_C1(float* minim, float* maxim,
                                        int width, int height, int m_stride,
                                        float* data, int depth,
                                        int d_stride, int d_slice_stride)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  // find minima of columns
  if (x<width && y<height)
  {
    float cur_min = data[y*d_stride + x];
    float cur_max = cur_min;

    float val;
    for(int z = 1; z < depth; z++)
    {
      val = data[z*d_slice_stride + y*d_stride + x];
      if(val < cur_min) cur_min = val;
      if(val > cur_max) cur_max = val;
    }
    minim[y*m_stride + x] = cur_min;
    maxim[y*m_stride + x] = cur_max;
  }
}

// kernel; find min; 32f_C1
// Column minimum of the bound tex1_32f_C1__ texture (no index tracking).
__global__ void cuMinXKernel_32f_C1(float* minim, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  // find minima of columns
  if (x<width)
  {
    float xx = x+0.5f;
    float yy = y+0.5f;

    float cur_min = tex2D(tex1_32f_C1__, xx, yy);
    float val;
    for(int y = 1; y < height; ++y)
    {
      yy = y+0.5f;
      val = tex2D(tex1_32f_C1__, xx, yy);
      if(val < cur_min) cur_min = val;
    }
    minim[x] = cur_min;
  }
}

// kernel; find max; 32f_C1
// Column maximum of the bound tex1_32f_C1__ texture (no index tracking).
__global__ void cuMaxXKernel_32f_C1(float* maxim, int width, int height)
{
  const int x = blockIdx.x*blockDim.x + threadIdx.x;
  const int y = blockIdx.y*blockDim.y + threadIdx.y;

  // find maxima of columns
  if (x<width)
  {
    float xx = x+0.5f;
    float yy = y+0.5f;

    float cur_max = tex2D(tex1_32f_C1__, xx, yy);
    float val;
    for(int y = 1; y < height; ++y)
    {
      yy = y+0.5f;
      val = tex2D(tex1_32f_C1__, xx, yy);
      if(val > cur_max) cur_max = val;
    }
    maxim[x] = cur_max;
  }
}

// wrapper: find min/max; 32f_C1 (volume)
// Stage 1 reduces along z into two 2D images; stage 2 reduces each image
// column-wise via textures; stage 3 finishes on the host.
IuStatus cuMinMax(iu::VolumeGpu_32f_C1 *src, float& min_C1, float& max_C1)
{
  iu::ImageGpu_32f_C1 minim(src->width(), src->height());
  iu::ImageGpu_32f_C1 maxim(src->width(), src->height());

  const unsigned int block_size = 16;
  dim3 dimBlock(block_size, block_size);
  dim3 dimGrid(iu::divUp(src->width(), dimBlock.x),
               iu::divUp(src->height(), dimBlock.y));

  cuMinMaxXYKernel_32f_C1<<< dimGrid,dimBlock >>>(
      minim.data(), maxim.data(), minim.width(), minim.height(), minim.stride(),
      src->data(), src->depth(), src->stride(), src->slice_stride());

  // prepare texture state shared by the two column reductions below
  tex1_32f_C1__.filterMode = cudaFilterModePoint;
  tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>();

  // fragmentation
  const unsigned int block_width = 512;
  dim3 dimBlockX(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(minim.width(), block_width), 1);

  // find minimum
  cudaBindTexture2D(0, &tex1_32f_C1__, minim.data(), &channel_desc,
                    minim.width(), minim.height(), minim.pitch());
  int num_row_sums = minim.width();
  iu::LinearDeviceMemory_32f_C1 row_mins(num_row_sums);
  cuMinXKernel_32f_C1<<<dimGridX, dimBlockX>>>(row_mins.data(), minim.width(), minim.height());
  iu::LinearHostMemory_32f_C1 h_row_mins(num_row_sums);
  iuprivate::copy(&row_mins, &h_row_mins);
  min_C1 = *h_row_mins.data(0);
  for (int i = 1; i < num_row_sums; ++i)
    min_C1 = IUMIN(min_C1, *h_row_mins.data(i));
  cudaUnbindTexture(&tex1_32f_C1__);

  // find maximum
  cudaBindTexture2D(0, &tex1_32f_C1__, maxim.data(), &channel_desc,
                    maxim.width(), maxim.height(), maxim.pitch());
  iu::LinearDeviceMemory_32f_C1 row_maxs(num_row_sums);
  cuMaxXKernel_32f_C1<<<dimGridX, dimBlockX>>>(row_maxs.data(), maxim.width(), maxim.height());
  iu::LinearHostMemory_32f_C1 h_row_maxs(num_row_sums);
  iuprivate::copy(&row_maxs, &h_row_maxs);
  max_C1 = *h_row_maxs.data(0);
  for (int i = 1; i < num_row_sums; ++i)
    max_C1 = IUMAX(max_C1, *h_row_maxs.data(i));
  cudaUnbindTexture(&tex1_32f_C1__);

  return iu::checkCudaErrorState();
}

// wrapper: find min/max; 32f_C2
IuStatus cuMinMax(const iu::ImageGpu_32f_C2 *src, const IuRect &roi,
                  float2& min_C2, float2& max_C2)
{
  // prepare and bind texture
  tex1_32f_C2__.filterMode = cudaFilterModePoint;
  tex1_32f_C2__.addressMode[0] = cudaAddressModeClamp;
  tex1_32f_C2__.addressMode[1] = cudaAddressModeClamp;
  tex1_32f_C2__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float2>();
  cudaBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc,
                    src->width(), src->height(), src->pitch());

  // fragmentation
  const unsigned int block_width = 512;
  dim3 dimBlock(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(roi.width, block_width), 1);

  // temporary memory for per-column results
  int num_row_sums = roi.width;
  iu::LinearDeviceMemory_32f_C2 row_mins(num_row_sums);
  iu::LinearDeviceMemory_32f_C2 row_maxs(num_row_sums);

  cuMinMaxXKernel_32f_C2 <<< dimGridX, dimBlock >>> (
      row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height);

  iu::LinearHostMemory_32f_C2 h_row_mins(num_row_sums);
  iu::LinearHostMemory_32f_C2 h_row_maxs(num_row_sums);
  iuprivate::copy(&row_mins, &h_row_mins);
  iuprivate::copy(&row_maxs, &h_row_maxs);

  min_C2 = *h_row_mins.data(0);
  max_C2 = *h_row_maxs.data(0);
  for (int i = 1; i < num_row_sums; ++i)
  {
    min_C2.x = IUMIN(min_C2.x, h_row_mins.data(i)->x);
    min_C2.y = IUMIN(min_C2.y, h_row_mins.data(i)->y);
    max_C2.x = IUMAX(max_C2.x, h_row_maxs.data(i)->x);
    max_C2.y = IUMAX(max_C2.y, h_row_maxs.data(i)->y);
  }

  cudaUnbindTexture(&tex1_32f_C2__);
  return iu::checkCudaErrorState();
}

// wrapper: find min/max; 32f_C4
IuStatus cuMinMax(const iu::ImageGpu_32f_C4 *src, const IuRect &roi,
                  float4& min_C4, float4& max_C4)
{
  // prepare and bind texture
  tex1_32f_C4__.filterMode = cudaFilterModePoint;
  tex1_32f_C4__.addressMode[0] = cudaAddressModeClamp;
  tex1_32f_C4__.addressMode[1] = cudaAddressModeClamp;
  tex1_32f_C4__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float4>();
  cudaBindTexture2D(0, &tex1_32f_C4__, src->data(), &channel_desc,
                    src->width(), src->height(), src->pitch());

  // fragmentation
  const unsigned int block_width = 512;
  dim3 dimBlock(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(roi.width, block_width), 1);

  // temporary memory for per-column results
  int num_cols = roi.width;
  iu::LinearDeviceMemory_32f_C4 row_mins(num_cols);
  iu::LinearDeviceMemory_32f_C4 row_maxs(num_cols);

  cuMinMaxXKernel_32f_C4 <<< dimGridX, dimBlock >>> (
      row_mins.data(), row_maxs.data(), roi.x, roi.y, roi.width, roi.height);

  iu::LinearHostMemory_32f_C4 h_row_mins(num_cols);
  iu::LinearHostMemory_32f_C4 h_row_maxs(num_cols);
  iuprivate::copy(&row_mins, &h_row_mins);
  iuprivate::copy(&row_maxs, &h_row_maxs);

  min_C4 = *h_row_mins.data(0);
  max_C4 = *h_row_maxs.data(0);
  for (int i = 1; i < num_cols; ++i)
  {
    min_C4.x = IUMIN(min_C4.x, h_row_mins.data(i)->x);
    min_C4.y = IUMIN(min_C4.y, h_row_mins.data(i)->y);
    min_C4.z = IUMIN(min_C4.z, h_row_mins.data(i)->z);
    min_C4.w = IUMIN(min_C4.w, h_row_mins.data(i)->w);
    max_C4.x = IUMAX(max_C4.x, h_row_maxs.data(i)->x);
    max_C4.y = IUMAX(max_C4.y, h_row_maxs.data(i)->y);
    max_C4.z = IUMAX(max_C4.z, h_row_maxs.data(i)->z);
    max_C4.w = IUMAX(max_C4.w, h_row_maxs.data(i)->w);
  }

  cudaUnbindTexture(&tex1_32f_C4__);
  return iu::checkCudaErrorState();
}

/* WRAPPERS FOR MIN + MIN COORDINATES */

// wrapper: find min + min idx; 32f_C1
// Returns the minimum value and its absolute (image) coordinates.
IuStatus cuMin(const iu::ImageGpu_32f_C1 *src, const IuRect &roi,
               float& min, int& min_x, int& min_y)
{
  // prepare and bind texture
  tex1_32f_C1__.filterMode = cudaFilterModePoint;
  tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>();
  cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc,
                    src->width(), src->height(), src->pitch());

  // fragmentation
  const unsigned int block_width = 512;
  dim3 dimBlock(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(roi.width, block_width), 1);

  // temporary memory for per-column minima and their row indices
  int num_cols = roi.width;
  iu::LinearDeviceMemory_32f_C1 col_mins(num_cols);
  iu::LinearDeviceMemory_16u_C1 col_min_idxs(num_cols);

  cuMinXKernel_32f_C1 <<< dimGridX, dimBlock >>> (
      col_mins.data(), col_min_idxs.data(), roi.x, roi.y, roi.width, roi.height);

  iu::LinearHostMemory_32f_C1 h_col_mins(num_cols);
  iuprivate::copy(&col_mins, &h_col_mins);
  iu::LinearHostMemory_16u_C1 h_col_min_idxs(num_cols);
  iuprivate::copy(&col_min_idxs, &h_col_min_idxs);

  // host reduction; initialize from column 0
  min_x = roi.x;
  min_y = (int)(roi.y + *h_col_min_idxs.data(0));
  min = *h_col_mins.data(0);
  for (int i = 1; i < num_cols; ++i)
  {
    if(min > *h_col_mins.data(i))
    {
      min = *h_col_mins.data(i);
      min_x = roi.x + i;
      min_y = (int)(roi.y + *h_col_min_idxs.data(i));
    }
  }

  cudaUnbindTexture(&tex1_32f_C1__);
  return iu::checkCudaErrorState();
}

/* WRAPPERS FOR MAX + MAX COORDINATES */

// wrapper: find max + max idx; 32f_C1
// Returns the maximum value and its absolute (image) coordinates.
IuStatus cuMax(const iu::ImageGpu_32f_C1 *src, const IuRect &roi,
               float& max, int& max_x, int& max_y)
{
  // prepare and bind texture
  tex1_32f_C1__.filterMode = cudaFilterModePoint;
  tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>();
  cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc,
                    src->width(), src->height(), src->pitch());

  // fragmentation
  const unsigned int block_width = 512;
  dim3 dimBlock(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(roi.width, block_width), 1);

  // temporary memory for per-column maxima and their row indices
  int num_cols = roi.width;
  iu::LinearDeviceMemory_32f_C1 col_maxs(num_cols);
  iu::LinearDeviceMemory_32f_C1 col_max_idxs(num_cols);

  cuMaxXKernel_32f_C1 <<< dimGridX, dimBlock >>> (
      col_maxs.data(), col_max_idxs.data(), roi.x, roi.y, roi.width, roi.height);

  iu::LinearHostMemory_32f_C1 h_col_max_idxs(num_cols);
  iu::LinearHostMemory_32f_C1 h_col_maxs(num_cols);
  iuprivate::copy(&col_max_idxs, &h_col_max_idxs);
  iuprivate::copy(&col_maxs, &h_col_maxs);

  // host reduction; initialize from column 0
  // BUGFIX: max_x was never initialized -- if the maximum lies in column 0 the
  // caller's max_x was returned unmodified (cf. cuMin, which sets min_x first).
  max_x = roi.x;
  max_y = (int)(roi.y + *h_col_max_idxs.data(0));
  max = *h_col_maxs.data(0);
  for (int i = 1; i < num_cols; ++i)
  {
    if(max < *h_col_maxs.data(i))
    {
      max = *h_col_maxs.data(i);
      max_x = roi.x + i;
      max_y = (int)(roi.y + *h_col_max_idxs.data(i));
    }
  }

  cudaUnbindTexture(&tex1_32f_C1__);
  return iu::checkCudaErrorState();
}

/* WRAPPERS FOR SUM */

// wrapper: compute sum; 8u_C1
// NOTE(review): the per-column sums travel through an 8-bit buffer
// (cuSumColKernel_8u_C1 stores into unsigned char), so column sums above 255
// wrap -- see the :TODO: below; confirm before relying on large-image sums.
IuStatus cuSummation(const iu::ImageGpu_8u_C1 *src, const IuRect &roi, long& sum)
{
  // prepare and bind texture
  tex1_8u_C1__.filterMode = cudaFilterModePoint;
  tex1_8u_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_8u_C1__.addressMode[1] = cudaAddressModeClamp;
  tex1_8u_C1__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar1>();
  cudaBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc,
                    src->width(), src->height(), src->pitch());

  // fragmentation
  const unsigned int block_width = 512;
  dim3 dimBlock(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(roi.width, block_width), 1);

  // temporary memory for column sums
  int num_col_sums = roi.width;
  iu::LinearDeviceMemory_8u_C1 col_sums(num_col_sums);

  cuSumColKernel_8u_C1 <<< dimGridX, dimBlock >>> (
      col_sums.data(), roi.x, roi.y, roi.width, roi.height);

  // :TODO: 32f vs 32u?
  iu::LinearHostMemory_8u_C1 h_col_sums(num_col_sums);
  iuprivate::copy(&col_sums, &h_col_sums);

  sum = 0;
  for (int i = 0; i < num_col_sums; ++i)
  {
    sum += static_cast<unsigned int>(*h_col_sums.data(i));
  }

  cudaUnbindTexture(&tex1_8u_C1__);
  return iu::checkCudaErrorState();
}

// wrapper: compute sum; 32f_C1
IuStatus cuSummation(const iu::ImageGpu_32f_C1 *src, const IuRect &roi, double& sum)
{
  // prepare and bind texture
  tex1_32f_C1__.filterMode = cudaFilterModePoint;
  tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp;
  tex1_32f_C1__.normalized = false;
  cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>();
  cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc,
                    src->width(), src->height(), src->pitch());

  // fragmentation
  const unsigned int block_width = 512;
  dim3 dimBlock(block_width, 1, 1);
  dim3 dimGridX(iu::divUp(roi.width, block_width), 1);

  // temporary memory for column sums
  int num_col_sums = roi.width;
  iu::LinearDeviceMemory_32f_C1 col_sums(num_col_sums);

  cuSumColKernel_32f_C1 <<< dimGridX, dimBlock >>> (
      col_sums.data(), roi.x, roi.y, roi.width, roi.height);

  iu::LinearHostMemory_32f_C1 h_col_sums(num_col_sums);
  iuprivate::copy(&col_sums, &h_col_sums);

  sum = 0.0;
  for (int i = 0; i < num_col_sums; ++i)
  {
    sum += *h_col_sums.data(i);
  }

  cudaUnbindTexture(&tex1_32f_C1__);
  return iu::checkCudaErrorState();
}

/* WRAPPERS for NORM OF DIFFERENCES */

// wrapper: compute L1 norm; |image1-image2|;
IuStatus cuNormDiffL1(const iu::ImageGpu_32f_C1* src1, const iu::ImageGpu_32f_C1* src2,
                      const IuRect& roi, double& norm)
{
  // prepare and bind texture
  tex1_32f_C1__.filterMode = cudaFilterModePoint;
  tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp;
  tex1_32f_C1__.addressMode[1] =
cudaAddressModeClamp; tex1_32f_C1__.normalized = false; cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>(); cudaBindTexture2D(0, &tex1_32f_C1__, src1->data(), &channel_desc, src1->width(), src1->height(), src1->pitch()); // prepare and bind texture tex2_32f_C1__.filterMode = cudaFilterModePoint; tex2_32f_C1__.addressMode[0] = cudaAddressModeClamp; tex2_32f_C1__.addressMode[1] = cudaAddressModeClamp; tex2_32f_C1__.normalized = false; cudaBindTexture2D(0, &tex2_32f_C1__, src2->data(), &channel_desc, src2->width(), src2->height(), src2->pitch()); const unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x), iu::divUp(roi.height - roi.y, dimBlock.y)); iu::ImageGpu_32f_C1 squared_deviances(roi.width, roi.height); cuNormDiffL1Kernel_32f_C1 <<< dimGrid, dimBlock >>> ( squared_deviances.data(roi.x, roi.y), squared_deviances.stride(), roi.x, roi.y, roi.width, roi.height); double sum_squared = 0.0; iuprivate::cuSummation(&squared_deviances, roi, sum_squared); norm = sqrtf(sum_squared); return iu::checkCudaErrorState(); } // wrapper: compute L1 norm; |image1-value|; IuStatus cuNormDiffL1(const iu::ImageGpu_32f_C1* src, const float& value, const IuRect& roi, double& norm) { // prepare and bind texture tex1_32f_C1__.filterMode = cudaFilterModePoint; tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp; tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp; tex1_32f_C1__.normalized = false; cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>(); cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); const unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x), iu::divUp(roi.height - roi.y, dimBlock.y)); iu::ImageGpu_32f_C1 squared_deviances(roi.width, roi.height); cuNormDiffValueL1Kernel_32f_C1 <<< dimGrid, dimBlock >>> ( value, squared_deviances.data(roi.x, 
roi.y), squared_deviances.stride(), roi.x, roi.y, roi.width, roi.height); double sum_squared = 0.0; iuprivate::cuSummation(&squared_deviances, roi, sum_squared); norm = sqrtf(sum_squared); return iu::checkCudaErrorState(); } // wrapper: compute L2 norm; ||image1-image2||; IuStatus cuNormDiffL2(const iu::ImageGpu_32f_C1* src1, const iu::ImageGpu_32f_C1* src2, const IuRect& roi, double& norm) { // prepare and bind texture tex1_32f_C1__.filterMode = cudaFilterModePoint; tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp; tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp; tex1_32f_C1__.normalized = false; cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>(); cudaBindTexture2D(0, &tex1_32f_C1__, src1->data(), &channel_desc, src1->width(), src1->height(), src1->pitch()); // prepare and bind texture tex2_32f_C1__.filterMode = cudaFilterModePoint; tex2_32f_C1__.addressMode[0] = cudaAddressModeClamp; tex2_32f_C1__.addressMode[1] = cudaAddressModeClamp; tex2_32f_C1__.normalized = false; cudaBindTexture2D(0, &tex2_32f_C1__, src2->data(), &channel_desc, src2->width(), src2->height(), src2->pitch()); const unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x), iu::divUp(roi.height - roi.y, dimBlock.y)); iu::ImageGpu_32f_C1 squared_deviances(roi.width, roi.height); cuNormDiffL2Kernel_32f_C1 <<< dimGrid, dimBlock >>> ( squared_deviances.data(roi.x, roi.y), squared_deviances.stride(), roi.x, roi.y, roi.width, roi.height); double sum_squared = 0.0; iuprivate::cuSummation(&squared_deviances, roi, sum_squared); norm = sqrtf(sum_squared); return iu::checkCudaErrorState(); } // wrapper: compute L2 norm; ||image1-value||; IuStatus cuNormDiffL2(const iu::ImageGpu_32f_C1* src, const float& value, const IuRect& roi, double& norm) { // prepare and bind texture tex1_32f_C1__.filterMode = cudaFilterModePoint; tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp; tex1_32f_C1__.addressMode[1] = 
cudaAddressModeClamp; tex1_32f_C1__.normalized = false; cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>(); cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); const unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x), iu::divUp(roi.height - roi.y, dimBlock.y)); iu::ImageGpu_32f_C1 squared_deviances(roi.width, roi.height); cuNormDiffValueL2Kernel_32f_C1 <<< dimGrid, dimBlock >>> ( value, squared_deviances.data(roi.x, roi.y), squared_deviances.stride(), roi.x, roi.y, roi.width, roi.height); double sum_squared = 0.0; iuprivate::cuSummation(&squared_deviances, roi, sum_squared); norm = sqrtf(sum_squared); return iu::checkCudaErrorState(); } /* WRAPPERS for ERROR MEASUREMENTS */ // kernel: compute MSE __global__ void cuMseKernel(float* dst, size_t stride, int xoff, int yoff, int width, int height) { // calculate absolute texture coordinates const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if ((x < width) && (y < height)) { float diff = tex2D(tex1_32f_C1__, x+xoff+0.5f, y+yoff+0.5f) - tex2D(tex2_32f_C1__, x+xoff+0.5f, y+yoff+0.5f); dst[y*stride + x] = diff*diff; } } // wrapper: compute MSE IuStatus cuMse(const iu::ImageGpu_32f_C1* src, const iu::ImageGpu_32f_C1* reference, const IuRect& roi, double& mse) { tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp; tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp; tex1_32f_C1__.filterMode = cudaFilterModeLinear; tex1_32f_C1__.normalized = false; tex2_32f_C1__.addressMode[0] = cudaAddressModeClamp; tex2_32f_C1__.addressMode[1] = cudaAddressModeClamp; tex2_32f_C1__.filterMode = cudaFilterModeLinear; tex2_32f_C1__.normalized = false; cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>(); cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); cudaBindTexture2D(0, 
&tex2_32f_C1__, reference->data(), &channel_desc, reference->width(), reference->height(), reference->pitch()); const unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x), iu::divUp(roi.height - roi.y, dimBlock.y)); iu::ImageGpu_32f_C1 tmp(roi.width, roi.height); iuprivate::setValue(0.0f, &tmp, tmp.roi()); cuMseKernel <<< dimGrid,dimBlock >>> ( tmp.data(), tmp.stride(), roi.x, roi.y, roi.width, roi.height); double sum = 0.0; cuSummation(&tmp, tmp.roi(), sum); mse = sum/(static_cast<float>(roi.width*roi.height)); return iu::checkCudaErrorState(); } // kernel: compute SSIM __global__ void cuSsimKernel(float c1, float c2, float* dst, size_t stride, int xoff, int yoff, int width, int height) { // calculate absolute texture coordinates const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if ((x < width) && (y < height)) { int hkl = -4; int hkr = 4; // Calc means float mu_in = 0.0f; float mu_ref = 0.0f; float n = 0.0f; for (int dx=hkl; dx<=hkr; dx++) { for (int dy=hkl; dy<=hkr; dy++) { mu_in += tex2D(tex1_32f_C1__, x+dx+0.5f, y+dy+0.5f); mu_ref += tex2D(tex2_32f_C1__, x+dx+0.5f, y+dy+0.5f); n++; } } mu_in /= n; mu_ref /= n; // Calc variance and covariance float sigma_in = 0.0f; float sigma_ref = 0.0f; float cov = 0.0f; for (int dx=hkl; dx<=hkr; dx++) { for (int dy=hkl; dy<=hkr; dy++) { float in = tex2D(tex1_32f_C1__, x+dx+0.5f, y+dy+0.5f) - mu_in; float ref = tex2D(tex2_32f_C1__, x+dx+0.5f, y+dy+0.5f) - mu_ref; sigma_in += in*in; sigma_ref += ref*ref; cov += in*ref; } } sigma_in /= n-1.0f; sigma_ref /= n-1.0f; cov /= n-1.0f; // Calculate Structural similarity dst[y*stride + x] = (2.0f*mu_in*mu_ref + c1)*(2.0f*cov + c2)/((mu_in*mu_in + mu_ref*mu_ref + c1)*(sigma_in + sigma_ref + c2)); } } // wrapper: compute SSIM IuStatus cuSsim(const iu::ImageGpu_32f_C1* src, const iu::ImageGpu_32f_C1* reference, const IuRect& roi, double& ssim) { 
tex1_32f_C1__.addressMode[0] = cudaAddressModeClamp; tex1_32f_C1__.addressMode[1] = cudaAddressModeClamp; tex1_32f_C1__.filterMode = cudaFilterModeLinear; tex1_32f_C1__.normalized = false; tex2_32f_C1__.addressMode[0] = cudaAddressModeClamp; tex2_32f_C1__.addressMode[1] = cudaAddressModeClamp; tex2_32f_C1__.filterMode = cudaFilterModeLinear; tex2_32f_C1__.normalized = false; cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float1>(); cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); cudaBindTexture2D(0, &tex2_32f_C1__, reference->data(), &channel_desc, reference->width(), reference->height(), reference->pitch()); const unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(roi.width - roi.x, dimBlock.x), iu::divUp(roi.height - roi.y, dimBlock.y)); iu::ImageGpu_32f_C1 tmp(roi.width, roi.height); iuprivate::setValue(0.0f, &tmp, tmp.roi()); float k1 = 0.01f; float k2 = 0.03f; float dynamic_range = 1.0f; float c1 = (k1*dynamic_range)*(k1*dynamic_range); float c2 = (k2*dynamic_range)*(k2*dynamic_range); cuSsimKernel <<< dimGrid,dimBlock >>> ( c1, c2, tmp.data(), tmp.stride(), roi.x, roi.y, roi.width, roi.height); double sum = 0.0; cuSummation(&tmp, tmp.roi(), sum); ssim = ssim/(static_cast<float>(roi.width*roi.height)); return iu::checkCudaErrorState(); } // kernel: color histogram __global__ void cuColorHistogramKernel(float* hist, int width, int height, int hstrideX, int hstrideXY, unsigned char mask_val) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x<width && y<height) { if (tex2D(tex1_8u_C1__, x+0.5f, y+0.5f) == mask_val) { #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 200 uchar4 bins = tex2D(tex1_8u_C4__, x+0.5f, y+0.5f); int hc = bins.x + bins.y*hstrideX + bins.z*hstrideXY; atomicAdd(&hist[hc], 1.0f); #else #if __CUDA_ARCH__ >= 120 uchar4 bins = tex2D(tex1_8u_C4__, x+0.5f, y+0.5f); int hc = bins.x + 
bins.y*hstrideX + bins.z*hstrideXY; histogramAtomicAdd(&hist[hc], 1.0f); #else #if !WIN32 #warning Color Histograms will not work: >= sm_12 needed! #endif #endif #endif } } } // wrapper: color histogram IuStatus cuColorHistogram(const iu::ImageGpu_8u_C4* binned_image, const iu::ImageGpu_8u_C1* mask, iu::VolumeGpu_32f_C1* hist, unsigned char mask_val) { tex1_8u_C4__.addressMode[0] = cudaAddressModeClamp; tex1_8u_C4__.addressMode[1] = cudaAddressModeClamp; tex1_8u_C4__.filterMode = cudaFilterModePoint; tex1_8u_C4__.normalized = false; tex1_8u_C1__.addressMode[0] = cudaAddressModeClamp; tex1_8u_C1__.addressMode[1] = cudaAddressModeClamp; tex1_8u_C1__.filterMode = cudaFilterModePoint; tex1_8u_C1__.normalized = false; setValue(0.0f, hist, hist->roi()); cudaChannelFormatDesc channel_desc_c4 = cudaCreateChannelDesc<uchar4>(); cudaBindTexture2D(0, &tex1_8u_C4__, binned_image->data(), &channel_desc_c4, binned_image->width(), binned_image->height(), binned_image->pitch()); cudaChannelFormatDesc channel_desc_c1 = cudaCreateChannelDesc<unsigned char>(); cudaBindTexture2D(0, &tex1_8u_C1__, mask->data(), &channel_desc_c1, mask->width(), mask->height(), mask->pitch()); const unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(binned_image->width(), dimBlock.x), iu::divUp(binned_image->height(), dimBlock.y)); cuColorHistogramKernel<<<dimGrid,dimBlock>>>(hist->data(), binned_image->width(), binned_image->height(), hist->stride(), hist->slice_stride(), mask_val); return iu::checkCudaErrorState(); } } // namespace iuprivate #endif // IUMATH_STATISTICS_CU
f52f7009a67cbcdab7867c90b215198e8b551c37.hip
// !!! This is a file automatically generated by hipify!!! #include "HashGrid.cuh" void writeToFileCPU(const std::string& name, std::vector<std::vector<uint>> grid, uint3 gridSize) { std::ofstream outputFile(name, std::ifstream::binary); for (int i = 0; i < gridSize.x * gridSize.y * gridSize.z; i++) { uint64_t num = grid[i].size(); outputFile.write((char*)&num, 4); } outputFile.close(); } ParticleList reduceParticles(const ParticleList pList, uint targetCount) { std::random_device rd; /* Random number generator */ std::default_random_engine generator(rd()); /* Distribution on which to apply the generator */ std::uniform_int_distribution<unsigned int> distribution(0, pList.info.groupCount - 1); bool* b = new bool[pList.info.groupCount]; for (int i = 0; i < pList.info.groupCount; i++) { b[i] = false; } for (int i = 0; i < pList.info.groupCount - targetCount; i++) { uint rNum = distribution(generator); while (b[rNum] == true) { rNum = distribution(generator); } b[rNum] = true; } char* output = new char[targetCount * pList.info.stride]; int index = 0; for (int i = 0; i < pList.info.groupCount; i++) { if (b[i] == false) { for (int a = 0; a < pList.info.stride; a++) { output[index * pList.info.stride + a] = pList.data[i * pList.info.stride + a]; } index++; } } ParticleInfo pInfo = pList.info; pInfo.groupCount = targetCount; ParticleList particles; particles.info = pInfo; particles.data = output; delete[] b; return particles; } float benchmarkPListGPU(ParticleList p, PSystemInfo pSysInfo, int iterations = 1) { float minTime = 100000000.0f; for (int i = 0; i < iterations; i++) { HashGrid hGrid = HashGrid(p, pSysInfo); // Particle particle; // particle.pos = make_float3(60, 600, 60); // particle.radius = 60; // std::cout << getNumberOfNeighboursGPUOld(d_List, d_CellBegin, d_CellEnd, d_IdList, p.info, pSysInfo, particle, isAligned) << std::endl; // std::cout << getNumberOfNeighboursGPU(d_List, d_CellBegin, d_CellEnd, d_IdList, p.info, pSysInfo, particle, isAligned) << 
std::endl; // std::cout << getNumberOfNeighboursCPU(p.data, d_CellBegin, d_CellEnd, d_IdList, p.info, pSysInfo, particle) << std::endl; if (hGrid.initHashGridTime < minTime) minTime = hGrid.initHashGridTime; } std::cout << "GPU: Particle count: " << p.info.groupCount << " Time: " << minTime << std::endl; return minTime; } void benchmarkPListGPU(ParticleList p, PSystemInfo pSysInfo, float& minTime, float& minCopyTime, float& minAllocTime, float& minKernelTime, int iterations = 1) { minTime = std::numeric_limits<float>::max(); minCopyTime = std::numeric_limits<float>::max(); minAllocTime = std::numeric_limits<float>::max(); minKernelTime = std::numeric_limits<float>::max(); for (int i = 0; i < iterations; i++) { HashGrid hGrid = HashGrid(p, pSysInfo); if (hGrid.initHashGridTime < minTime) minTime = hGrid.initHashGridTime; if (hGrid.copyDataTime < minCopyTime) minCopyTime = hGrid.copyDataTime; if (hGrid.allocDataTime < minAllocTime) minAllocTime = hGrid.allocDataTime; if (hGrid.kernelTime < minKernelTime) minKernelTime = hGrid.kernelTime; } std::cout << "GPU: Particle count: " << p.info.groupCount << " Time: " << minTime << std::endl; } long long benchmarkPListCPU(ParticleList p, PSystemInfo pSysInfo, int iterations = 1) { unsigned long long minTime = -1; for (int a = 0; a < iterations; a++) { long long startTime = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()).count(); std::vector<std::vector<uint>> grid(pSysInfo.gridSize.x * pSysInfo.gridSize.y * pSysInfo.gridSize.z); for (uint i = 0; i < p.info.groupCount; i++) grid[calcGridHash(calcGridPos(*(float3*)(p.data + i * p.info.stride), pSysInfo), pSysInfo)].push_back(i); long long endTime = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()).count(); if (endTime - startTime < minTime) minTime = endTime - startTime; // writeToFile("testFile.raw", grid, pSysInfo.gridSize); } std::cout << "CPU: Particle count: 
" << p.info.groupCount << " Time: " << minTime / 1000.0f << std::endl; return minTime; } int main(int argc, char **argv) { Loader loader("laser.00080.chkpt.density.mmpld"); auto pLists = loader.getFrame(0); /* for (int i = 1; i < 11; i++) { std::cout << "Grid Size: (" << pSysInfo.gridSize.x << ", " << pSysInfo.gridSize.y << ", " << pSysInfo.gridSize.z << ") Origin: (" << pSysInfo.worldOrigin.x << ", " << pSysInfo.worldOrigin.y << ", " << pSysInfo.worldOrigin.z << ") Cell Size: (" << pSysInfo.cellSize.x << ", " << pSysInfo.cellSize.y << ", " << pSysInfo.cellSize.z << ")" << std::endl; benchmarkPListGPU(pLists[0], pSysInfo, 128, 100); benchmarkPListCPU(pLists[0], pSysInfo, 100); gridSize.x *= 2; gridSize.y *= 2; gridSize.z *= 2; pSysInfo = loader.calcBSystemInfo(gridSize); } */ // loader.isInBBox(pLists, pSysInfo); // ParticleList pList = reduceParticles(pLists[0], 20000000); // benchmarkPListGPU(pLists[0], pSysInfo, 10); // benchmarkPListCPU(pLists[0], pSysInfo, 10); // HashGrid hGrid = HashGrid(pLists[0], pSysInfo); // hGrid.writeToRawFile("exp2mill.raw"); std::ofstream outputFile("benchmark.csv"); outputFile << "Dimension; GPU; CPU\n"; for (uint i = 0; i <= 9; i++) { uint3 gridSize; gridSize.x = ::pow(2, i); gridSize.y = ::pow(2, i); gridSize.z = ::pow(2, i); PSystemInfo pSysInfo = loader.calcBSystemInfo(gridSize); outputFile << gridSize.x << "x" << gridSize.x << "x" << gridSize.x << ";"; outputFile << std::round(benchmarkPListGPU(pLists[0], pSysInfo, 10)) << ";"; outputFile << std::round(benchmarkPListCPU(pLists[0], pSysInfo, 10) / 1000.0f) << "\n"; /* float time; Particle p; // p.pos = make_float3(pSysInfo.gridSize.x * 0.5f * pSysInfo.cellSize.x + pSysInfo.worldOrigin.x, pSysInfo.gridSize.y * 0.5f * pSysInfo.cellSize.y + pSysInfo.worldOrigin.y, pSysInfo.gridSize.z * 0.5f * pSysInfo.cellSize.z + pSysInfo.worldOrigin.z); // p.radius = i * pSysInfo.cellSize.x; p.pos = make_float3(pSysInfo.worldOrigin.x + pSysInfo.gridSize.x * pSysInfo.cellSize.x * i / 30.0f, 
pSysInfo.gridSize.y * 0.5f * pSysInfo.cellSize.y + pSysInfo.worldOrigin.y, pSysInfo.gridSize.z * 0.5f * pSysInfo.cellSize.z + pSysInfo.worldOrigin.z); p.radius = 60; std::cout << "Radius: " << p.radius << std::endl; std::cout << p.pos.x << ", " << p.pos.y << ", " << p.pos.z << std::endl; outputFile << std::round(p.pos.x) << ";"; uint nGPU = hGrid.getNumberOfNeighboursGPU(p, time, 10); outputFile << std::round(time * 1000) << ";"; uint nCPU = hGrid.getNumberOfNeighboursCPU(p, time, 10); outputFile << std::round(time * 1000) << "\n"; std::cout << "GPU: " << nGPU << "; CPU: " << nCPU << std::endl; */ } outputFile.close(); // HashGrid hGrid = HashGrid(pLists[0], pSysInfo); // hGrid.writeToRawFile("60mill.raw"); return 0; }
f52f7009a67cbcdab7867c90b215198e8b551c37.cu
#include "HashGrid.cuh" void writeToFileCPU(const std::string& name, std::vector<std::vector<uint>> grid, uint3 gridSize) { std::ofstream outputFile(name, std::ifstream::binary); for (int i = 0; i < gridSize.x * gridSize.y * gridSize.z; i++) { uint64_t num = grid[i].size(); outputFile.write((char*)&num, 4); } outputFile.close(); } ParticleList reduceParticles(const ParticleList pList, uint targetCount) { std::random_device rd; /* Random number generator */ std::default_random_engine generator(rd()); /* Distribution on which to apply the generator */ std::uniform_int_distribution<unsigned int> distribution(0, pList.info.groupCount - 1); bool* b = new bool[pList.info.groupCount]; for (int i = 0; i < pList.info.groupCount; i++) { b[i] = false; } for (int i = 0; i < pList.info.groupCount - targetCount; i++) { uint rNum = distribution(generator); while (b[rNum] == true) { rNum = distribution(generator); } b[rNum] = true; } char* output = new char[targetCount * pList.info.stride]; int index = 0; for (int i = 0; i < pList.info.groupCount; i++) { if (b[i] == false) { for (int a = 0; a < pList.info.stride; a++) { output[index * pList.info.stride + a] = pList.data[i * pList.info.stride + a]; } index++; } } ParticleInfo pInfo = pList.info; pInfo.groupCount = targetCount; ParticleList particles; particles.info = pInfo; particles.data = output; delete[] b; return particles; } float benchmarkPListGPU(ParticleList p, PSystemInfo pSysInfo, int iterations = 1) { float minTime = 100000000.0f; for (int i = 0; i < iterations; i++) { HashGrid hGrid = HashGrid(p, pSysInfo); // Particle particle; // particle.pos = make_float3(60, 600, 60); // particle.radius = 60; // std::cout << getNumberOfNeighboursGPUOld(d_List, d_CellBegin, d_CellEnd, d_IdList, p.info, pSysInfo, particle, isAligned) << std::endl; // std::cout << getNumberOfNeighboursGPU(d_List, d_CellBegin, d_CellEnd, d_IdList, p.info, pSysInfo, particle, isAligned) << std::endl; // std::cout << getNumberOfNeighboursCPU(p.data, 
d_CellBegin, d_CellEnd, d_IdList, p.info, pSysInfo, particle) << std::endl; if (hGrid.initHashGridTime < minTime) minTime = hGrid.initHashGridTime; } std::cout << "GPU: Particle count: " << p.info.groupCount << " Time: " << minTime << std::endl; return minTime; } void benchmarkPListGPU(ParticleList p, PSystemInfo pSysInfo, float& minTime, float& minCopyTime, float& minAllocTime, float& minKernelTime, int iterations = 1) { minTime = std::numeric_limits<float>::max(); minCopyTime = std::numeric_limits<float>::max(); minAllocTime = std::numeric_limits<float>::max(); minKernelTime = std::numeric_limits<float>::max(); for (int i = 0; i < iterations; i++) { HashGrid hGrid = HashGrid(p, pSysInfo); if (hGrid.initHashGridTime < minTime) minTime = hGrid.initHashGridTime; if (hGrid.copyDataTime < minCopyTime) minCopyTime = hGrid.copyDataTime; if (hGrid.allocDataTime < minAllocTime) minAllocTime = hGrid.allocDataTime; if (hGrid.kernelTime < minKernelTime) minKernelTime = hGrid.kernelTime; } std::cout << "GPU: Particle count: " << p.info.groupCount << " Time: " << minTime << std::endl; } long long benchmarkPListCPU(ParticleList p, PSystemInfo pSysInfo, int iterations = 1) { unsigned long long minTime = -1; for (int a = 0; a < iterations; a++) { long long startTime = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()).count(); std::vector<std::vector<uint>> grid(pSysInfo.gridSize.x * pSysInfo.gridSize.y * pSysInfo.gridSize.z); for (uint i = 0; i < p.info.groupCount; i++) grid[calcGridHash(calcGridPos(*(float3*)(p.data + i * p.info.stride), pSysInfo), pSysInfo)].push_back(i); long long endTime = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()).count(); if (endTime - startTime < minTime) minTime = endTime - startTime; // writeToFile("testFile.raw", grid, pSysInfo.gridSize); } std::cout << "CPU: Particle count: " << p.info.groupCount << " Time: " << minTime / 1000.0f << 
std::endl; return minTime; } int main(int argc, char **argv) { Loader loader("laser.00080.chkpt.density.mmpld"); auto pLists = loader.getFrame(0); /* for (int i = 1; i < 11; i++) { std::cout << "Grid Size: (" << pSysInfo.gridSize.x << ", " << pSysInfo.gridSize.y << ", " << pSysInfo.gridSize.z << ") Origin: (" << pSysInfo.worldOrigin.x << ", " << pSysInfo.worldOrigin.y << ", " << pSysInfo.worldOrigin.z << ") Cell Size: (" << pSysInfo.cellSize.x << ", " << pSysInfo.cellSize.y << ", " << pSysInfo.cellSize.z << ")" << std::endl; benchmarkPListGPU(pLists[0], pSysInfo, 128, 100); benchmarkPListCPU(pLists[0], pSysInfo, 100); gridSize.x *= 2; gridSize.y *= 2; gridSize.z *= 2; pSysInfo = loader.calcBSystemInfo(gridSize); } */ // loader.isInBBox(pLists, pSysInfo); // ParticleList pList = reduceParticles(pLists[0], 20000000); // benchmarkPListGPU(pLists[0], pSysInfo, 10); // benchmarkPListCPU(pLists[0], pSysInfo, 10); // HashGrid hGrid = HashGrid(pLists[0], pSysInfo); // hGrid.writeToRawFile("exp2mill.raw"); std::ofstream outputFile("benchmark.csv"); outputFile << "Dimension; GPU; CPU\n"; for (uint i = 0; i <= 9; i++) { uint3 gridSize; gridSize.x = std::pow(2, i); gridSize.y = std::pow(2, i); gridSize.z = std::pow(2, i); PSystemInfo pSysInfo = loader.calcBSystemInfo(gridSize); outputFile << gridSize.x << "x" << gridSize.x << "x" << gridSize.x << ";"; outputFile << std::round(benchmarkPListGPU(pLists[0], pSysInfo, 10)) << ";"; outputFile << std::round(benchmarkPListCPU(pLists[0], pSysInfo, 10) / 1000.0f) << "\n"; /* float time; Particle p; // p.pos = make_float3(pSysInfo.gridSize.x * 0.5f * pSysInfo.cellSize.x + pSysInfo.worldOrigin.x, pSysInfo.gridSize.y * 0.5f * pSysInfo.cellSize.y + pSysInfo.worldOrigin.y, pSysInfo.gridSize.z * 0.5f * pSysInfo.cellSize.z + pSysInfo.worldOrigin.z); // p.radius = i * pSysInfo.cellSize.x; p.pos = make_float3(pSysInfo.worldOrigin.x + pSysInfo.gridSize.x * pSysInfo.cellSize.x * i / 30.0f, pSysInfo.gridSize.y * 0.5f * pSysInfo.cellSize.y + 
pSysInfo.worldOrigin.y, pSysInfo.gridSize.z * 0.5f * pSysInfo.cellSize.z + pSysInfo.worldOrigin.z); p.radius = 60; std::cout << "Radius: " << p.radius << std::endl; std::cout << p.pos.x << ", " << p.pos.y << ", " << p.pos.z << std::endl; outputFile << std::round(p.pos.x) << ";"; uint nGPU = hGrid.getNumberOfNeighboursGPU(p, time, 10); outputFile << std::round(time * 1000) << ";"; uint nCPU = hGrid.getNumberOfNeighboursCPU(p, time, 10); outputFile << std::round(time * 1000) << "\n"; std::cout << "GPU: " << nGPU << "; CPU: " << nCPU << std::endl; */ } outputFile.close(); // HashGrid hGrid = HashGrid(pLists[0], pSysInfo); // hGrid.writeToRawFile("60mill.raw"); return 0; }
888132637f7e3e55b19b23ac6ef7cc51f71aba17.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <pthread.h> #include <iostream> #include <unistd.h> #define ARR_SIZE 10 #define NUM_THR 8 typedef struct { int *arr; int *dev_arr; int *dev_result; int *result; int dev_num; int thr_num; } cuda_st; __global__ void kernel_fc(int *dev_arr, int *dev_result) { int idx = threadIdx.x; printf("dev_arr[%d] = %d\n", idx, dev_arr[idx]); atomicAdd(dev_result, dev_arr[idx]); } void *thread_func(void* struc) { cuda_st * data = (cuda_st*)struc; printf("thread %d func start\n", data->thr_num); printf("arr %d = ", data->dev_num); for(int i=0; i<10; i++) { printf("%d ", data->arr[i]); } printf("\n"); hipSetDevice(data->dev_num); hipMemcpy(data->dev_arr, data->arr, sizeof(int)*ARR_SIZE, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_fc), dim3(1),dim3(ARR_SIZE), 0, 0, data->dev_arr, data->dev_result); hipMemcpy(data->result, data->dev_result, sizeof(int), hipMemcpyDeviceToHost); printf("thread %d func exit\n", data->thr_num); return NULL; } int main(void) { hipError_t err; int count = 0; err = hipGetDeviceCount(&count); if (err != hipSuccess) { std::cout << "error in hipGetDeviceCount(), #=" << hipGetErrorString(err) << std::endl; } printf("%d devices found.\n", count); for(int i=0; i<count; i++) { hipSetDevice(i); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, i); std::cout << "Using device " << i << ", name: " << deviceProp.name << std::endl; } // Make object cuda_st cuda[count][NUM_THR]; // Make thread pthread_t pthread[count*NUM_THR]; // Host array memory allocation int *arr[count]; for(int i=0; i<count; i++) { arr[i] = (int*)malloc(sizeof(int)*ARR_SIZE); } // Fill this host array up with specified data for(int i=0; i<count; i++) { for(int j=0; j<ARR_SIZE; j++) { arr[i][j] = i*ARR_SIZE+j; } } // To confirm host array data for(int i=0; i<count; i++) { printf("arr[%d] = ", i); for(int j=0; j<ARR_SIZE; j++) { printf("%d ", 
arr[i][j]); } printf("\n"); } // Result memory allocation int *result[count]; for(int i=0; i<count; i++) { result[i] = (int*)malloc(sizeof(int)); memset(result[i], 0, sizeof(int)); } // Device array memory allocation int *dev_arr[count]; for(int i=0; i<count; i++) { hipSetDevice(i); hipMalloc(&dev_arr[i], sizeof(int)*ARR_SIZE); } // Device result memory allocation int *dev_result[count]; for(int i=0; i<count; i++) { hipSetDevice(i); hipMalloc(&dev_result[i], sizeof(int)); hipMemset(dev_result[i], 0, sizeof(int)); } // Connect these pointers with object for (int i=0; i<count; i++) { for (int j=0; j<NUM_THR; j++) { cuda[i][j].arr = arr[i]; cuda[i][j].dev_arr = dev_arr[i]; cuda[i][j].result = result[i]; cuda[i][j].dev_result = dev_result[i]; cuda[i][j].dev_num = i; cuda[i][j].thr_num = j; } } // Create and excute pthread for(int i=0; i<count; i++) { for (int j=0; j<NUM_THR; j++) { pthread_create(&pthread[(i*NUM_THR)+j], NULL, thread_func, (void*)&cuda[i][j]); } // Join pthread for(int j=0; j<NUM_THR; j++) { pthread_join(pthread[j], NULL); printf("result[%d][%d] = %d\n", i,j, (*cuda[i][j].result)); } } //hipDeviceReset(); return 0; }
888132637f7e3e55b19b23ac6ef7cc51f71aba17.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <iostream>

#define ARR_SIZE 10
#define NUM_THR 8

// Per-worker descriptor: host/device buffers plus the device ordinal and
// worker index this pthread operates on.
typedef struct {
    int *arr;        // host input array (ARR_SIZE ints, shared by one device's workers)
    int *dev_arr;    // device copy of the input array
    int *dev_result; // device accumulator (shared by all workers of one device)
    int *result;     // host destination for the accumulated sum
    int dev_num;     // CUDA device ordinal
    int thr_num;     // worker index within the device
} cuda_st;

// Sums dev_arr[0..blockDim.x-1] into *dev_result, one atomicAdd per thread.
// Expected launch layout: one block of ARR_SIZE threads.
__global__ void kernel_fc(int *dev_arr, int *dev_result)
{
    int idx = threadIdx.x;
    printf("dev_arr[%d] = %d\n", idx, dev_arr[idx]);
    atomicAdd(dev_result, dev_arr[idx]);
}

// pthread entry point: copies the host array to its device, launches the
// reduction kernel, and copies the accumulated result back.
// NOTE(review): all NUM_THR workers of a device share dev_result without
// resetting it between launches, so each worker adds another full array sum;
// the value a worker reads back depends on how many kernels completed first.
void *thread_func(void *struc)
{
    cuda_st *data = (cuda_st *)struc;
    printf("thread %d func start\n", data->thr_num);

    printf("arr %d = ", data->dev_num);
    for (int i = 0; i < ARR_SIZE; i++) {
        printf("%d ", data->arr[i]);
    }
    printf("\n");

    cudaSetDevice(data->dev_num);
    cudaMemcpy(data->dev_arr, data->arr, sizeof(int) * ARR_SIZE, cudaMemcpyHostToDevice);
    kernel_fc<<<1, ARR_SIZE>>>(data->dev_arr, data->dev_result);
    // Blocking cudaMemcpy on the default stream waits for the kernel to finish.
    cudaMemcpy(data->result, data->dev_result, sizeof(int), cudaMemcpyDeviceToHost);

    printf("thread %d func exit\n", data->thr_num);
    return NULL;
}

// Spawns NUM_THR pthreads per visible device; every worker launches the
// reduction kernel on its device and the main thread reports each result.
int main(void)
{
    int count = 0;
    cudaError_t err = cudaGetDeviceCount(&count);
    if (err != cudaSuccess) {
        std::cout << "error in cudaGetDeviceCount(), #=" << cudaGetErrorString(err) << std::endl;
        // Bail out: with count == 0 the VLAs below would be zero-sized (UB).
        return 1;
    }
    printf("%d devices found.\n", count);

    for (int i = 0; i < count; i++) {
        cudaSetDevice(i);
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, i);
        std::cout << "Using device " << i << ", name: " << deviceProp.name << std::endl;
    }

    // Worker descriptors and thread handles, one row per device.
    cuda_st cuda[count][NUM_THR];
    pthread_t pthread[count * NUM_THR];

    // Host input arrays, filled with i*ARR_SIZE + j.
    int *arr[count];
    for (int i = 0; i < count; i++) {
        arr[i] = (int *)malloc(sizeof(int) * ARR_SIZE);
        for (int j = 0; j < ARR_SIZE; j++) {
            arr[i][j] = i * ARR_SIZE + j;
        }
    }

    // Echo the host data for confirmation.
    for (int i = 0; i < count; i++) {
        printf("arr[%d] = ", i);
        for (int j = 0; j < ARR_SIZE; j++) {
            printf("%d ", arr[i][j]);
        }
        printf("\n");
    }

    // Host result slots, zero-initialized.
    int *result[count];
    for (int i = 0; i < count; i++) {
        result[i] = (int *)malloc(sizeof(int));
        memset(result[i], 0, sizeof(int));
    }

    // Device buffers: input array and zeroed accumulator per device.
    int *dev_arr[count];
    int *dev_result[count];
    for (int i = 0; i < count; i++) {
        cudaSetDevice(i);
        cudaMalloc(&dev_arr[i], sizeof(int) * ARR_SIZE);
        cudaMalloc(&dev_result[i], sizeof(int));
        cudaMemset(dev_result[i], 0, sizeof(int));
    }

    // Wire each worker descriptor to its device's buffers.
    for (int i = 0; i < count; i++) {
        for (int j = 0; j < NUM_THR; j++) {
            cuda[i][j].arr = arr[i];
            cuda[i][j].dev_arr = dev_arr[i];
            cuda[i][j].result = result[i];
            cuda[i][j].dev_result = dev_result[i];
            cuda[i][j].dev_num = i;
            cuda[i][j].thr_num = j;
        }
    }

    // Create and join the workers, device by device.
    for (int i = 0; i < count; i++) {
        for (int j = 0; j < NUM_THR; j++) {
            pthread_create(&pthread[(i * NUM_THR) + j], NULL, thread_func,
                           (void *)&cuda[i][j]);
        }
        for (int j = 0; j < NUM_THR; j++) {
            // BUG FIX: join the thread created for device i (index i*NUM_THR+j).
            // The original joined pthread[j], i.e. device 0's already-joined
            // threads for every i > 0, which POSIX leaves undefined.
            pthread_join(pthread[(i * NUM_THR) + j], NULL);
            printf("result[%d][%d] = %d\n", i, j, (*cuda[i][j].result));
        }
    }

    // Release host and device resources (the original leaked all of these).
    for (int i = 0; i < count; i++) {
        cudaSetDevice(i);
        cudaFree(dev_arr[i]);
        cudaFree(dev_result[i]);
        free(arr[i]);
        free(result[i]);
    }
    return 0;
}