hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
176aaff7a664433bdbb0380141ca73347024daf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright 2019 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include <vector> #include "modules/perception/inference/tensorrt/plugins/leakyReLU_plugin.h" namespace apollo { namespace perception { namespace inference { template <typename Dtype> __global__ void ReLU(const int nthreads, const Dtype *in_data, const float negative_slope, Dtype *out_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { out_data[index] = in_data[index]; if (out_data[index] < 0.0) { out_data[index] *= negative_slope; } } } int ReLUPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream) { const int thread_size = 512; const int block_size = (input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize + thread_size - 1) / thread_size; const int nthreads = input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize; hipLaunchKernelGGL(( ReLU), dim3(block_size), dim3(thread_size), 0, stream, nthreads, (const float *)(inputs[0]), negative_slope_, reinterpret_cast<float *>(outputs[0])); return 1; } } // namespace inference } // namespace perception } // namespace apollo
176aaff7a664433bdbb0380141ca73347024daf9.cu
/****************************************************************************** * Copyright 2019 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include <vector> #include "modules/perception/inference/tensorrt/plugins/leakyReLU_plugin.h" namespace apollo { namespace perception { namespace inference { template <typename Dtype> __global__ void ReLU(const int nthreads, const Dtype *in_data, const float negative_slope, Dtype *out_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { out_data[index] = in_data[index]; if (out_data[index] < 0.0) { out_data[index] *= negative_slope; } } } int ReLUPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { const int thread_size = 512; const int block_size = (input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize + thread_size - 1) / thread_size; const int nthreads = input_dims_.d[0] * input_dims_.d[1] * input_dims_.d[2] * batchSize; ReLU<<<block_size, thread_size, 0, stream>>>( nthreads, (const float *)(inputs[0]), negative_slope_, reinterpret_cast<float *>(outputs[0])); return 1; } } // namespace inference } // namespace perception } // namespace apollo
0ba5f8f135546bf100d7715aa26671afa1c6a561.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Zero Intelligence Traders Implemented for CUDA devices for comparison with CPU/pthreads version. Intended to follow logically as closely as possbile to a CPU version written by Robert Axtell. Previous code used with permission by: Robert Axtell The Brookings Institution and George Mason University */ #include <stdio.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define false 0 #define true 1 #define seedRandomWithTime false // if false, seed the generator with 'seed' #define seed 1 #define buyer true #define seller false // Specify the maximum internal values... #define maxBuyerValue 20 #define maxSellerValue 20 // Specify the number of agents of each type... #define numberOfBuyers 1000000 #define numberOfSellers 1000000 #define MaxNumberOfTrades 100000000 #define numThreads 512 // Define an agent... typedef struct { int buyerOrSeller; int quantityHeld; int value; int price; } Agent; // Declare the agent populations... //Agent Buyers[numberOfBuyers]; //Agent Sellers[numberOfSellers]; Agent *Buyers; Agent *Sellers; const int agentsPerThread = numberOfBuyers/numThreads; const int tradesPerThread = MaxNumberOfTrades/numThreads; // Seeds for one random number generator per thread unsigned int seeds[numThreads]; ///////////////// // // Procedures... // ///////////////// void InitializeMiscellaneous() // // Initialize the random number generator; cannot use srand() or rand() as these are not thread safe { unsigned int i; for (i = 0; i<numThreads; i++) if (seedRandomWithTime) seeds[i] = (unsigned int) time(NULL); else seeds[i] = seed + i; } // InitializeMiscellaneous() void InitializeAgents() // // Fill the agent fields... // { // allocate managed memory for buyers and sellers size_t agentSize = numberOfBuyers * sizeof(Agent); hipMallocManaged(&Buyers, agentSize); hipMallocManaged(&Sellers, agentSize); int i; // First the buyers... 
for (i=0; i<numberOfBuyers; i=i+1) { Buyers[i].buyerOrSeller = buyer; Buyers[i].quantityHeld = 0; Buyers[i].value = (rand_r(&seeds[0]) % maxBuyerValue) + 1; }; // Now the sellers... for (i=0; i<numberOfSellers; i=i+1) { Sellers[i].buyerOrSeller = seller; Sellers[i].quantityHeld = 1; Sellers[i].value = (rand_r(&seeds[0]) % maxSellerValue) + 1; }; } // InitializeAgents() //void *DoTrades (void *threadN) __global__ void doTrades(Agent *Buyers, Agent *Sellers) { // // This function pairs agents at random and then selects a price randomly... // hiprandState_t state; hiprand_init(0, 0, 0, &state); int i, buyerIndex, sellerIndex; int bidPrice, askPrice, transactionPrice; //int threadNum = *(int*) threadN; int threadNum = threadIdx.x; int lowerBuyerBound, upperBuyerBound, lowerSellerBound, upperSellerBound; /* if (numThreads <= 10) printf("Thread %i up and running\n", threadNum); */ lowerBuyerBound = threadNum * agentsPerThread; upperBuyerBound = (threadNum + 1) * agentsPerThread - 1; lowerSellerBound = threadNum * agentsPerThread; upperSellerBound = (threadNum + 1) * agentsPerThread - 1; for (i=1; i<=tradesPerThread; i++) { // Pick a buyer at random who has not already bought a unit, // then pick a 'bid' price randomly between 1 and the agent's private value; // do { //buyerIndex = lowerBuyerBound + rand_r(&seeds[threadNum]) % (upperBuyerBound - lowerBuyerBound); buyerIndex = lowerBuyerBound + hiprand(&state) % (upperBuyerBound - lowerBuyerBound); } while (Buyers[buyerIndex].quantityHeld == 1); //bidPrice = (rand_r(&seeds[threadNum]) % Buyers[buyerIndex].value) + 1; bidPrice = (hiprand(&state) % Buyers[buyerIndex].value) + 1; // then pick an 'ask' price randomly between the agent's private value and maxSellerValue; // do { //sellerIndex = lowerSellerBound + rand_r(&seeds[threadNum]) % (upperSellerBound - lowerSellerBound); sellerIndex = lowerSellerBound + hiprand(&state) % (upperSellerBound - lowerSellerBound); } while (Sellers[sellerIndex].quantityHeld != 1); //@askPrice 
= Sellers[sellerIndex].value + (rand_r(&seeds[threadNum]) % (maxSellerValue - Sellers[sellerIndex].value + 1)); askPrice = Sellers[sellerIndex].value + (hiprand(&state) % (maxSellerValue - Sellers[sellerIndex].value + 1)); // Pick a seller at random who has not already sold a unit, // Let's see if a deal can be made... // if (bidPrice >= askPrice) { // First, compute the transaction price... // transactionPrice = askPrice + hiprand(&state) % (bidPrice - askPrice + 1); Buyers[buyerIndex].price = transactionPrice; Sellers[sellerIndex].price = transactionPrice; // // Then execute the exchange... // Buyers[buyerIndex].quantityHeld = 1; Sellers[sellerIndex].quantityHeld = 0; }; //return 0; }; } // DoTrades() void ComputeStatistics(clock_t elapsedTime) // // Determine the total quantities bought and sold... // ...as well as statistics about prices // { int i; int numberBought = 0; int numberSold= 0; int sum = 0; double sum2 = 0.0; int N = 0; double avgPrice, sd; // First, compute the quantity purchased... // for (i=0; i<numberOfBuyers; i++) if (Buyers[i].quantityHeld == 1) numberBought++; // Next, get the quantity sold... // for (i=0; i<numberOfSellers; i++) if (Sellers[i].quantityHeld == 0) numberSold++; // Now let's compute the average price paid as well as the standard deviation... // for (i=0; i<numberOfBuyers; i++) if (Buyers[i].quantityHeld == 1) { sum += Buyers[i].price; sum2 += pow(Buyers[i].price, 2); N++; }; for (i=0; i<numberOfSellers; i++) if (Sellers[i].quantityHeld == 0) { sum += Sellers[i].price; sum2 += pow(Sellers[i].price, 2); N++; }; avgPrice = (double) sum / (double) N; sd = sqrt((sum2 - (double) N * pow(avgPrice, 2)) / (double) (N - 1)); printf("%i items bought and %i items sold\n", numberBought, numberSold); printf("The average price = %f and the s.d. 
is %f\n", avgPrice, sd); printf("The total time on CPUs was %f seconds\n", (double) elapsedTime/CLOCKS_PER_SEC); } // ComputeStatistics() void OpenMarket() { clock_t startTime1, endTime1; time_t startTime2, endTime2; /* int threadNumber, status; pthread_t threads[numThreads]; int args[numThreads]; void *threadResult[numThreads]; */ startTime1 = clock(); time(&startTime2); /* for (threadNumber = 0; threadNumber < numThreads; threadNumber++) { args[threadNumber] = threadNumber; status = pthread_create(&threads[threadNumber], NULL, DoTrades, &args[threadNumber]); if (status != 0) printf("Problem launching thread %i", threadNumber); }; for (threadNumber = 0; threadNumber < numThreads; threadNumber++) { status = pthread_join(threads[threadNumber], &threadResult[threadNumber]); if (status != 0) printf("Problem joining thread %i",threadNumber); }; for (threadNumber = 0; threadNumber < numThreads; threadNumber++) if (threadResult[threadNumber] != 0) printf("Problem with termination of thread %i\n", threadNumber); */ hipLaunchKernelGGL(( doTrades), dim3(1), dim3(numThreads), 0, 0, Buyers, Sellers); hipDeviceSynchronize(); endTime1 = clock(); time(&endTime2); ComputeStatistics(endTime1 - startTime1); endTime2 = (endTime2 - startTime2); printf("Wall time: %d seconds\n", (int) endTime2); } int main() { printf("\nZERO INTELLIGENCE TRADERS\n"); // printf("%d",sizeof(Agent)); InitializeMiscellaneous(); InitializeAgents(); OpenMarket(); return(0); }
0ba5f8f135546bf100d7715aa26671afa1c6a561.cu
/* Zero Intelligence Traders Implemented for CUDA devices for comparison with CPU/pthreads version. Intended to follow logically as closely as possbile to a CPU version written by Robert Axtell. Previous code used with permission by: Robert Axtell The Brookings Institution and George Mason University */ #include <stdio.h> #include <curand.h> #include <curand_kernel.h> #define false 0 #define true 1 #define seedRandomWithTime false // if false, seed the generator with 'seed' #define seed 1 #define buyer true #define seller false // Specify the maximum internal values... #define maxBuyerValue 20 #define maxSellerValue 20 // Specify the number of agents of each type... #define numberOfBuyers 1000000 #define numberOfSellers 1000000 #define MaxNumberOfTrades 100000000 #define numThreads 512 // Define an agent... typedef struct { int buyerOrSeller; int quantityHeld; int value; int price; } Agent; // Declare the agent populations... //Agent Buyers[numberOfBuyers]; //Agent Sellers[numberOfSellers]; Agent *Buyers; Agent *Sellers; const int agentsPerThread = numberOfBuyers/numThreads; const int tradesPerThread = MaxNumberOfTrades/numThreads; // Seeds for one random number generator per thread unsigned int seeds[numThreads]; ///////////////// // // Procedures... // ///////////////// void InitializeMiscellaneous() // // Initialize the random number generator; cannot use srand() or rand() as these are not thread safe { unsigned int i; for (i = 0; i<numThreads; i++) if (seedRandomWithTime) seeds[i] = (unsigned int) time(NULL); else seeds[i] = seed + i; } // InitializeMiscellaneous() void InitializeAgents() // // Fill the agent fields... // { // allocate managed memory for buyers and sellers size_t agentSize = numberOfBuyers * sizeof(Agent); cudaMallocManaged(&Buyers, agentSize); cudaMallocManaged(&Sellers, agentSize); int i; // First the buyers... 
for (i=0; i<numberOfBuyers; i=i+1) { Buyers[i].buyerOrSeller = buyer; Buyers[i].quantityHeld = 0; Buyers[i].value = (rand_r(&seeds[0]) % maxBuyerValue) + 1; }; // Now the sellers... for (i=0; i<numberOfSellers; i=i+1) { Sellers[i].buyerOrSeller = seller; Sellers[i].quantityHeld = 1; Sellers[i].value = (rand_r(&seeds[0]) % maxSellerValue) + 1; }; } // InitializeAgents() //void *DoTrades (void *threadN) __global__ void doTrades(Agent *Buyers, Agent *Sellers) { // // This function pairs agents at random and then selects a price randomly... // curandState_t state; curand_init(0, 0, 0, &state); int i, buyerIndex, sellerIndex; int bidPrice, askPrice, transactionPrice; //int threadNum = *(int*) threadN; int threadNum = threadIdx.x; int lowerBuyerBound, upperBuyerBound, lowerSellerBound, upperSellerBound; /* if (numThreads <= 10) printf("Thread %i up and running\n", threadNum); */ lowerBuyerBound = threadNum * agentsPerThread; upperBuyerBound = (threadNum + 1) * agentsPerThread - 1; lowerSellerBound = threadNum * agentsPerThread; upperSellerBound = (threadNum + 1) * agentsPerThread - 1; for (i=1; i<=tradesPerThread; i++) { // Pick a buyer at random who has not already bought a unit, // then pick a 'bid' price randomly between 1 and the agent's private value; // do { //buyerIndex = lowerBuyerBound + rand_r(&seeds[threadNum]) % (upperBuyerBound - lowerBuyerBound); buyerIndex = lowerBuyerBound + curand(&state) % (upperBuyerBound - lowerBuyerBound); } while (Buyers[buyerIndex].quantityHeld == 1); //bidPrice = (rand_r(&seeds[threadNum]) % Buyers[buyerIndex].value) + 1; bidPrice = (curand(&state) % Buyers[buyerIndex].value) + 1; // then pick an 'ask' price randomly between the agent's private value and maxSellerValue; // do { //sellerIndex = lowerSellerBound + rand_r(&seeds[threadNum]) % (upperSellerBound - lowerSellerBound); sellerIndex = lowerSellerBound + curand(&state) % (upperSellerBound - lowerSellerBound); } while (Sellers[sellerIndex].quantityHeld != 1); //@askPrice = 
Sellers[sellerIndex].value + (rand_r(&seeds[threadNum]) % (maxSellerValue - Sellers[sellerIndex].value + 1)); askPrice = Sellers[sellerIndex].value + (curand(&state) % (maxSellerValue - Sellers[sellerIndex].value + 1)); // Pick a seller at random who has not already sold a unit, // Let's see if a deal can be made... // if (bidPrice >= askPrice) { // First, compute the transaction price... // transactionPrice = askPrice + curand(&state) % (bidPrice - askPrice + 1); Buyers[buyerIndex].price = transactionPrice; Sellers[sellerIndex].price = transactionPrice; // // Then execute the exchange... // Buyers[buyerIndex].quantityHeld = 1; Sellers[sellerIndex].quantityHeld = 0; }; //return 0; }; } // DoTrades() void ComputeStatistics(clock_t elapsedTime) // // Determine the total quantities bought and sold... // ...as well as statistics about prices // { int i; int numberBought = 0; int numberSold= 0; int sum = 0; double sum2 = 0.0; int N = 0; double avgPrice, sd; // First, compute the quantity purchased... // for (i=0; i<numberOfBuyers; i++) if (Buyers[i].quantityHeld == 1) numberBought++; // Next, get the quantity sold... // for (i=0; i<numberOfSellers; i++) if (Sellers[i].quantityHeld == 0) numberSold++; // Now let's compute the average price paid as well as the standard deviation... // for (i=0; i<numberOfBuyers; i++) if (Buyers[i].quantityHeld == 1) { sum += Buyers[i].price; sum2 += pow(Buyers[i].price, 2); N++; }; for (i=0; i<numberOfSellers; i++) if (Sellers[i].quantityHeld == 0) { sum += Sellers[i].price; sum2 += pow(Sellers[i].price, 2); N++; }; avgPrice = (double) sum / (double) N; sd = sqrt((sum2 - (double) N * pow(avgPrice, 2)) / (double) (N - 1)); printf("%i items bought and %i items sold\n", numberBought, numberSold); printf("The average price = %f and the s.d. 
is %f\n", avgPrice, sd); printf("The total time on CPUs was %f seconds\n", (double) elapsedTime/CLOCKS_PER_SEC); } // ComputeStatistics() void OpenMarket() { clock_t startTime1, endTime1; time_t startTime2, endTime2; /* int threadNumber, status; pthread_t threads[numThreads]; int args[numThreads]; void *threadResult[numThreads]; */ startTime1 = clock(); time(&startTime2); /* for (threadNumber = 0; threadNumber < numThreads; threadNumber++) { args[threadNumber] = threadNumber; status = pthread_create(&threads[threadNumber], NULL, DoTrades, &args[threadNumber]); if (status != 0) printf("Problem launching thread %i", threadNumber); }; for (threadNumber = 0; threadNumber < numThreads; threadNumber++) { status = pthread_join(threads[threadNumber], &threadResult[threadNumber]); if (status != 0) printf("Problem joining thread %i",threadNumber); }; for (threadNumber = 0; threadNumber < numThreads; threadNumber++) if (threadResult[threadNumber] != 0) printf("Problem with termination of thread %i\n", threadNumber); */ doTrades<<<1, numThreads>>>(Buyers, Sellers); cudaDeviceSynchronize(); endTime1 = clock(); time(&endTime2); ComputeStatistics(endTime1 - startTime1); endTime2 = (endTime2 - startTime2); printf("Wall time: %d seconds\n", (int) endTime2); } int main() { printf("\nZERO INTELLIGENCE TRADERS\n"); // printf("%d",sizeof(Agent)); InitializeMiscellaneous(); InitializeAgents(); OpenMarket(); return(0); }
0e1c184c1a78bb5aa198e7fa8f43a9df0a6fd699.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef __NVCC__ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #else #include <__clang_cuda_runtime_wrapper.h> #endif #include <sys/time.h> #define BIGRND 0x7fffffff #define GPU #define THREADS 256 #define WIDTH 16 // shared memory width #define HEIGHT 16 // shared memory height #define ETA 0.3 // eta value #define MOMENTUM 0.3 // momentum value #define NUM_THREAD 4 // OpenMP threads #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) //////////////////////////////////////////////////////////////////////////////// typedef struct { int input_n; /* number of input units */ int hidden_n; /* number of hidden units */ int output_n; /* number of output units */ float *input_units; /* the input units */ float *hidden_units; /* the hidden units */ float *output_units; /* the output units */ float *hidden_delta; /* storage for hidden unit error */ float *output_delta; /* storage for output unit error */ float *target; /* storage for target vector */ float **input_weights; /* weights from input to hidden layer */ float **hidden_weights; /* weights from hidden to output layer */ /*** The next two are for momentum ***/ float **input_prev_weights; /* previous change on input to hidden wgt */ float **hidden_prev_weights; /* previous change on hidden to output wgt */ } BPNN; int layer_size = 0; unsigned int num_threads = 0; unsigned int num_blocks = 0; extern "C" __global__ void bpnn_layerforward_CUDA( __attribute__((annotate("640001"))) float *input_cuda, __attribute__((annotate("17"))) float *output_hidden_cuda, __attribute__((annotate("17,640001"))) float *input_hidden_cuda, __attribute__((annotate("16,40000"))) float *hidden_partial_sum, __attribute__((annotate("640000"))) int in, __attribute__((annotate("16"))) int hid) __attribute__((annotate("1,40000:16,16"))) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index 
= (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1); int index_in = HEIGHT * by + ty + 1; __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; if (tx == 0) input_node[ty] = input_cuda[index_in]; __syncthreads(); weight_matrix[ty][tx] = input_hidden_cuda[index]; __syncthreads(); weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty]; __syncthreads(); for (int i = 1; i <= __log2f(HEIGHT); i++) { int power_two = __powf(2, i); if (ty % power_two == 0) weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two / 2][tx]; __syncthreads(); } //__syncthreads(); input_hidden_cuda[index] = weight_matrix[ty][tx]; /* for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){ unsigned int power_two = i - 1; if( (ty & power_two) == 0 ) { weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; } } */ __syncthreads(); if (tx == 0) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } extern "C" __global__ void bpnn_adjust_weights_cuda(__attribute__((annotate("17"))) float *delta, __attribute__((annotate("16"))) int hid, __attribute__((annotate("640001"))) float *ly, __attribute__((annotate("640000"))) int in, __attribute__((annotate("17,640001"))) float *w, __attribute__((annotate("17,640001"))) float *oldw) __attribute__((annotate("1,40000:16,16"))) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1); int index_y = HEIGHT * by + ty + 1; int index_x = tx + 1; w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); __syncthreads(); if (ty == 0 && by == 0) { w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); } } extern "C" float squash(float x) { // float m; // x = -x; // m = 1 + x + x*x/2 + x*x*x/6 + 
x*x*x*x/24 + x*x*x*x*x/120; // return(1.0 / (1.0 + m)); return (1.0 / (1.0 + exp(-x))); } extern "C" void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2) { float sum; int j, k; /*** Set up thresholding unit ***/ l1[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static) #endif /*** For each unit in second layer ***/ for (j = 1; j <= n2; j++) { /*** Compute weighted sum of its inputs ***/ sum = 0.0; for (k = 0; k <= n1; k++) { sum += conn[k][j] * l1[k]; } l2[j] = squash(sum); } } extern "C" void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err) { int j; float o, t, errsum; errsum = 0.0; for (j = 1; j <= nj; j++) { o = output[j]; t = target[j]; delta[j] = o * (1.0 - o) * (t - o); errsum += ABS(delta[j]); } *err = errsum; } extern "C" void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err) { int j, k; float h, sum, errsum; errsum = 0.0; for (j = 1; j <= nh; j++) { h = hidden[j]; sum = 0.0; for (k = 1; k <= no; k++) { sum += delta_o[k] * who[j][k]; } delta_h[j] = h * (1.0 - h) * sum; errsum += ABS(delta_h[j]); } *err = errsum; } extern "C" void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw) { float new_dw; int k, j; ly[0] = 1.0; // eta = 0.3; // momentum = 0.3; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(oldw, w, delta) private(j, k, new_dw) \ firstprivate(ndelta, nly, momentum) #endif for (j = 1; j <= ndelta; j++) { for (k = 0; k <= nly; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j])); w[k][j] += new_dw; oldw[k][j] = new_dw; } } } extern "C" float *alloc_1d_dbl(int n) { float *p; p = (float *)malloc((unsigned)(n * sizeof(float))); if (p == NULL) { printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return p; } extern "C" float 
**alloc_2d_dbl(int m, int n) { int i; float **p; p = (float **)malloc((unsigned)(m * sizeof(float *))); if (p == NULL) { printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } for (i = 0; i < m; i++) { p[i] = alloc_1d_dbl(n); } return p; } extern "C" void bpnn_initialize(int seed) { printf("Random number generator seed: %d\n", seed); srand(seed); } extern "C" BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = (BPNN *)malloc(sizeof(BPNN)); if (newnet == NULL) { printf("BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } extern "C" void bpnn_randomize_weights(float **w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = (float)rand() / (float)RAND_MAX; } } } extern "C" void bpnn_randomize_row(float *w, int m) { int i; for (i = 0; i <= m; i++) { // w[i] = (float) rand()/RAND_MAX; w[i] = 0.1; } } extern "C" void bpnn_zero_weights(float **w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = 0.0; } } } extern "C" BPNN *bpnn_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif 
bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } extern "C" void load(BPNN *net) { int nr = layer_size; // int nc = ??? // KERMA: was not initialized // int imgsize = nr * nc; // KERMA: never used float *units = net->input_units; int k = 1; for (int i = 0; i < nr; i++) { units[k] = (float)rand() / (float)RAND_MAX; k++; } } double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } extern "C" void bpnn_train_cuda(BPNN *net, float *eo, float *eh) { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; int m = 0; float *input_hidden_cuda; float *input_cuda; float *output_hidden_cuda; float *partial_sum; float *hidden_partial_sum; float *hidden_delta_cuda; float *input_prev_weights_cuda; float sum; float *input_weights_one_dim; float *input_weights_prev_one_dim; num_blocks = in / 16; dim3 grid(1, num_blocks); dim3 threads(16, 16); input_weights_one_dim = (float *)malloc((in + 1) * (hid + 1) * sizeof(float)); input_weights_prev_one_dim = (float *)malloc((in + 1) * (hid + 1) * sizeof(float)); partial_sum = (float *)malloc(num_blocks * WIDTH * sizeof(float)); // this preprocessing stage is added to correct the bugs of wrong memcopy // using two-dimensional net->inputweights for (int k = 0; k <= in; k++) { for (int j = 0; j <= hid; j++) { input_weights_one_dim[m] = net->input_weights[k][j]; input_weights_prev_one_dim[m] = net->input_prev_weights[k][j]; m++; } } hipMalloc((void **)&input_cuda, (in + 1) * sizeof(float)); hipMalloc((void **)&output_hidden_cuda, (hid + 1) * sizeof(float)); hipMalloc((void **)&input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float)); 
hipMalloc((void **)&hidden_partial_sum, num_blocks * WIDTH * sizeof(float)); printf("Performing GPU computation\n"); hipMemcpy(input_cuda, net->input_units, (in + 1) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyHostToDevice); double t_start = rtclock(); hipLaunchKernelGGL(( bpnn_layerforward_CUDA), dim3(grid), dim3(threads), 0, 0, input_cuda, output_hidden_cuda, input_hidden_cuda, hidden_partial_sum, in, hid); // hipDeviceSynchronize(); hipDeviceSynchronize(); double t_end = rtclock(); hipError_t error = hipGetLastError(); if (error != hipSuccess) { printf("bpnn kernel error: %s\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipMemcpy(partial_sum, hidden_partial_sum, num_blocks * WIDTH * sizeof(float), hipMemcpyDeviceToHost); for (int j = 1; j <= hid; j++) { sum = 0.0; for (int k = 0; k < num_blocks; k++) { sum += partial_sum[k * hid + j - 1]; } sum += net->input_weights[0][j]; net->hidden_units[j] = float(1.0 / (1.0 + exp(-sum))); } bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); hipMalloc((void **)&hidden_delta_cuda, (hid + 1) * sizeof(float)); hipMalloc((void **)&input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float)); hipMemcpy(hidden_delta_cuda, net->hidden_delta, (hid + 1) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(input_prev_weights_cuda, input_weights_prev_one_dim, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyHostToDevice); double t_start2 = rtclock(); hipLaunchKernelGGL(( 
bpnn_adjust_weights_cuda), dim3(grid), dim3(threads), 0, 0, hidden_delta_cuda, hid, input_cuda, in, input_hidden_cuda, input_prev_weights_cuda); hipDeviceSynchronize(); double t_end2 = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", (t_end - t_start) + (t_end2 - t_start2)); hipMemcpy(net->input_units, input_cuda, (in + 1) * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(input_weights_one_dim, input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyDeviceToHost); hipFree(input_cuda); hipFree(output_hidden_cuda); hipFree(input_hidden_cuda); hipFree(hidden_partial_sum); hipFree(input_prev_weights_cuda); hipFree(hidden_delta_cuda); free(partial_sum); free(input_weights_one_dim); free(input_weights_prev_one_dim); } void bpnn_free(BPNN *net) { int n1 = net->input_n; int n2 = net->hidden_n; free((char *)net->input_units); free((char *)net->hidden_units); free((char *)net->output_units); free((char *)net->hidden_delta); free((char *)net->output_delta); free((char *)net->target); for (int i = 0; i <= n1; i++) { free((char *)net->input_weights[i]); free((char *)net->input_prev_weights[i]); } free((char *)net->input_weights); free((char *)net->input_prev_weights); for (int i = 0; i <= n2; i++) { free((char *)net->hidden_weights[i]); free((char *)net->hidden_prev_weights[i]); } free((char *)net->hidden_weights); free((char *)net->hidden_prev_weights); free(net); } extern "C" void backprop_face() { BPNN *net; // int i; // KERMA: never used float out_err, hid_err; net = bpnn_create(layer_size, 16, 1); // (16, 1 can not be changed) printf("Input layer size : %d\n", layer_size); load(net); // entering the training kernel, only one iteration printf("Starting training kernel\n"); bpnn_train_cuda(net, &out_err, &hid_err); bpnn_free(net); printf("Training done\n"); } int setup(int argc, char *argv[]) { int seed; if (argc != 2) { fprintf(stderr, "usage: backprop <num of input elements>\n"); exit(0); } layer_size = atoi(argv[1]); if (layer_size % 16 != 0) { 
fprintf(stderr, "The number of input points must be divided by 16\n"); exit(0); } seed = 7; bpnn_initialize(seed); backprop_face(); return 0; } double gettime() { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec + t.tv_usec * 1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { return setup(argc, argv); }
0e1c184c1a78bb5aa198e7fa8f43a9df0a6fd699.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef __NVCC__ #include <cuda.h> #include <cuda_runtime.h> #else #include <__clang_cuda_runtime_wrapper.h> #endif #include <sys/time.h> #define BIGRND 0x7fffffff #define GPU #define THREADS 256 #define WIDTH 16 // shared memory width #define HEIGHT 16 // shared memory height #define ETA 0.3 // eta value #define MOMENTUM 0.3 // momentum value #define NUM_THREAD 4 // OpenMP threads #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) //////////////////////////////////////////////////////////////////////////////// typedef struct { int input_n; /* number of input units */ int hidden_n; /* number of hidden units */ int output_n; /* number of output units */ float *input_units; /* the input units */ float *hidden_units; /* the hidden units */ float *output_units; /* the output units */ float *hidden_delta; /* storage for hidden unit error */ float *output_delta; /* storage for output unit error */ float *target; /* storage for target vector */ float **input_weights; /* weights from input to hidden layer */ float **hidden_weights; /* weights from hidden to output layer */ /*** The next two are for momentum ***/ float **input_prev_weights; /* previous change on input to hidden wgt */ float **hidden_prev_weights; /* previous change on hidden to output wgt */ } BPNN; int layer_size = 0; unsigned int num_threads = 0; unsigned int num_blocks = 0; extern "C" __global__ void bpnn_layerforward_CUDA( __attribute__((annotate("640001"))) float *input_cuda, __attribute__((annotate("17"))) float *output_hidden_cuda, __attribute__((annotate("17,640001"))) float *input_hidden_cuda, __attribute__((annotate("16,40000"))) float *hidden_partial_sum, __attribute__((annotate("640000"))) int in, __attribute__((annotate("16"))) int hid) __attribute__((annotate("1,40000:16,16"))) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1); int 
index_in = HEIGHT * by + ty + 1; __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; if (tx == 0) input_node[ty] = input_cuda[index_in]; __syncthreads(); weight_matrix[ty][tx] = input_hidden_cuda[index]; __syncthreads(); weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty]; __syncthreads(); for (int i = 1; i <= __log2f(HEIGHT); i++) { int power_two = __powf(2, i); if (ty % power_two == 0) weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two / 2][tx]; __syncthreads(); } //__syncthreads(); input_hidden_cuda[index] = weight_matrix[ty][tx]; /* for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){ unsigned int power_two = i - 1; if( (ty & power_two) == 0 ) { weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; } } */ __syncthreads(); if (tx == 0) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } extern "C" __global__ void bpnn_adjust_weights_cuda(__attribute__((annotate("17"))) float *delta, __attribute__((annotate("16"))) int hid, __attribute__((annotate("640001"))) float *ly, __attribute__((annotate("640000"))) int in, __attribute__((annotate("17,640001"))) float *w, __attribute__((annotate("17,640001"))) float *oldw) __attribute__((annotate("1,40000:16,16"))) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1); int index_y = HEIGHT * by + ty + 1; int index_x = tx + 1; w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); __syncthreads(); if (ty == 0 && by == 0) { w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); } } extern "C" float squash(float x) { // float m; // x = -x; // m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120; // return(1.0 / (1.0 + m)); return (1.0 / (1.0 + 
exp(-x))); } extern "C" void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2) { float sum; int j, k; /*** Set up thresholding unit ***/ l1[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static) #endif /*** For each unit in second layer ***/ for (j = 1; j <= n2; j++) { /*** Compute weighted sum of its inputs ***/ sum = 0.0; for (k = 0; k <= n1; k++) { sum += conn[k][j] * l1[k]; } l2[j] = squash(sum); } } extern "C" void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err) { int j; float o, t, errsum; errsum = 0.0; for (j = 1; j <= nj; j++) { o = output[j]; t = target[j]; delta[j] = o * (1.0 - o) * (t - o); errsum += ABS(delta[j]); } *err = errsum; } extern "C" void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err) { int j, k; float h, sum, errsum; errsum = 0.0; for (j = 1; j <= nh; j++) { h = hidden[j]; sum = 0.0; for (k = 1; k <= no; k++) { sum += delta_o[k] * who[j][k]; } delta_h[j] = h * (1.0 - h) * sum; errsum += ABS(delta_h[j]); } *err = errsum; } extern "C" void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw) { float new_dw; int k, j; ly[0] = 1.0; // eta = 0.3; // momentum = 0.3; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(oldw, w, delta) private(j, k, new_dw) \ firstprivate(ndelta, nly, momentum) #endif for (j = 1; j <= ndelta; j++) { for (k = 0; k <= nly; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j])); w[k][j] += new_dw; oldw[k][j] = new_dw; } } } extern "C" float *alloc_1d_dbl(int n) { float *p; p = (float *)malloc((unsigned)(n * sizeof(float))); if (p == NULL) { printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return p; } extern "C" float **alloc_2d_dbl(int m, int n) { int i; float **p; p = (float **)malloc((unsigned)(m * 
sizeof(float *))); if (p == NULL) { printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } for (i = 0; i < m; i++) { p[i] = alloc_1d_dbl(n); } return p; } extern "C" void bpnn_initialize(int seed) { printf("Random number generator seed: %d\n", seed); srand(seed); } extern "C" BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = (BPNN *)malloc(sizeof(BPNN)); if (newnet == NULL) { printf("BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } extern "C" void bpnn_randomize_weights(float **w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = (float)rand() / (float)RAND_MAX; } } } extern "C" void bpnn_randomize_row(float *w, int m) { int i; for (i = 0; i <= m; i++) { // w[i] = (float) rand()/RAND_MAX; w[i] = 0.1; } } extern "C" void bpnn_zero_weights(float **w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = 0.0; } } } extern "C" BPNN *bpnn_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); 
bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } extern "C" void load(BPNN *net) { int nr = layer_size; // int nc = ??? // KERMA: was not initialized // int imgsize = nr * nc; // KERMA: never used float *units = net->input_units; int k = 1; for (int i = 0; i < nr; i++) { units[k] = (float)rand() / (float)RAND_MAX; k++; } } double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } extern "C" void bpnn_train_cuda(BPNN *net, float *eo, float *eh) { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; int m = 0; float *input_hidden_cuda; float *input_cuda; float *output_hidden_cuda; float *partial_sum; float *hidden_partial_sum; float *hidden_delta_cuda; float *input_prev_weights_cuda; float sum; float *input_weights_one_dim; float *input_weights_prev_one_dim; num_blocks = in / 16; dim3 grid(1, num_blocks); dim3 threads(16, 16); input_weights_one_dim = (float *)malloc((in + 1) * (hid + 1) * sizeof(float)); input_weights_prev_one_dim = (float *)malloc((in + 1) * (hid + 1) * sizeof(float)); partial_sum = (float *)malloc(num_blocks * WIDTH * sizeof(float)); // this preprocessing stage is added to correct the bugs of wrong memcopy // using two-dimensional net->inputweights for (int k = 0; k <= in; k++) { for (int j = 0; j <= hid; j++) { input_weights_one_dim[m] = net->input_weights[k][j]; input_weights_prev_one_dim[m] = net->input_prev_weights[k][j]; m++; } } cudaMalloc((void **)&input_cuda, (in + 1) * sizeof(float)); cudaMalloc((void **)&output_hidden_cuda, (hid + 1) * sizeof(float)); cudaMalloc((void **)&input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float)); cudaMalloc((void **)&hidden_partial_sum, num_blocks * WIDTH * 
sizeof(float)); printf("Performing GPU computation\n"); cudaMemcpy(input_cuda, net->input_units, (in + 1) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyHostToDevice); double t_start = rtclock(); bpnn_layerforward_CUDA<<<grid, threads>>>(input_cuda, output_hidden_cuda, input_hidden_cuda, hidden_partial_sum, in, hid); // cudaThreadSynchronize(); cudaDeviceSynchronize(); double t_end = rtclock(); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { printf("bpnn kernel error: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaMemcpy(partial_sum, hidden_partial_sum, num_blocks * WIDTH * sizeof(float), cudaMemcpyDeviceToHost); for (int j = 1; j <= hid; j++) { sum = 0.0; for (int k = 0; k < num_blocks; k++) { sum += partial_sum[k * hid + j - 1]; } sum += net->input_weights[0][j]; net->hidden_units[j] = float(1.0 / (1.0 + exp(-sum))); } bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); cudaMalloc((void **)&hidden_delta_cuda, (hid + 1) * sizeof(float)); cudaMalloc((void **)&input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float)); cudaMemcpy(hidden_delta_cuda, net->hidden_delta, (hid + 1) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(input_prev_weights_cuda, input_weights_prev_one_dim, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyHostToDevice); double t_start2 = rtclock(); bpnn_adjust_weights_cuda<<<grid, threads>>>(hidden_delta_cuda, hid, input_cuda, in, input_hidden_cuda, 
input_prev_weights_cuda); cudaDeviceSynchronize(); double t_end2 = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", (t_end - t_start) + (t_end2 - t_start2)); cudaMemcpy(net->input_units, input_cuda, (in + 1) * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(input_weights_one_dim, input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(input_cuda); cudaFree(output_hidden_cuda); cudaFree(input_hidden_cuda); cudaFree(hidden_partial_sum); cudaFree(input_prev_weights_cuda); cudaFree(hidden_delta_cuda); free(partial_sum); free(input_weights_one_dim); free(input_weights_prev_one_dim); } void bpnn_free(BPNN *net) { int n1 = net->input_n; int n2 = net->hidden_n; free((char *)net->input_units); free((char *)net->hidden_units); free((char *)net->output_units); free((char *)net->hidden_delta); free((char *)net->output_delta); free((char *)net->target); for (int i = 0; i <= n1; i++) { free((char *)net->input_weights[i]); free((char *)net->input_prev_weights[i]); } free((char *)net->input_weights); free((char *)net->input_prev_weights); for (int i = 0; i <= n2; i++) { free((char *)net->hidden_weights[i]); free((char *)net->hidden_prev_weights[i]); } free((char *)net->hidden_weights); free((char *)net->hidden_prev_weights); free(net); } extern "C" void backprop_face() { BPNN *net; // int i; // KERMA: never used float out_err, hid_err; net = bpnn_create(layer_size, 16, 1); // (16, 1 can not be changed) printf("Input layer size : %d\n", layer_size); load(net); // entering the training kernel, only one iteration printf("Starting training kernel\n"); bpnn_train_cuda(net, &out_err, &hid_err); bpnn_free(net); printf("Training done\n"); } int setup(int argc, char *argv[]) { int seed; if (argc != 2) { fprintf(stderr, "usage: backprop <num of input elements>\n"); exit(0); } layer_size = atoi(argv[1]); if (layer_size % 16 != 0) { fprintf(stderr, "The number of input points must be divided by 16\n"); exit(0); } seed = 7; bpnn_initialize(seed); 
backprop_face(); return 0; } double gettime() { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec + t.tv_usec * 1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { return setup(argc, argv); }
a7976cc92053d24b6d179d54c3ea0b8354b7632d.hip
// !!! This is a file automatically generated by hipify!!! // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha // ============================================================================= // // Base class for processing sph force in fsi system.// // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForce.cuh" #include "chrono_fsi/utils/ChUtilsDevice.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" //========================================================================================================================================== namespace chrono { namespace fsi { ChFsiForce::ChFsiForce(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<ChCounters> otherNumObjects, bool verb) : bceWorker(otherBceWorker), sortedSphMarkersD(otherSortedSphMarkersD), markersProximityD(otherMarkersProximityD), fsiGeneralData(otherFsiGeneralData), numObjectsH(otherNumObjects), paramsH(otherParamsH), verbose(verb) { fsiCollisionSystem = chrono_types::make_shared<ChCollisionSystemFsi>(sortedSphMarkersD, markersProximityD, fsiGeneralData, paramsH, numObjectsH); sphMarkersD = NULL; } //-------------------------------------------------------------------------------------------------------------------------------- void 
ChFsiForce::Initialize() { hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters)); vel_XSPH_Sorted_D.resize(numObjectsH->numAllMarkers); vel_vis_Sorted_D.resize(numObjectsH->numAllMarkers); derivVelRhoD_Sorted_D.resize(numObjectsH->numAllMarkers); fsiCollisionSystem->Initialize(); } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForce::~ChFsiForce() {} void ChFsiForce::SetLinearSolver(SolverType type) { switch (type) { case SolverType::BICGSTAB: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); break; case SolverType::GMRES: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverGMRES>(); break; default: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); std::cout << "The ChFsiLinearSolver you chose has not been implemented, reverting back to " "ChFsiLinearSolverBiCGStab\n"; } } //-------------------------------------------------------------------------------------------------------------------------------- // Use invasive to avoid one extra copy. // However, keep in mind that sorted is changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R3(thrust::device_vector<Real3>& original, thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R3(thrust::device_vector<Real3>& original, const thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real3> dummySorted = sorted; CopySortedToOriginal_Invasive_R3(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- // Use invasive to avoid one extra copy. // However, keep in mind that sorted is changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real4> dummySorted = sorted; CopySortedToOriginal_Invasive_R4(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- } // namespace fsi } // namespace chrono
a7976cc92053d24b6d179d54c3ea0b8354b7632d.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha // ============================================================================= // // Base class for processing sph force in fsi system.// // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForce.cuh" #include "chrono_fsi/utils/ChUtilsDevice.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" //========================================================================================================================================== namespace chrono { namespace fsi { ChFsiForce::ChFsiForce(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<ChCounters> otherNumObjects, bool verb) : bceWorker(otherBceWorker), sortedSphMarkersD(otherSortedSphMarkersD), markersProximityD(otherMarkersProximityD), fsiGeneralData(otherFsiGeneralData), numObjectsH(otherNumObjects), paramsH(otherParamsH), verbose(verb) { fsiCollisionSystem = chrono_types::make_shared<ChCollisionSystemFsi>(sortedSphMarkersD, markersProximityD, fsiGeneralData, paramsH, numObjectsH); sphMarkersD = NULL; } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::Initialize() { cudaMemcpyToSymbolAsync(paramsD, 
paramsH.get(), sizeof(SimParams)); cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters)); vel_XSPH_Sorted_D.resize(numObjectsH->numAllMarkers); vel_vis_Sorted_D.resize(numObjectsH->numAllMarkers); derivVelRhoD_Sorted_D.resize(numObjectsH->numAllMarkers); fsiCollisionSystem->Initialize(); } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForce::~ChFsiForce() {} void ChFsiForce::SetLinearSolver(SolverType type) { switch (type) { case SolverType::BICGSTAB: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); break; case SolverType::GMRES: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverGMRES>(); break; default: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); std::cout << "The ChFsiLinearSolver you chose has not been implemented, reverting back to " "ChFsiLinearSolverBiCGStab\n"; } } //-------------------------------------------------------------------------------------------------------------------------------- // Use invasive to avoid one extra copy. // However, keep in mind that sorted is changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R3(thrust::device_vector<Real3>& original, thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R3(thrust::device_vector<Real3>& original, const thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real3> dummySorted = sorted; CopySortedToOriginal_Invasive_R3(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- // Use invasive to avoid one extra copy. // However, keep in mind that sorted is changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real4> dummySorted = sorted; CopySortedToOriginal_Invasive_R4(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- } // namespace fsi } // namespace chrono
571a711fdee95bd8cd1938d591a8061beed0132d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> //for random intialize #include <stdlib.h> #include <time.h> //for memset #include <cstring> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char * file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr, "GPU assert: %s %s %d\n", hipGetErrorString(code), file, line ); if (abort) exit(code); } } __global__ void sum_array_gpu( int* a, int* b, int* c, int* result, int size) { int gid = blockIdx.x *blockDim.x +threadIdx.x; if(gid <= size) { result[gid] = a[gid] + b[gid] + c[gid]; } } void sum_array_cpu( int* a, int* b, int* c, int* result, int size) { for (int i=0; i < size; i++) { result[i] = a[i] + b[i] + c[i]; } } void compare_arrays (int* gpu, int* cpu, int size){ for ( int i = 0; i < size ; i++){ if(gpu[i]!= cpu[i]){ printf("Arrays are different \n"); return; } } printf("Arrays are same \n"); } int main() { int size = pow(2,22); int block_size = 512; int NO_BYTES = size * sizeof(int); // Allocate memory in Host int* h_a, *h_b, *h_c, *gpu_results, *cpu_results; h_a = (int*)malloc(NO_BYTES); h_b = (int*)malloc(NO_BYTES); h_c = (int*)malloc(NO_BYTES); cpu_results = (int*)malloc(NO_BYTES); gpu_results = (int*)malloc(NO_BYTES); time_t t; srand((unsigned)time(&t)); // Initialise random values for the array for (int i=0; i <size; i++) { h_a[i] = (int)(rand() & 0xff); } for (int i=0; i <size; i++) { h_b[i] = (int)(rand() & 0xff); } for (int i=0; i <size; i++) { h_c[i] = (int)(rand() & 0xff); } memset(gpu_results,0,NO_BYTES); memset(cpu_results,0,NO_BYTES); //Summation in CPU clock_t cpu_start, cpu_end; cpu_start = clock(); sum_array_cpu(h_a, h_b, h_c, cpu_results, size); cpu_end = clock(); // Allocate memory in device int* d_a, *d_b, *d_c, *d_result; gpuErrchk(hipMalloc((int**)&d_a,NO_BYTES)); 
gpuErrchk(hipMalloc((int**)&d_b,NO_BYTES)); gpuErrchk(hipMalloc((int**)&d_c,NO_BYTES)); gpuErrchk(hipMalloc((int**)&d_result,NO_BYTES)); clock_t htod_start, htod_end; htod_start = clock(); // Transfer the data from host to device gpuErrchk(hipMemcpy(d_a, h_a, NO_BYTES, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_b, h_b, NO_BYTES, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_c, h_c, NO_BYTES, hipMemcpyHostToDevice)); htod_end = clock(); // Designing grid and block size dim3 block(block_size); dim3 grid((size/block.x)+1); // Launch kernel function clock_t gpu_start, gpu_end; gpu_start = clock(); sum_array_gpu << < grid, block >> > (d_a, d_b, d_c, d_result, size); hipDeviceSynchronize(); gpu_end = clock(); clock_t dtoh_start, dtoh_end; dtoh_start = clock(); gpuErrchk(hipMemcpy(gpu_results, d_result, NO_BYTES, hipMemcpyDeviceToHost)); dtoh_end = clock(); //compare the arrays compare_arrays(gpu_results,cpu_results, size); printf("Sum array CPU execution time : %4.6f \n", (double)((double)(cpu_end - cpu_start)/ CLOCKS_PER_SEC)); printf("Sum array GPU execution time : %4.6f \n", (double)((double)(gpu_end - gpu_start)/ CLOCKS_PER_SEC)); printf("htod mem transfer time : %4.6f \n", (double)((double)(htod_end - htod_start)/ CLOCKS_PER_SEC)); printf("dtoh mem transfer time : %4.6f \n", (double)((double)(dtoh_end - dtoh_start)/ CLOCKS_PER_SEC)); printf("Sum array GPU total execution time : %4.6f \n", (double)((double)(dtoh_end - htod_start)/ CLOCKS_PER_SEC)); hipFree(d_result); hipFree(d_a); hipFree(d_b); hipFree(d_c); free(gpu_results); free(h_a); free(h_b); free(h_c); hipDeviceReset(); }
571a711fdee95bd8cd1938d591a8061beed0132d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> //for random intialize #include <stdlib.h> #include <time.h> //for memset #include <cstring> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char * file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr, "GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line ); if (abort) exit(code); } } __global__ void sum_array_gpu( int* a, int* b, int* c, int* result, int size) { int gid = blockIdx.x *blockDim.x +threadIdx.x; if(gid <= size) { result[gid] = a[gid] + b[gid] + c[gid]; } } void sum_array_cpu( int* a, int* b, int* c, int* result, int size) { for (int i=0; i < size; i++) { result[i] = a[i] + b[i] + c[i]; } } void compare_arrays (int* gpu, int* cpu, int size){ for ( int i = 0; i < size ; i++){ if(gpu[i]!= cpu[i]){ printf("Arrays are different \n"); return; } } printf("Arrays are same \n"); } int main() { int size = pow(2,22); int block_size = 512; int NO_BYTES = size * sizeof(int); // Allocate memory in Host int* h_a, *h_b, *h_c, *gpu_results, *cpu_results; h_a = (int*)malloc(NO_BYTES); h_b = (int*)malloc(NO_BYTES); h_c = (int*)malloc(NO_BYTES); cpu_results = (int*)malloc(NO_BYTES); gpu_results = (int*)malloc(NO_BYTES); time_t t; srand((unsigned)time(&t)); // Initialise random values for the array for (int i=0; i <size; i++) { h_a[i] = (int)(rand() & 0xff); } for (int i=0; i <size; i++) { h_b[i] = (int)(rand() & 0xff); } for (int i=0; i <size; i++) { h_c[i] = (int)(rand() & 0xff); } memset(gpu_results,0,NO_BYTES); memset(cpu_results,0,NO_BYTES); //Summation in CPU clock_t cpu_start, cpu_end; cpu_start = clock(); sum_array_cpu(h_a, h_b, h_c, cpu_results, size); cpu_end = clock(); // Allocate memory in device int* d_a, *d_b, *d_c, *d_result; gpuErrchk(cudaMalloc((int**)&d_a,NO_BYTES)); gpuErrchk(cudaMalloc((int**)&d_b,NO_BYTES)); gpuErrchk(cudaMalloc((int**)&d_c,NO_BYTES)); 
gpuErrchk(cudaMalloc((int**)&d_result,NO_BYTES)); clock_t htod_start, htod_end; htod_start = clock(); // Transfer the data from host to device gpuErrchk(cudaMemcpy(d_a, h_a, NO_BYTES, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_b, h_b, NO_BYTES, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_c, h_c, NO_BYTES, cudaMemcpyHostToDevice)); htod_end = clock(); // Designing grid and block size dim3 block(block_size); dim3 grid((size/block.x)+1); // Launch kernel function clock_t gpu_start, gpu_end; gpu_start = clock(); sum_array_gpu << < grid, block >> > (d_a, d_b, d_c, d_result, size); cudaDeviceSynchronize(); gpu_end = clock(); clock_t dtoh_start, dtoh_end; dtoh_start = clock(); gpuErrchk(cudaMemcpy(gpu_results, d_result, NO_BYTES, cudaMemcpyDeviceToHost)); dtoh_end = clock(); //compare the arrays compare_arrays(gpu_results,cpu_results, size); printf("Sum array CPU execution time : %4.6f \n", (double)((double)(cpu_end - cpu_start)/ CLOCKS_PER_SEC)); printf("Sum array GPU execution time : %4.6f \n", (double)((double)(gpu_end - gpu_start)/ CLOCKS_PER_SEC)); printf("htod mem transfer time : %4.6f \n", (double)((double)(htod_end - htod_start)/ CLOCKS_PER_SEC)); printf("dtoh mem transfer time : %4.6f \n", (double)((double)(dtoh_end - dtoh_start)/ CLOCKS_PER_SEC)); printf("Sum array GPU total execution time : %4.6f \n", (double)((double)(dtoh_end - htod_start)/ CLOCKS_PER_SEC)); cudaFree(d_result); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); free(gpu_results); free(h_a); free(h_b); free(h_c); cudaDeviceReset(); }
f093d02e1a8fecd447bf0c9d35d3dd8e886d6382.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fastertransformer/cuda/topk_kernels.cuh" #include "hipcub/hipcub.hpp" namespace fastertransformer { __global__ void ker_curand_setup(hiprandState_t* state, const int size) { // hiprand_init(clock(), blockIdx.x * blockDim.x + threadIdx.x, 0, &state[blockIdx.x * blockDim.x + threadIdx.x]); // fix the seed to prevent the seed of different gpu are differnet in Tensor Parallel // if(threadIdx.x + blockIdx.x * blockDim.x < size) // hiprand_init(0, blockIdx.x * blockDim.x + threadIdx.x, 0, &state[blockIdx.x * blockDim.x + threadIdx.x]); if(threadIdx.x + blockIdx.x * blockDim.x < size) hiprand_init(clock(), blockIdx.x * blockDim.x + threadIdx.x, 0, &state[blockIdx.x * blockDim.x + threadIdx.x]); } void ker_curand_setupLauncher(hiprandState_t* state, DecodingSamplingArguments args, hipStream_t stream) { dim3 block(256); dim3 grid((int)(ceil(args.batch_size_ * 1.0 / 256))); hipLaunchKernelGGL(( ker_curand_setup), dim3(grid), dim3(block), 0, stream , state, args.batch_size_); } template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void beam_topK_kernel(const T* log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, const int vocab_size, T diversity_rate) { typedef hipcub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; 
__shared__ typename BlockReduce::TempStorage temp_storage; int thread_id = threadIdx.x; int block_id = blockIdx.x; TopK<T, MAX_K> partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; #pragma unroll for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } #pragma unroll for(int elem_id = thread_id; elem_id < vocab_size; elem_id += THREADBLOCK_SIZE) { int index = elem_id + block_id * vocab_size; partial.insert(log_probs[index], index); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if (thread_id == 0) { int index = block_id * MAX_K; #pragma unroll for(int i = 0; i < MAX_K; ++i) { topk_tmp_id_buf[index + i] = total.p[i]; topk_tmp_val_buf[index + i] = total.u[i] + diversity_rate * (T)i; } } } #define CASE_K(K) \ case K : \ hipLaunchKernelGGL(( beam_topK_kernel<T, K, block_size>), dim3(batch_size), dim3(block_size), 0, stream, log_probs, \ topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, 0.0f); \ break; \ template<typename T> void beam_topK_kernelLauncher(const T* log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, DecodingSamplingArguments args, hipStream_t stream) { const int batch_size = args.batch_size_; const int vocab_size = args.vocab_size_padded_; const int candidate_num = args.candidate_num_; const int block_size = 256; switch(candidate_num) { CASE_K(1); CASE_K(2); CASE_K(4); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", candidate_num); exit(0); break; } } #undef CASE_K template void beam_topK_kernelLauncher(const float* log_probs, int* topk_tmp_id_buf, float* topk_tmp_val_buf, DecodingSamplingArguments args, hipStream_t stream); template void beam_topK_kernelLauncher(const half* log_probs, int* topk_tmp_id_buf, half* topk_tmp_val_buf, DecodingSamplingArguments args, hipStream_t stream); template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void 
batch_topK_kernel(int* topk_tmp_id_buf, T* topk_tmp_val_buf, int* id_buf) { int thread_id = threadIdx.x; int block_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; TopK<T, MAX_K> partial; if (thread_id == 0) { for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } int index = block_id * MAX_K * MAX_K; for(int i = 0; i < MAX_K * MAX_K; i++) { partial.insert( (T)topk_tmp_val_buf[index + i], topk_tmp_id_buf[index + i]); } index = block_id * MAX_K; for(int i = 0; i < MAX_K; i++) { id_buf[index + i] = partial.p[i]; } } } template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void batch_topK_kernel_v2(int* topk_tmp_id_buf, T* topk_tmp_val_buf, int* id_buf) { typedef hipcub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int tid = threadIdx.x; int bid = blockIdx.x; TopK<T, MAX_K> partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; #pragma unroll for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } int ite = MAX_K * MAX_K / THREADBLOCK_SIZE; #pragma unroll for(int i = 0; i < ite; i++) { int index = bid * MAX_K * MAX_K + i * THREADBLOCK_SIZE + tid; partial.insert( (T)topk_tmp_val_buf[index], topk_tmp_id_buf[index]); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if(tid == 0) { #pragma unroll for(int i = 0; i < MAX_K; i++) id_buf[bid * MAX_K + i] = total.p[i]; } } template<typename T, int BLOCK_SIZE_, int BLOCKS_PER_BEAM_> __global__ void topk_stage_1_opt3( const T* __restrict log_probs, T* tmp_log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, const bool* finished, const int k, const int vocab_size, const int end_id ) { typedef hipcub::BlockReduce<TopK_2<T>, BLOCK_SIZE_> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; const int tid = threadIdx.x; const int bid = blockIdx.x; const int row_id = bid / BLOCKS_PER_BEAM_; // row id for log_probs const int block_lane = bid % BLOCKS_PER_BEAM_; // block id for a beam const int tmp_log_buf_index = row_id * vocab_size; const int tmp_topk_buf_index = row_id * BLOCKS_PER_BEAM_ * k + block_lane * k; TopK_2<T> partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; if(finished != nullptr && finished[row_id] == true) { if(tid < k) { const int index = tmp_topk_buf_index + tid; if(block_lane == 0 && tid == 0) { topk_tmp_id_buf[index] = tmp_log_buf_index + end_id; topk_tmp_val_buf[index] = log_probs[tmp_log_buf_index + end_id]; } else { topk_tmp_id_buf[index] = -1; topk_tmp_val_buf[index] = -MAX_T_VAL; } } return; } for(int elem_id = tid + block_lane * BLOCK_SIZE_; elem_id < vocab_size; elem_id += BLOCK_SIZE_ * BLOCKS_PER_BEAM_) { int index = elem_id + tmp_log_buf_index; tmp_log_probs[index] = log_probs[index]; } for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int elem_id = tid + block_lane * BLOCK_SIZE_; elem_id < vocab_size; elem_id += BLOCK_SIZE_ * BLOCKS_PER_BEAM_) { int index = elem_id + tmp_log_buf_index; partial.insert(tmp_log_probs[index], index); } TopK_2<T> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<T>); if (tid == 0) { const int index = tmp_topk_buf_index + ite; topk_tmp_id_buf[index] = total.p; topk_tmp_val_buf[index] = total.u; tmp_log_probs[total.p] = -MAX_T_VAL; } __syncthreads(); } } template<typename T, int BLOCK_SIZE_, int BLOCKS_PER_BEAM_> __global__ void topk_stage_2_opt3( const int* __restrict topk_tmp_id_buf, T* topk_tmp_val_buf, int* ids, const int k) { const int size = k * k * BLOCKS_PER_BEAM_; const int tid = threadIdx.x; const int batch_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; typedef hipcub::BlockReduce<TopK_2<T>, BLOCK_SIZE_> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; extern __shared__ char array[]; T *s_val = topk_tmp_val_buf + batch_id * size; int *s_id = (int*)(array); TopK_2<T> partial; for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int i = tid; i < size; i+= BLOCK_SIZE_) { partial.insert(s_val[i], i); } TopK_2<T> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<T>); if(tid == 0) { s_id[ite] = total.p; s_val[total.p] = -MAX_T_VAL; } __syncthreads(); } if(tid < k) ids[batch_id * k + tid] = topk_tmp_id_buf[batch_id * size + s_id[tid]]; } template<typename T, int BLOCK_SIZE_, int BLOCKS_PER_BEAM_> __global__ void topk_stage_2_opt3_sampling(const int* __restrict topk_tmp_id_buf, T* topk_tmp_val_buf, T* topk_tmp2_val_buf, int* ids, int* sequence_length, bool* finished_buf, const int k, hiprandState_t* curandstate, const int end_id, const int vocab_size) { const int size = k * BLOCKS_PER_BEAM_; const int tid = threadIdx.x; const int batch_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; typedef hipcub::BlockReduce<TopK_2<float>, BLOCK_SIZE_> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; extern __shared__ char array[]; __shared__ float rand_num; __shared__ float s_sum; __shared__ float s_max; T *s_val = topk_tmp_val_buf + batch_id * size; int *s_id = (int*)(array); s_max = (float)0.0f; s_sum = (float)0.0f; TopK_2<float> partial; for(int index = tid; index < size; index += BLOCK_SIZE_) { topk_tmp2_val_buf[batch_id * size + index] = topk_tmp_val_buf[batch_id * size + index]; } __syncthreads(); T *s_val2 = topk_tmp2_val_buf + batch_id * size; for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int i = tid; i < size; i+= BLOCK_SIZE_) { partial.insert((float)s_val[i], i); } TopK_2<float> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<float>); if(ite == 0) s_max = total.u; if(tid == 0) { s_id[ite] = total.p; s_val[total.p] = -MAX_T_VAL; total.u = __expf(total.u - s_max); s_val2[total.p] = (T)total.u; s_sum += total.u; } __syncthreads(); } if(tid == 0) { rand_num = (float)hiprand_uniform(curandstate + blockIdx.x) * s_sum; for(int i = 0; i < k; i++) { rand_num = rand_num - (float)s_val2[s_id[i]]; if(rand_num <= 0.0f) { ids[batch_id] = topk_tmp_id_buf[batch_id * size + s_id[i]] % vocab_size; break; } } if(finished_buf != nullptr) { finished_buf[batch_id] = ids[batch_id] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[batch_id] = finished_buf[batch_id] ? sequence_length[batch_id] : sequence_length[batch_id] + 1; } } } } template<typename T, int BLOCK_SIZE, int BLOCKS_PER_BEAM> __global__ void topk_stage_1_opt2_general( const T* __restrict log_probs, T* tmp_log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, const int k, const int vocab_size ) { const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; typedef hipcub::BlockReduce<TopK_2<T>, BLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; const int tid = threadIdx.x; const int bid = blockIdx.x; const int row_id = bid / BLOCKS_PER_BEAM; // row id for log_probs const int block_lane = bid % BLOCKS_PER_BEAM; // block id for a beam const int tmp_log_buf_index = row_id * vocab_size; const int tmp_topk_buf_index = row_id * BLOCKS_PER_BEAM * k + block_lane * k; TopK_2<T> partial; for(int elem_id = tid + block_lane * BLOCK_SIZE; elem_id < vocab_size; elem_id += BLOCK_SIZE * BLOCKS_PER_BEAM) { int index = elem_id + tmp_log_buf_index; tmp_log_probs[index] = log_probs[index]; } for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int elem_id = tid + block_lane * BLOCK_SIZE; elem_id < vocab_size; elem_id += BLOCK_SIZE * BLOCKS_PER_BEAM) { int index = elem_id + tmp_log_buf_index; partial.insert(tmp_log_probs[index], index); } TopK_2<T> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<T>); if (tid == 0) { const int index = tmp_topk_buf_index + ite; topk_tmp_id_buf[index] = total.p; topk_tmp_val_buf[index] = total.u; tmp_log_probs[total.p] = -MAX_T_VAL; } __syncthreads(); } } template<typename T, int BLOCK_SIZE, int BLOCKS_PER_BEAM> __global__ void topk_stage_2_opt2_general( const int* __restrict topk_tmp_id_buf, T* topk_tmp_val_buf, int* ids, const int k) { const int size = k * k * BLOCKS_PER_BEAM; const int tid = threadIdx.x; const int batch_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; typedef hipcub::BlockReduce<TopK_2<T>, BLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; extern __shared__ char array[]; T *s_val = topk_tmp_val_buf + batch_id * size; int *s_id = (int*)(array); TopK_2<T> partial; for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int i = tid; i < size; i+= BLOCK_SIZE) { partial.insert(s_val[i], i); } TopK_2<T> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<T>); if(tid == 0) { s_id[ite] = total.p; s_val[total.p] = -MAX_T_VAL; } __syncthreads(); } if(tid < k) ids[batch_id * k + tid] = topk_tmp_id_buf[batch_id * size + s_id[tid]]; } #define CASE_K_DIV(K,BLOCK_SIZE_1, BLOCK_SIZE_2) \ case K: \ hipLaunchKernelGGL(( beam_topK_kernel<T, K, BLOCK_SIZE_2>), dim3(batch_size * beam_width), dim3(BLOCK_SIZE_2), 0, stream, log_probs, \ topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); \ if (K < 10) \ hipLaunchKernelGGL(( batch_topK_kernel<T, K, BLOCK_SIZE_1>), dim3(batch_size), dim3(BLOCK_SIZE_1), 0, stream, topk_tmp_id_buf, topk_tmp_val_buf, ids); \ else \ hipLaunchKernelGGL(( batch_topK_kernel_v2<T, K, 32>), dim3(batch_size), dim3(32), 0, stream, topk_tmp_id_buf, topk_tmp_val_buf, ids); \ break; \ #define CASE_K(K,BLOCK_SIZE_1_, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_) \ case K: \ hipLaunchKernelGGL(( topk_stage_1_opt3<float, BLOCK_SIZE_1_, BLOCKS_PER_BEAM_>), dim3(batch_size * K * BLOCKS_PER_BEAM_), dim3(BLOCK_SIZE_1_), 0, stream, \ log_probs, \ temp_log_probs, \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ finished, \ beam_width, vocab_size, end_id); \ hipLaunchKernelGGL(( topk_stage_2_opt3<float, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_>), dim3(batch_size), dim3(BLOCK_SIZE_2_), K * sizeof(int), stream, \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ ids, \ beam_width); \ break; \ template <typename T> void topK_kernelLauncher(void* workspace, size_t& workspace_size, T* log_probs, int* ids, const bool* finished, DecodingBeamsearchArguments args, hipStream_t 
stream) { const int batch_size = args.batch_size_; const int beam_width = args.beam_width_; const int vocab_size = args.vocab_size_padded_; const T diversity_rate = args.beam_search_diversity_rate_; const int end_id = args.end_id_; const int max_block_per_beam = 8; int temp_log_probs_buf_size = batch_size * beam_width * vocab_size; // type float int topk_tmp_ids_buf_size = batch_size * beam_width * beam_width * max_block_per_beam; // type int int topk_tmp_val_buf_size = batch_size * beam_width * beam_width * max_block_per_beam; // type float // prevent memory misalinged address temp_log_probs_buf_size = (int)(ceil(temp_log_probs_buf_size / 4.)) * 4; topk_tmp_ids_buf_size = (int)(ceil(topk_tmp_ids_buf_size / 4.)) * 4; topk_tmp_val_buf_size = (int)(ceil(topk_tmp_val_buf_size / 4.)) * 4; if(workspace == nullptr) { workspace_size = sizeof(float) * temp_log_probs_buf_size + sizeof(int) * topk_tmp_ids_buf_size + sizeof(float) * topk_tmp_val_buf_size; return; } else { T* temp_log_probs = (T*)workspace; int* topk_tmp_id_buf = (int*)(temp_log_probs + temp_log_probs_buf_size); T* topk_tmp_val_buf = (T*)(topk_tmp_id_buf + topk_tmp_ids_buf_size); if(diversity_rate == 0.0f) { switch(beam_width) { CASE_K(1,128,128,8); CASE_K(4,128,128,8); CASE_K(10,128,128,8); CASE_K(16,128,128,5); CASE_K(32,256,128,1); CASE_K(64,256,256,1); default: hipLaunchKernelGGL(( topk_stage_1_opt2_general<T, 128, 1>), dim3(batch_size * beam_width * 1), dim3(128), 0, stream, log_probs, temp_log_probs, topk_tmp_id_buf, topk_tmp_val_buf, beam_width, vocab_size); hipLaunchKernelGGL(( topk_stage_2_opt2_general<T, 128, 1>), dim3(batch_size), dim3(128), beam_width*beam_width*1*sizeof(float) + beam_width * sizeof(int), stream, topk_tmp_id_buf, topk_tmp_val_buf, ids, beam_width); break; } } else { switch(beam_width) { CASE_K_DIV(1,256,256); CASE_K_DIV(4,256,256); CASE_K_DIV(16,256,64); CASE_K_DIV(64,256,64); default: printf("[ERROR] Topk kernel does not support beamwidth = %d \n", beam_width); exit(0); break; } } 
return; } } #undef CASE_K #undef CASE_K_DIV template void topK_kernelLauncher<float>(void* workspace, size_t& workspace_size, float* log_probs, int* ids, const bool* finished, DecodingBeamsearchArguments args, hipStream_t stream); // Sampling kernels template<typename T> __global__ void sampling(int* topk_tmp_id_buf, T* topk_tmp_val_buf, int* ids, int* sequence_length, bool* finished_buf, const int candidate_num, int random_num, const int end_id, const int vocab_size) { int tid = threadIdx.x; int bid = blockIdx.x; __shared__ float sum; __shared__ float rand_num; if(tid < candidate_num) { float max_val = topk_tmp_val_buf[bid * candidate_num]; topk_tmp_val_buf[bid * candidate_num + tid] = (T)__expf((float)topk_tmp_val_buf[bid * candidate_num + tid] - max_val); } if(tid == 0) { sum = 0.0f; for(int i = 0; i < candidate_num; i++) { sum = sum + (float)topk_tmp_val_buf[bid * candidate_num + i]; } hiprandState_t local_state; hiprand_init((T)random_num, bid, 0, &local_state); rand_num = (float)hiprand_uniform(&local_state) * sum; ids[bid] = topk_tmp_id_buf[bid * candidate_num + candidate_num - 1] % vocab_size; for(int i = 0; i < candidate_num; i++) { rand_num = rand_num - (float)topk_tmp_val_buf[bid * candidate_num + i]; if(rand_num <= 0.0f){ ids[bid] = topk_tmp_id_buf[bid * candidate_num + i] % vocab_size; break; } } if(finished_buf != nullptr) { finished_buf[bid] = ids[bid] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[bid] = finished_buf[bid] ? 
sequence_length[bid] : sequence_length[bid] + 1; } } } } #define CASE_K(K) \ case K : \ hipLaunchKernelGGL(( beam_topK_kernel<T, K, block_size>), dim3(batch_size), dim3(block_size), 0, stream, log_probs, \ topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, 0.0f); \ break; \ template <typename T> void topK_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, T* log_probs, int* ids, int* sequence_length, bool* finished_buf, int random_num, DecodingSamplingArguments args, hipStream_t stream, const int batch_size) { // This function would be called two or more times. // First time is used to get the workspace size, so we need to put // max batch size we want to use. // For other times, we need to put the inference batch size to // set the grid size we use. const int vocab_size = args.vocab_size_padded_; const int candidate_num = args.candidate_num_; const int end_id = args.end_id_; const int block_size = 256; int topk_tmp_ids_buf_size = args.batch_size_ * args.candidate_num_; // type int int topk_tmp_val_buf_size = args.batch_size_ * args.candidate_num_; // type T topk_tmp_ids_buf_size = (int)(ceil(topk_tmp_ids_buf_size / 4.)) * 4; topk_tmp_val_buf_size = (int)(ceil(topk_tmp_val_buf_size / 4.)) * 4; if(workspace == nullptr) { workspace_size = sizeof(int) * topk_tmp_ids_buf_size + sizeof(T) * topk_tmp_val_buf_size; } else { int* topk_tmp_id_buf = (int*)workspace; T* topk_tmp_val_buf = (T*)(topk_tmp_id_buf + topk_tmp_ids_buf_size); switch(candidate_num) { CASE_K(1); CASE_K(2); CASE_K(4); CASE_K(16); CASE_K(64); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", candidate_num); exit(0); break; } hipLaunchKernelGGL(( sampling<T>) , dim3(batch_size), dim3(candidate_num), 0, stream, topk_tmp_id_buf, topk_tmp_val_buf, ids, sequence_length, finished_buf, candidate_num, random_num, end_id, vocab_size); } } #undef CASE_K #define CASE_K(K_MIN, K_MAX, BLOCK_SIZE_1_, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_) \ case K_MIN ... 
K_MAX: \ hipLaunchKernelGGL(( topk_stage_1_opt3<T, BLOCK_SIZE_1_, BLOCKS_PER_BEAM_>), dim3(batch_size * BLOCKS_PER_BEAM_), dim3(BLOCK_SIZE_1_), 0, stream, \ log_probs, \ temp_log_probs, \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ finished_buf, \ candidate_num, vocab_size, end_id); \ hipLaunchKernelGGL(( topk_stage_2_opt3_sampling<T, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_>), dim3(batch_size), dim3(BLOCK_SIZE_2_), K_MAX * sizeof(int) , stream, \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ topk_tmp2_val_buf, \ ids, \ sequence_length, \ finished_buf, \ candidate_num, \ curandstate, \ end_id, \ vocab_size); \ break; \ template <typename T> void topK_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, T* log_probs, int* ids, int* sequence_length, bool* finished_buf, hiprandState_t* curandstate, DecodingSamplingArguments args, hipStream_t stream, const int batch_size) { // Here, we put batch size as an argument because the batch size of initialization // and inference may be different due to pipelint parallelism. 
const int candidate_num = args.candidate_num_; const int vocab_size = args.vocab_size_padded_; const int end_id = args.end_id_; const int max_block_per_beam = 8; int temp_log_probs_buf_size = batch_size * vocab_size; // type float int topk_tmp_ids_buf_size = batch_size * candidate_num * max_block_per_beam; // type int int topk_tmp_val_buf_size = batch_size * candidate_num * max_block_per_beam; // type float // prevent memory misalinged address temp_log_probs_buf_size = (int)(ceil(temp_log_probs_buf_size / 4.)) * 4; topk_tmp_ids_buf_size = (int)(ceil(topk_tmp_ids_buf_size / 4.)) * 4; topk_tmp_val_buf_size = (int)(ceil(topk_tmp_val_buf_size / 4.)) * 4; if(workspace == nullptr) { workspace_size = sizeof(T) * temp_log_probs_buf_size + sizeof(int) * topk_tmp_ids_buf_size + 2 * sizeof(T) * topk_tmp_val_buf_size; return; } else { T* temp_log_probs = (T*)workspace; int* topk_tmp_id_buf = (int*)(temp_log_probs + temp_log_probs_buf_size); T* topk_tmp_val_buf = (T*)(topk_tmp_id_buf + topk_tmp_ids_buf_size); T* topk_tmp2_val_buf = (T*)(topk_tmp_val_buf + topk_tmp_val_buf_size); switch(candidate_num) { CASE_K(1,16,128,128,8); CASE_K(17,32,256,128,8); CASE_K(33,64,256,256,8); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", candidate_num); exit(0); break; } return; } } #undef CASE_K template void topK_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, float* log_probs, int* ids, int* sequence_length, bool* finished_buf, int random_num, DecodingSamplingArguments args, hipStream_t stream, const int batch_size); template void topK_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, half* log_probs, int* ids, int* sequence_length, bool* finished_buf, int random_num, DecodingSamplingArguments args, hipStream_t stream, const int batch_size); template void topK_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, float* log_probs, int* ids, int* sequence_length, bool* finished_buf, 
hiprandState_t* curandstate, DecodingSamplingArguments args, hipStream_t stream, const int batch_size); template void topK_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, half* log_probs, int* ids, int* sequence_length, bool* finished_buf, hiprandState_t* curandstate, DecodingSamplingArguments args, hipStream_t stream, const int batch_size); __global__ void init_topp_id_val(int* topp_id_val_buf, int* topp_offset_buf, const int batch_size, const int vocab_size) { int tid = threadIdx.x; int bid = blockIdx.x; if(bid == 0) { for(int i = tid; i < batch_size + 1; i+= blockDim.x) { topp_offset_buf[i] = i * vocab_size; } } while(tid < vocab_size) { topp_id_val_buf[bid * vocab_size + tid] = tid; tid += blockDim.x; } } void init_topp_id_val_kernel_kernelLauncher(int* topp_id_val_buf, int* topp_offset_buf, const int batch_size, const int vocab_size, hipStream_t stream) { hipLaunchKernelGGL(( init_topp_id_val), dim3(batch_size), dim3(512), 0, stream, topp_id_val_buf, topp_offset_buf, batch_size, vocab_size); } // Sampling kernels template<typename T> __global__ void top_p_sampling(T* sorted_log_probs, int* sorted_id_vals, int* ids, int* sequence_length, bool* finished_buf, const int vocab_size, const int random_num, const float prob_threshold, const int end_id) { int tid = threadIdx.x; hiprandState_t local_state; hiprand_init((T)random_num, tid, 0, &local_state); T rand_num = (T)hiprand_uniform(&local_state) * (T)prob_threshold; ids[tid] = sorted_id_vals[tid * vocab_size]; for(int i = tid * vocab_size; i < tid * vocab_size + vocab_size; i++) { rand_num = rand_num - sorted_log_probs[i]; if(rand_num <= (T)0.0f) { ids[tid] = sorted_id_vals[i]; break; } } if(finished_buf != nullptr) { finished_buf[tid] = ids[tid] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[tid] = finished_buf[tid] ? 
sequence_length[tid] : sequence_length[tid] + 1; } } } template<typename T> __global__ void top_p_sampling_v2(T* sorted_log_probs, int* sorted_id_vals, int* ids, int* sequence_length, bool* finished_buf, const int vocab_size, hiprandState_t* curandstate, const float prob_threshold, const int end_id, const int batch_size) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < batch_size) { T rand_num = (T)hiprand_uniform(curandstate + tid) * (T)prob_threshold; ids[tid] = sorted_id_vals[vocab_size - 1]; for(int i = tid * vocab_size; i < tid * vocab_size + vocab_size; i++) { rand_num = rand_num - sorted_log_probs[i]; if(rand_num <= (T)0.0) { ids[tid] = sorted_id_vals[i]; break; } }; if(finished_buf != nullptr) { finished_buf[tid] = ids[tid] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[tid] = finished_buf[tid] ? sequence_length[tid] : sequence_length[tid] + 1; } } } } template <typename T> __global__ void sort_kernel(const T* log_probs, const int* id_vals, T* sorted_log_probs, int* sorted_id_vals, const int vocab_size) { typedef cub::BlockRadixSort<T, 256, 32, int> BlockRadixSort; __shared__ typename BlockRadixSort::TempStorage temp_storage; // Obtain a segment of consecutive items that are blocked across threads T thread_keys[32]; int thread_values[32]; int tid = threadIdx.x; int bid = blockIdx.x; for(int i = 0; i < 32; i++) { int index = tid + 256 * i + bid * vocab_size; thread_keys[i] = log_probs[index]; thread_values[i] = id_vals[index]; } BlockRadixSort(temp_storage).SortDescending(thread_keys, thread_values); for(int i = 0; i < 32; i++) { int index = tid + 256 * i + bid * vocab_size; sorted_log_probs[index] = thread_keys[i]; sorted_id_vals[index] = thread_values[i]; } } template<typename T> void topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, const T* log_probs, const int* id_vals, const int* offset_buf, bool* finished_buf, int step, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, 
const int n, hipStream_t stream, const int batch_size) { const int vocab_size = args.vocab_size_padded_; int sorted_log_prob_buf_size = batch_size * vocab_size; // type T int sorted_id_vals_buf_size = batch_size * vocab_size; // type int sorted_log_prob_buf_size = (int)(ceil(sorted_log_prob_buf_size / 4.)) * 4; sorted_id_vals_buf_size = (int)(ceil(sorted_id_vals_buf_size / 4.)) * 4; void *cub_temp_storage = workspace; T* sorted_log_probs = (T*)((char*)cub_temp_storage + args.cub_temp_storage_size_); int* sorted_id_vals = (int*)(sorted_log_probs + sorted_log_prob_buf_size); if(workspace == nullptr) { hipcub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, args.cub_temp_storage_size_, log_probs, (T*)nullptr, id_vals, (int*)nullptr, vocab_size * batch_size, batch_size, offset_buf, offset_buf + 1, 0, // begin_bit sizeof(T)*8, // end_bit = sizeof(KeyT) * 8 stream); // hipStream_t args.cub_temp_storage_size_ = (int)(ceil(args.cub_temp_storage_size_ / 4.)) * 4; workspace_size = sizeof(T) * sorted_log_prob_buf_size + sizeof(int) * sorted_id_vals_buf_size + args.cub_temp_storage_size_; } else { hipcub::DeviceSegmentedRadixSort::SortPairsDescending(cub_temp_storage, args.cub_temp_storage_size_, log_probs, sorted_log_probs, id_vals, sorted_id_vals, n * batch_size, batch_size, offset_buf, offset_buf + 1, 0, // begin_bit sizeof(T)*8, // end_bit = sizeof(KeyT) * 8 stream); // hipStream_t hipLaunchKernelGGL(( top_p_sampling), dim3(1), dim3(batch_size), 0, stream, sorted_log_probs, sorted_id_vals, output_ids, sequence_length, finished_buf, n, step, args.probability_threshold_, args.end_id_); } } template void topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, const float* log_probs, const int* id_vals, const int* offset_buf, bool* finished_buf, int step, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, hipStream_t stream, const int batch_size); template void topP_sampling_kernel_kernelLauncher(void* workspace, 
size_t& workspace_size, const half* log_probs, const int* id_vals, const int* offset_buf, bool* finished_buf, int step, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, hipStream_t stream, const int batch_size); template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void beam_topK_kernel_for_topP(const T* log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, const int vocab_size, int* offset_buf, int* begin_offset_buf, float p_threshold) { typedef hipcub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int thread_id = threadIdx.x; int block_id = blockIdx.x; TopK<T, MAX_K> partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; #pragma unroll for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } #pragma unroll for(int elem_id = thread_id; elem_id < vocab_size; elem_id += THREADBLOCK_SIZE) { int index = elem_id + block_id * vocab_size; partial.insert(log_probs[index], index); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if(thread_id == 0) { begin_offset_buf[block_id] = offset_buf[block_id]; T sum_prob = (T)(0.0f); #pragma unroll for(int i = 0; i < MAX_K; i++) { sum_prob += total.u[i]; } if ((float)sum_prob >= p_threshold) { begin_offset_buf[block_id] += vocab_size; int index = block_id * vocab_size; #pragma unroll for(int i = 0; i < MAX_K; ++i) { topk_tmp_id_buf[index + i] = total.p[i]%vocab_size; topk_tmp_val_buf[index + i] = total.u[i]; } } } } template<typename T> void topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, const T* log_probs, const int* id_vals, int* offset_buf, int* begin_offset_buf, bool* finished_buf, hiprandState_t* curandstate, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, hipStream_t stream, const 
int batch_size) { // Here, we put batch size as an argument because the batch size of initialization // and inference may be different due to pipelint parallelism. const int vocab_size = args.vocab_size_padded_; const int block_size = 256; int sorted_log_prob_buf_size = batch_size * vocab_size; // type T int sorted_id_vals_buf_size = batch_size * vocab_size; // type int sorted_log_prob_buf_size = (int)(ceil(sorted_log_prob_buf_size / 4.)) * 4; sorted_id_vals_buf_size = (int)(ceil(sorted_id_vals_buf_size / 4.)) * 4; void *cub_temp_storage = workspace; T* sorted_log_probs = (T*)((char*)cub_temp_storage + args.cub_temp_storage_size_); int* sorted_id_vals = (int*)(sorted_log_probs + sorted_log_prob_buf_size); if(workspace == nullptr) { hipcub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, args.cub_temp_storage_size_, log_probs, (T*)nullptr, id_vals, (int*)nullptr, vocab_size * batch_size, batch_size, begin_offset_buf, offset_buf + 1, 0, // begin_bit sizeof(T)*8, // end_bit = sizeof(KeyT) * 8 stream); // hipStream_t args.cub_temp_storage_size_ = (int)(ceil(args.cub_temp_storage_size_ / 4.)) * 4; workspace_size = sizeof(T) * sorted_log_prob_buf_size + sizeof(int) * sorted_id_vals_buf_size + args.cub_temp_storage_size_; } else { hipLaunchKernelGGL(( beam_topK_kernel_for_topP<T, 1, block_size>), dim3(batch_size), dim3(block_size), 0, stream, log_probs, \ sorted_id_vals, sorted_log_probs, vocab_size, offset_buf,begin_offset_buf, args.probability_threshold_); hipcub::DeviceSegmentedRadixSort::SortPairsDescending(cub_temp_storage, args.cub_temp_storage_size_, log_probs, sorted_log_probs, id_vals, sorted_id_vals, n * batch_size, batch_size, begin_offset_buf, offset_buf+1, 0, // begin_bit sizeof(T)*8, // end_bit = sizeof(KeyT) * 8 stream); // hipStream_t dim3 block(256); dim3 grid((int)(ceil(batch_size * 1.0 / 256))); hipLaunchKernelGGL(( top_p_sampling_v2), dim3(grid), dim3(block), 0, stream, sorted_log_probs, sorted_id_vals, output_ids, sequence_length, finished_buf, 
n, curandstate, args.probability_threshold_, args.end_id_, batch_size); } } template void topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, const float* log_probs, const int* id_vals, int* offset_buf, int* begin_offset_buf, bool* finished_buf, hiprandState_t* curandstate, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, hipStream_t stream, const int batch_size); template void topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, const half* log_probs, const int* id_vals, int* offset_buf, int* begin_offset_buf, bool* finished_buf, hiprandState_t* curandstate, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, hipStream_t stream, const int batch_size); template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void topK_topP_sampling_kernel(int* output_ids, const T* logits, const int vocab_size, const int random_num, const float prob_threshold, T diversity_rate) { typedef hipcub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int thread_id = threadIdx.x; int block_id = blockIdx.x; TopK<T, MAX_K> partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; #pragma unroll for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } #pragma unroll for(int elem_id = thread_id; elem_id < vocab_size; elem_id += THREADBLOCK_SIZE) { int index = elem_id + block_id * vocab_size; partial.insert(logits[index], index); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if(thread_id == 0) { // float sum = 0.0f; T sum = (T)(0.0f); T max_val = total.u[0]; #pragma unroll for(int i = 0; i < MAX_K; i++) { total.u[i] = total.u[i] + diversity_rate * (T)i; // diversely sampling penalty total.u[i] = (T)__expf((float)(total.u[i] - max_val)); sum += total.u[i]; } hiprandState_t local_state; hiprand_init((T)random_num, 0, block_id, &local_state); T rand_num = (T)hiprand_uniform(&local_state) * (T)prob_threshold * sum; output_ids[block_id] = total.p[0] % vocab_size; #pragma unroll for(int i = 0; i < MAX_K; i++) { rand_num = rand_num - total.u[i]; if(rand_num <= (T)0.0f){ output_ids[block_id] = total.p[i] % vocab_size; break; } } } } #define CASE_K(K) \ case K : \ hipLaunchKernelGGL(( topK_topP_sampling_kernel<T, K, block_size>), dim3(batch_size), dim3(block_size), 0, stream, output_ids, logits, \ vocab_size, random_num, prob_threshold, 0.0f); \ break; \ template<typename T> void topK_topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, int* output_ids, const T* logits, const int random_num, DecodingSamplingArguments& args, hipStream_t stream, const int batch_size) { if(workspace == nullptr) { workspace_size = 0; } else { const int vocab_size = args.vocab_size_padded_; const int block_size = 256; const T prob_threshold = args.probability_threshold_; switch(args.candidate_num_) { CASE_K(1); CASE_K(2); CASE_K(4); CASE_K(16); CASE_K(64); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", args.candidate_num_); exit(0); break; } } } #undef CASE_K template<typename T, int BLOCK_SIZE_, int BLOCKS_PER_BEAM_> 
__global__ void topk_topp_sampling_kernel_v2(const int* __restrict topk_tmp_id_buf, T* topk_tmp_val_buf, T* topk_tmp2_val_buf, int* ids, int* sequence_length, bool* finished_buf, const int k, const T prob_threshold, hiprandState_t* curandstate, const int end_id, const int vocab_size) { const int size = k * BLOCKS_PER_BEAM_; const int tid = threadIdx.x; const int batch_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; typedef hipcub::BlockReduce<TopK_2<float>, BLOCK_SIZE_> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; extern __shared__ char array[]; __shared__ float rand_num; __shared__ float s_max; __shared__ float s_sum; T *s_val = topk_tmp_val_buf + batch_id * size; int *s_id = (int*)(array); s_max = 0.0f; s_sum = 0.0f; TopK_2<float> partial; for(int index = tid; index < size; index += BLOCK_SIZE_) { topk_tmp2_val_buf[batch_id * size + index] = topk_tmp_val_buf[batch_id * size + index]; } __syncthreads(); T *s_val2 = topk_tmp2_val_buf + batch_id * size; for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int i = tid; i < size; i+= BLOCK_SIZE_) { partial.insert((float)s_val[i], i); } TopK_2<float> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<float>); if(ite == 0) s_max = total.u; if(tid == 0) { s_id[ite] = total.p; s_val[total.p] = -MAX_T_VAL; total.u = __expf(total.u - s_max); s_val2[total.p] = (T)total.u; s_sum += total.u; } __syncthreads(); } if(tid == 0) { rand_num = (float)hiprand_uniform(curandstate + blockIdx.x) * (float)prob_threshold * s_sum; for(int i = 0; i < k; i++) { rand_num = rand_num - (float)s_val2[s_id[i]]; if(rand_num <= 0.0f) { ids[batch_id] = topk_tmp_id_buf[batch_id * size + s_id[i]] % vocab_size; break; } } if(finished_buf != nullptr) { finished_buf[batch_id] = ids[batch_id] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[batch_id] = finished_buf[batch_id] ? 
sequence_length[batch_id] : sequence_length[batch_id] + 1; } } } } #define CASE_K(K_MIN, K_MAX ,BLOCK_SIZE_1_, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_) \ case K_MIN ... K_MAX: \ hipLaunchKernelGGL(( topk_stage_1_opt3<T, BLOCK_SIZE_1_, BLOCKS_PER_BEAM_>), dim3(batch_size * BLOCKS_PER_BEAM_), dim3(BLOCK_SIZE_1_), 0, stream, \ logits, \ temp_logits, \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ finished_buf, \ candidate_num, vocab_size, end_id); \ hipLaunchKernelGGL(( topk_topp_sampling_kernel_v2<T, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_>), dim3(batch_size), dim3(BLOCK_SIZE_2_), K_MAX * sizeof(int) , stream, \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ topk_tmp2_val_buf, \ output_ids, \ nullptr, \ finished_buf, \ candidate_num, \ prob_threshold, \ curandstate, \ end_id, \ vocab_size); \ break; \ template <typename T> void topK_topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, int* output_ids, const T* logits, bool* finished_buf, hiprandState_t* curandstate, DecodingSamplingArguments& args, hipStream_t stream, const int batch_size) { // Here, we put batch size as an argument because the batch size of initialization // and inference may be different due to pipelint parallelism. 
const int candidate_num = args.candidate_num_; const int vocab_size = args.vocab_size_padded_; const int end_id = args.end_id_; const T prob_threshold = args.probability_threshold_; const int max_block_per_beam = 8; int temp_logits_buf_size = batch_size * vocab_size; // type float int topk_tmp_ids_buf_size = batch_size * candidate_num * max_block_per_beam; // type int int topk_tmp_val_buf_size = batch_size * candidate_num * max_block_per_beam; // type float // prevent memory misalinged address temp_logits_buf_size = (int)(ceil(temp_logits_buf_size / 4.)) * 4; topk_tmp_ids_buf_size = (int)(ceil(topk_tmp_ids_buf_size / 4.)) * 4; topk_tmp_val_buf_size = (int)(ceil(topk_tmp_val_buf_size / 4.)) * 4; if(workspace == nullptr) { workspace_size = sizeof(T) * temp_logits_buf_size + sizeof(int) * topk_tmp_ids_buf_size + 2 * sizeof(T) * topk_tmp_val_buf_size; return; } else { T* temp_logits = (T*)workspace; int* topk_tmp_id_buf = (int*)(temp_logits + temp_logits_buf_size); T* topk_tmp_val_buf = (T*)(topk_tmp_id_buf + topk_tmp_ids_buf_size); T* topk_tmp2_val_buf = (T*)(topk_tmp_val_buf + topk_tmp_val_buf_size); switch(candidate_num) { CASE_K(1,16,128,128,8); CASE_K(17,32,256,128,8); CASE_K(33,64,256,256,8); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", candidate_num); exit(0); break; } return; } } #undef CASE_K template void topK_topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, int* output_ids, const float* logits, const int random_num, DecodingSamplingArguments& args, hipStream_t stream, const int batch_size); template void topK_topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, int* output_ids, const half* logits, const int random_num, DecodingSamplingArguments& args, hipStream_t stream, const int batch_size); template void topK_topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, int* output_ids, const float* logits, bool* finished_buf, hiprandState_t* 
curandstate, DecodingSamplingArguments& args, hipStream_t stream, const int batch_size); template void topK_topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, int* output_ids, const half* logits, bool* finished_buf, hiprandState_t* curandstate, DecodingSamplingArguments& args, hipStream_t stream, const int batch_size); } // end of namespace fastertransformer
f093d02e1a8fecd447bf0c9d35d3dd8e886d6382.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fastertransformer/cuda/topk_kernels.cuh" #include "cub/cub.cuh" namespace fastertransformer { __global__ void ker_curand_setup(curandState_t* state, const int size) { // curand_init(clock(), blockIdx.x * blockDim.x + threadIdx.x, 0, &state[blockIdx.x * blockDim.x + threadIdx.x]); // fix the seed to prevent the seed of different gpu are differnet in Tensor Parallel // if(threadIdx.x + blockIdx.x * blockDim.x < size) // curand_init(0, blockIdx.x * blockDim.x + threadIdx.x, 0, &state[blockIdx.x * blockDim.x + threadIdx.x]); if(threadIdx.x + blockIdx.x * blockDim.x < size) curand_init(clock(), blockIdx.x * blockDim.x + threadIdx.x, 0, &state[blockIdx.x * blockDim.x + threadIdx.x]); } void ker_curand_setupLauncher(curandState_t* state, DecodingSamplingArguments args, cudaStream_t stream) { dim3 block(256); dim3 grid((int)(ceil(args.batch_size_ * 1.0 / 256))); ker_curand_setup<<<grid, block, 0, stream >>>(state, args.batch_size_); } template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void beam_topK_kernel(const T* log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, const int vocab_size, T diversity_rate) { typedef cub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int thread_id = threadIdx.x; int block_id = blockIdx.x; TopK<T, MAX_K> 
partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; #pragma unroll for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } #pragma unroll for(int elem_id = thread_id; elem_id < vocab_size; elem_id += THREADBLOCK_SIZE) { int index = elem_id + block_id * vocab_size; partial.insert(log_probs[index], index); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if (thread_id == 0) { int index = block_id * MAX_K; #pragma unroll for(int i = 0; i < MAX_K; ++i) { topk_tmp_id_buf[index + i] = total.p[i]; topk_tmp_val_buf[index + i] = total.u[i] + diversity_rate * (T)i; } } } #define CASE_K(K) \ case K : \ beam_topK_kernel<T, K, block_size><<<batch_size, block_size, 0, stream>>>(log_probs, \ topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, 0.0f); \ break; \ template<typename T> void beam_topK_kernelLauncher(const T* log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, DecodingSamplingArguments args, cudaStream_t stream) { const int batch_size = args.batch_size_; const int vocab_size = args.vocab_size_padded_; const int candidate_num = args.candidate_num_; const int block_size = 256; switch(candidate_num) { CASE_K(1); CASE_K(2); CASE_K(4); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", candidate_num); exit(0); break; } } #undef CASE_K template void beam_topK_kernelLauncher(const float* log_probs, int* topk_tmp_id_buf, float* topk_tmp_val_buf, DecodingSamplingArguments args, cudaStream_t stream); template void beam_topK_kernelLauncher(const half* log_probs, int* topk_tmp_id_buf, half* topk_tmp_val_buf, DecodingSamplingArguments args, cudaStream_t stream); template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void batch_topK_kernel(int* topk_tmp_id_buf, T* topk_tmp_val_buf, int* id_buf) { int thread_id = threadIdx.x; int block_id = blockIdx.x; const bool IS_FP16 = 
std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; TopK<T, MAX_K> partial; if (thread_id == 0) { for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } int index = block_id * MAX_K * MAX_K; for(int i = 0; i < MAX_K * MAX_K; i++) { partial.insert( (T)topk_tmp_val_buf[index + i], topk_tmp_id_buf[index + i]); } index = block_id * MAX_K; for(int i = 0; i < MAX_K; i++) { id_buf[index + i] = partial.p[i]; } } } template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void batch_topK_kernel_v2(int* topk_tmp_id_buf, T* topk_tmp_val_buf, int* id_buf) { typedef cub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int tid = threadIdx.x; int bid = blockIdx.x; TopK<T, MAX_K> partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; #pragma unroll for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } int ite = MAX_K * MAX_K / THREADBLOCK_SIZE; #pragma unroll for(int i = 0; i < ite; i++) { int index = bid * MAX_K * MAX_K + i * THREADBLOCK_SIZE + tid; partial.insert( (T)topk_tmp_val_buf[index], topk_tmp_id_buf[index]); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if(tid == 0) { #pragma unroll for(int i = 0; i < MAX_K; i++) id_buf[bid * MAX_K + i] = total.p[i]; } } template<typename T, int BLOCK_SIZE_, int BLOCKS_PER_BEAM_> __global__ void topk_stage_1_opt3( const T* __restrict log_probs, T* tmp_log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, const bool* finished, const int k, const int vocab_size, const int end_id ) { typedef cub::BlockReduce<TopK_2<T>, BLOCK_SIZE_> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; const int tid = threadIdx.x; const int bid = blockIdx.x; const int row_id = bid / BLOCKS_PER_BEAM_; // row id for log_probs const int 
block_lane = bid % BLOCKS_PER_BEAM_; // block id for a beam const int tmp_log_buf_index = row_id * vocab_size; const int tmp_topk_buf_index = row_id * BLOCKS_PER_BEAM_ * k + block_lane * k; TopK_2<T> partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; if(finished != nullptr && finished[row_id] == true) { if(tid < k) { const int index = tmp_topk_buf_index + tid; if(block_lane == 0 && tid == 0) { topk_tmp_id_buf[index] = tmp_log_buf_index + end_id; topk_tmp_val_buf[index] = log_probs[tmp_log_buf_index + end_id]; } else { topk_tmp_id_buf[index] = -1; topk_tmp_val_buf[index] = -MAX_T_VAL; } } return; } for(int elem_id = tid + block_lane * BLOCK_SIZE_; elem_id < vocab_size; elem_id += BLOCK_SIZE_ * BLOCKS_PER_BEAM_) { int index = elem_id + tmp_log_buf_index; tmp_log_probs[index] = log_probs[index]; } for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int elem_id = tid + block_lane * BLOCK_SIZE_; elem_id < vocab_size; elem_id += BLOCK_SIZE_ * BLOCKS_PER_BEAM_) { int index = elem_id + tmp_log_buf_index; partial.insert(tmp_log_probs[index], index); } TopK_2<T> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<T>); if (tid == 0) { const int index = tmp_topk_buf_index + ite; topk_tmp_id_buf[index] = total.p; topk_tmp_val_buf[index] = total.u; tmp_log_probs[total.p] = -MAX_T_VAL; } __syncthreads(); } } template<typename T, int BLOCK_SIZE_, int BLOCKS_PER_BEAM_> __global__ void topk_stage_2_opt3( const int* __restrict topk_tmp_id_buf, T* topk_tmp_val_buf, int* ids, const int k) { const int size = k * k * BLOCKS_PER_BEAM_; const int tid = threadIdx.x; const int batch_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; typedef cub::BlockReduce<TopK_2<T>, BLOCK_SIZE_> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; extern __shared__ char array[]; T *s_val = topk_tmp_val_buf + batch_id * size; int *s_id = (int*)(array); TopK_2<T> partial; for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int i = tid; i < size; i+= BLOCK_SIZE_) { partial.insert(s_val[i], i); } TopK_2<T> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<T>); if(tid == 0) { s_id[ite] = total.p; s_val[total.p] = -MAX_T_VAL; } __syncthreads(); } if(tid < k) ids[batch_id * k + tid] = topk_tmp_id_buf[batch_id * size + s_id[tid]]; } template<typename T, int BLOCK_SIZE_, int BLOCKS_PER_BEAM_> __global__ void topk_stage_2_opt3_sampling(const int* __restrict topk_tmp_id_buf, T* topk_tmp_val_buf, T* topk_tmp2_val_buf, int* ids, int* sequence_length, bool* finished_buf, const int k, curandState_t* curandstate, const int end_id, const int vocab_size) { const int size = k * BLOCKS_PER_BEAM_; const int tid = threadIdx.x; const int batch_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; typedef cub::BlockReduce<TopK_2<float>, BLOCK_SIZE_> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; extern __shared__ char array[]; __shared__ float rand_num; __shared__ float s_sum; __shared__ float s_max; T *s_val = topk_tmp_val_buf + batch_id * size; int *s_id = (int*)(array); s_max = (float)0.0f; s_sum = (float)0.0f; TopK_2<float> partial; for(int index = tid; index < size; index += BLOCK_SIZE_) { topk_tmp2_val_buf[batch_id * size + index] = topk_tmp_val_buf[batch_id * size + index]; } __syncthreads(); T *s_val2 = topk_tmp2_val_buf + batch_id * size; for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int i = tid; i < size; i+= BLOCK_SIZE_) { partial.insert((float)s_val[i], i); } TopK_2<float> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<float>); if(ite == 0) s_max = total.u; if(tid == 0) { s_id[ite] = total.p; s_val[total.p] = -MAX_T_VAL; total.u = __expf(total.u - s_max); s_val2[total.p] = (T)total.u; s_sum += total.u; } __syncthreads(); } if(tid == 0) { rand_num = (float)curand_uniform(curandstate + blockIdx.x) * s_sum; for(int i = 0; i < k; i++) { rand_num = rand_num - (float)s_val2[s_id[i]]; if(rand_num <= 0.0f) { ids[batch_id] = topk_tmp_id_buf[batch_id * size + s_id[i]] % vocab_size; break; } } if(finished_buf != nullptr) { finished_buf[batch_id] = ids[batch_id] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[batch_id] = finished_buf[batch_id] ? sequence_length[batch_id] : sequence_length[batch_id] + 1; } } } } template<typename T, int BLOCK_SIZE, int BLOCKS_PER_BEAM> __global__ void topk_stage_1_opt2_general( const T* __restrict log_probs, T* tmp_log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, const int k, const int vocab_size ) { const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; typedef cub::BlockReduce<TopK_2<T>, BLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; const int tid = threadIdx.x; const int bid = blockIdx.x; const int row_id = bid / BLOCKS_PER_BEAM; // row id for log_probs const int block_lane = bid % BLOCKS_PER_BEAM; // block id for a beam const int tmp_log_buf_index = row_id * vocab_size; const int tmp_topk_buf_index = row_id * BLOCKS_PER_BEAM * k + block_lane * k; TopK_2<T> partial; for(int elem_id = tid + block_lane * BLOCK_SIZE; elem_id < vocab_size; elem_id += BLOCK_SIZE * BLOCKS_PER_BEAM) { int index = elem_id + tmp_log_buf_index; tmp_log_probs[index] = log_probs[index]; } for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int elem_id = tid + block_lane * BLOCK_SIZE; elem_id < vocab_size; elem_id += BLOCK_SIZE * BLOCKS_PER_BEAM) { int index = elem_id + tmp_log_buf_index; partial.insert(tmp_log_probs[index], index); } TopK_2<T> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<T>); if (tid == 0) { const int index = tmp_topk_buf_index + ite; topk_tmp_id_buf[index] = total.p; topk_tmp_val_buf[index] = total.u; tmp_log_probs[total.p] = -MAX_T_VAL; } __syncthreads(); } } template<typename T, int BLOCK_SIZE, int BLOCKS_PER_BEAM> __global__ void topk_stage_2_opt2_general( const int* __restrict topk_tmp_id_buf, T* topk_tmp_val_buf, int* ids, const int k) { const int size = k * k * BLOCKS_PER_BEAM; const int tid = threadIdx.x; const int batch_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; typedef cub::BlockReduce<TopK_2<T>, BLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; extern __shared__ char array[]; T *s_val = topk_tmp_val_buf + batch_id * size; int *s_id = (int*)(array); TopK_2<T> partial; for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int i = tid; i < size; i+= BLOCK_SIZE) { partial.insert(s_val[i], i); } TopK_2<T> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<T>); if(tid == 0) { s_id[ite] = total.p; s_val[total.p] = -MAX_T_VAL; } __syncthreads(); } if(tid < k) ids[batch_id * k + tid] = topk_tmp_id_buf[batch_id * size + s_id[tid]]; } #define CASE_K_DIV(K,BLOCK_SIZE_1, BLOCK_SIZE_2) \ case K: \ beam_topK_kernel<T, K, BLOCK_SIZE_2><<<batch_size * beam_width, BLOCK_SIZE_2, 0, stream>>>(log_probs, \ topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); \ if (K < 10) \ batch_topK_kernel<T, K, BLOCK_SIZE_1><<<batch_size, BLOCK_SIZE_1, 0, stream>>>(topk_tmp_id_buf, topk_tmp_val_buf, ids); \ else \ batch_topK_kernel_v2<T, K, 32><<<batch_size, 32, 0, stream>>>(topk_tmp_id_buf, topk_tmp_val_buf, ids); \ break; \ #define CASE_K(K,BLOCK_SIZE_1_, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_) \ case K: \ topk_stage_1_opt3<float, BLOCK_SIZE_1_, BLOCKS_PER_BEAM_><<<batch_size * K * BLOCKS_PER_BEAM_, BLOCK_SIZE_1_, 0, stream>>>( \ log_probs, \ temp_log_probs, \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ finished, \ beam_width, vocab_size, end_id); \ topk_stage_2_opt3<float, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_><<<batch_size, BLOCK_SIZE_2_, K * sizeof(int), stream>>>( \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ ids, \ beam_width); \ break; \ template <typename T> void topK_kernelLauncher(void* workspace, size_t& workspace_size, T* log_probs, int* ids, const bool* finished, DecodingBeamsearchArguments args, cudaStream_t stream) { const int batch_size = args.batch_size_; const int beam_width = args.beam_width_; const int vocab_size = args.vocab_size_padded_; const T 
diversity_rate = args.beam_search_diversity_rate_; const int end_id = args.end_id_; const int max_block_per_beam = 8; int temp_log_probs_buf_size = batch_size * beam_width * vocab_size; // type float int topk_tmp_ids_buf_size = batch_size * beam_width * beam_width * max_block_per_beam; // type int int topk_tmp_val_buf_size = batch_size * beam_width * beam_width * max_block_per_beam; // type float // prevent memory misalinged address temp_log_probs_buf_size = (int)(ceil(temp_log_probs_buf_size / 4.)) * 4; topk_tmp_ids_buf_size = (int)(ceil(topk_tmp_ids_buf_size / 4.)) * 4; topk_tmp_val_buf_size = (int)(ceil(topk_tmp_val_buf_size / 4.)) * 4; if(workspace == nullptr) { workspace_size = sizeof(float) * temp_log_probs_buf_size + sizeof(int) * topk_tmp_ids_buf_size + sizeof(float) * topk_tmp_val_buf_size; return; } else { T* temp_log_probs = (T*)workspace; int* topk_tmp_id_buf = (int*)(temp_log_probs + temp_log_probs_buf_size); T* topk_tmp_val_buf = (T*)(topk_tmp_id_buf + topk_tmp_ids_buf_size); if(diversity_rate == 0.0f) { switch(beam_width) { CASE_K(1,128,128,8); CASE_K(4,128,128,8); CASE_K(10,128,128,8); CASE_K(16,128,128,5); CASE_K(32,256,128,1); CASE_K(64,256,256,1); default: topk_stage_1_opt2_general<T, 128, 1><<<batch_size * beam_width * 1, 128, 0, stream>>>( log_probs, temp_log_probs, topk_tmp_id_buf, topk_tmp_val_buf, beam_width, vocab_size); topk_stage_2_opt2_general<T, 128, 1><<<batch_size, 128, beam_width*beam_width*1*sizeof(float) + beam_width * sizeof(int), stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, ids, beam_width); break; } } else { switch(beam_width) { CASE_K_DIV(1,256,256); CASE_K_DIV(4,256,256); CASE_K_DIV(16,256,64); CASE_K_DIV(64,256,64); default: printf("[ERROR] Topk kernel does not support beamwidth = %d \n", beam_width); exit(0); break; } } return; } } #undef CASE_K #undef CASE_K_DIV template void topK_kernelLauncher<float>(void* workspace, size_t& workspace_size, float* log_probs, int* ids, const bool* finished, DecodingBeamsearchArguments 
args, cudaStream_t stream); // Sampling kernels template<typename T> __global__ void sampling(int* topk_tmp_id_buf, T* topk_tmp_val_buf, int* ids, int* sequence_length, bool* finished_buf, const int candidate_num, int random_num, const int end_id, const int vocab_size) { int tid = threadIdx.x; int bid = blockIdx.x; __shared__ float sum; __shared__ float rand_num; if(tid < candidate_num) { float max_val = topk_tmp_val_buf[bid * candidate_num]; topk_tmp_val_buf[bid * candidate_num + tid] = (T)__expf((float)topk_tmp_val_buf[bid * candidate_num + tid] - max_val); } if(tid == 0) { sum = 0.0f; for(int i = 0; i < candidate_num; i++) { sum = sum + (float)topk_tmp_val_buf[bid * candidate_num + i]; } curandState_t local_state; curand_init((T)random_num, bid, 0, &local_state); rand_num = (float)curand_uniform(&local_state) * sum; ids[bid] = topk_tmp_id_buf[bid * candidate_num + candidate_num - 1] % vocab_size; for(int i = 0; i < candidate_num; i++) { rand_num = rand_num - (float)topk_tmp_val_buf[bid * candidate_num + i]; if(rand_num <= 0.0f){ ids[bid] = topk_tmp_id_buf[bid * candidate_num + i] % vocab_size; break; } } if(finished_buf != nullptr) { finished_buf[bid] = ids[bid] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[bid] = finished_buf[bid] ? sequence_length[bid] : sequence_length[bid] + 1; } } } } #define CASE_K(K) \ case K : \ beam_topK_kernel<T, K, block_size><<<batch_size, block_size, 0, stream>>>(log_probs, \ topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, 0.0f); \ break; \ template <typename T> void topK_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, T* log_probs, int* ids, int* sequence_length, bool* finished_buf, int random_num, DecodingSamplingArguments args, cudaStream_t stream, const int batch_size) { // This function would be called two or more times. // First time is used to get the workspace size, so we need to put // max batch size we want to use. 
// For other times, we need to put the inference batch size to // set the grid size we use. const int vocab_size = args.vocab_size_padded_; const int candidate_num = args.candidate_num_; const int end_id = args.end_id_; const int block_size = 256; int topk_tmp_ids_buf_size = args.batch_size_ * args.candidate_num_; // type int int topk_tmp_val_buf_size = args.batch_size_ * args.candidate_num_; // type T topk_tmp_ids_buf_size = (int)(ceil(topk_tmp_ids_buf_size / 4.)) * 4; topk_tmp_val_buf_size = (int)(ceil(topk_tmp_val_buf_size / 4.)) * 4; if(workspace == nullptr) { workspace_size = sizeof(int) * topk_tmp_ids_buf_size + sizeof(T) * topk_tmp_val_buf_size; } else { int* topk_tmp_id_buf = (int*)workspace; T* topk_tmp_val_buf = (T*)(topk_tmp_id_buf + topk_tmp_ids_buf_size); switch(candidate_num) { CASE_K(1); CASE_K(2); CASE_K(4); CASE_K(16); CASE_K(64); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", candidate_num); exit(0); break; } sampling<T> <<< batch_size, candidate_num, 0, stream>>> (topk_tmp_id_buf, topk_tmp_val_buf, ids, sequence_length, finished_buf, candidate_num, random_num, end_id, vocab_size); } } #undef CASE_K #define CASE_K(K_MIN, K_MAX, BLOCK_SIZE_1_, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_) \ case K_MIN ... 
K_MAX: \ topk_stage_1_opt3<T, BLOCK_SIZE_1_, BLOCKS_PER_BEAM_><<<batch_size * BLOCKS_PER_BEAM_, BLOCK_SIZE_1_, 0, stream>>>( \ log_probs, \ temp_log_probs, \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ finished_buf, \ candidate_num, vocab_size, end_id); \ topk_stage_2_opt3_sampling<T, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_><<<batch_size, BLOCK_SIZE_2_, K_MAX * sizeof(int) , stream>>>( \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ topk_tmp2_val_buf, \ ids, \ sequence_length, \ finished_buf, \ candidate_num, \ curandstate, \ end_id, \ vocab_size); \ break; \ template <typename T> void topK_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, T* log_probs, int* ids, int* sequence_length, bool* finished_buf, curandState_t* curandstate, DecodingSamplingArguments args, cudaStream_t stream, const int batch_size) { // Here, we put batch size as an argument because the batch size of initialization // and inference may be different due to pipelint parallelism. const int candidate_num = args.candidate_num_; const int vocab_size = args.vocab_size_padded_; const int end_id = args.end_id_; const int max_block_per_beam = 8; int temp_log_probs_buf_size = batch_size * vocab_size; // type float int topk_tmp_ids_buf_size = batch_size * candidate_num * max_block_per_beam; // type int int topk_tmp_val_buf_size = batch_size * candidate_num * max_block_per_beam; // type float // prevent memory misalinged address temp_log_probs_buf_size = (int)(ceil(temp_log_probs_buf_size / 4.)) * 4; topk_tmp_ids_buf_size = (int)(ceil(topk_tmp_ids_buf_size / 4.)) * 4; topk_tmp_val_buf_size = (int)(ceil(topk_tmp_val_buf_size / 4.)) * 4; if(workspace == nullptr) { workspace_size = sizeof(T) * temp_log_probs_buf_size + sizeof(int) * topk_tmp_ids_buf_size + 2 * sizeof(T) * topk_tmp_val_buf_size; return; } else { T* temp_log_probs = (T*)workspace; int* topk_tmp_id_buf = (int*)(temp_log_probs + temp_log_probs_buf_size); T* topk_tmp_val_buf = (T*)(topk_tmp_id_buf + topk_tmp_ids_buf_size); T* 
topk_tmp2_val_buf = (T*)(topk_tmp_val_buf + topk_tmp_val_buf_size); switch(candidate_num) { CASE_K(1,16,128,128,8); CASE_K(17,32,256,128,8); CASE_K(33,64,256,256,8); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", candidate_num); exit(0); break; } return; } } #undef CASE_K template void topK_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, float* log_probs, int* ids, int* sequence_length, bool* finished_buf, int random_num, DecodingSamplingArguments args, cudaStream_t stream, const int batch_size); template void topK_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, half* log_probs, int* ids, int* sequence_length, bool* finished_buf, int random_num, DecodingSamplingArguments args, cudaStream_t stream, const int batch_size); template void topK_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, float* log_probs, int* ids, int* sequence_length, bool* finished_buf, curandState_t* curandstate, DecodingSamplingArguments args, cudaStream_t stream, const int batch_size); template void topK_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, half* log_probs, int* ids, int* sequence_length, bool* finished_buf, curandState_t* curandstate, DecodingSamplingArguments args, cudaStream_t stream, const int batch_size); __global__ void init_topp_id_val(int* topp_id_val_buf, int* topp_offset_buf, const int batch_size, const int vocab_size) { int tid = threadIdx.x; int bid = blockIdx.x; if(bid == 0) { for(int i = tid; i < batch_size + 1; i+= blockDim.x) { topp_offset_buf[i] = i * vocab_size; } } while(tid < vocab_size) { topp_id_val_buf[bid * vocab_size + tid] = tid; tid += blockDim.x; } } void init_topp_id_val_kernel_kernelLauncher(int* topp_id_val_buf, int* topp_offset_buf, const int batch_size, const int vocab_size, cudaStream_t stream) { init_topp_id_val<<<batch_size, 512, 0, stream>>>(topp_id_val_buf, topp_offset_buf, batch_size, vocab_size); } // Sampling 
kernels template<typename T> __global__ void top_p_sampling(T* sorted_log_probs, int* sorted_id_vals, int* ids, int* sequence_length, bool* finished_buf, const int vocab_size, const int random_num, const float prob_threshold, const int end_id) { int tid = threadIdx.x; curandState_t local_state; curand_init((T)random_num, tid, 0, &local_state); T rand_num = (T)curand_uniform(&local_state) * (T)prob_threshold; ids[tid] = sorted_id_vals[tid * vocab_size]; for(int i = tid * vocab_size; i < tid * vocab_size + vocab_size; i++) { rand_num = rand_num - sorted_log_probs[i]; if(rand_num <= (T)0.0f) { ids[tid] = sorted_id_vals[i]; break; } } if(finished_buf != nullptr) { finished_buf[tid] = ids[tid] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[tid] = finished_buf[tid] ? sequence_length[tid] : sequence_length[tid] + 1; } } } template<typename T> __global__ void top_p_sampling_v2(T* sorted_log_probs, int* sorted_id_vals, int* ids, int* sequence_length, bool* finished_buf, const int vocab_size, curandState_t* curandstate, const float prob_threshold, const int end_id, const int batch_size) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < batch_size) { T rand_num = (T)curand_uniform(curandstate + tid) * (T)prob_threshold; ids[tid] = sorted_id_vals[vocab_size - 1]; for(int i = tid * vocab_size; i < tid * vocab_size + vocab_size; i++) { rand_num = rand_num - sorted_log_probs[i]; if(rand_num <= (T)0.0) { ids[tid] = sorted_id_vals[i]; break; } }; if(finished_buf != nullptr) { finished_buf[tid] = ids[tid] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[tid] = finished_buf[tid] ? 
sequence_length[tid] : sequence_length[tid] + 1; } } } } template <typename T> __global__ void sort_kernel(const T* log_probs, const int* id_vals, T* sorted_log_probs, int* sorted_id_vals, const int vocab_size) { typedef cub::BlockRadixSort<T, 256, 32, int> BlockRadixSort; __shared__ typename BlockRadixSort::TempStorage temp_storage; // Obtain a segment of consecutive items that are blocked across threads T thread_keys[32]; int thread_values[32]; int tid = threadIdx.x; int bid = blockIdx.x; for(int i = 0; i < 32; i++) { int index = tid + 256 * i + bid * vocab_size; thread_keys[i] = log_probs[index]; thread_values[i] = id_vals[index]; } BlockRadixSort(temp_storage).SortDescending(thread_keys, thread_values); for(int i = 0; i < 32; i++) { int index = tid + 256 * i + bid * vocab_size; sorted_log_probs[index] = thread_keys[i]; sorted_id_vals[index] = thread_values[i]; } } template<typename T> void topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, const T* log_probs, const int* id_vals, const int* offset_buf, bool* finished_buf, int step, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, cudaStream_t stream, const int batch_size) { const int vocab_size = args.vocab_size_padded_; int sorted_log_prob_buf_size = batch_size * vocab_size; // type T int sorted_id_vals_buf_size = batch_size * vocab_size; // type int sorted_log_prob_buf_size = (int)(ceil(sorted_log_prob_buf_size / 4.)) * 4; sorted_id_vals_buf_size = (int)(ceil(sorted_id_vals_buf_size / 4.)) * 4; void *cub_temp_storage = workspace; T* sorted_log_probs = (T*)((char*)cub_temp_storage + args.cub_temp_storage_size_); int* sorted_id_vals = (int*)(sorted_log_probs + sorted_log_prob_buf_size); if(workspace == nullptr) { cub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, args.cub_temp_storage_size_, log_probs, (T*)nullptr, id_vals, (int*)nullptr, vocab_size * batch_size, batch_size, offset_buf, offset_buf + 1, 0, // begin_bit sizeof(T)*8, // end_bit 
= sizeof(KeyT) * 8 stream); // cudaStream_t args.cub_temp_storage_size_ = (int)(ceil(args.cub_temp_storage_size_ / 4.)) * 4; workspace_size = sizeof(T) * sorted_log_prob_buf_size + sizeof(int) * sorted_id_vals_buf_size + args.cub_temp_storage_size_; } else { cub::DeviceSegmentedRadixSort::SortPairsDescending(cub_temp_storage, args.cub_temp_storage_size_, log_probs, sorted_log_probs, id_vals, sorted_id_vals, n * batch_size, batch_size, offset_buf, offset_buf + 1, 0, // begin_bit sizeof(T)*8, // end_bit = sizeof(KeyT) * 8 stream); // cudaStream_t top_p_sampling<<<1, batch_size, 0, stream>>>(sorted_log_probs, sorted_id_vals, output_ids, sequence_length, finished_buf, n, step, args.probability_threshold_, args.end_id_); } } template void topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, const float* log_probs, const int* id_vals, const int* offset_buf, bool* finished_buf, int step, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, cudaStream_t stream, const int batch_size); template void topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, const half* log_probs, const int* id_vals, const int* offset_buf, bool* finished_buf, int step, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, cudaStream_t stream, const int batch_size); template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void beam_topK_kernel_for_topP(const T* log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, const int vocab_size, int* offset_buf, int* begin_offset_buf, float p_threshold) { typedef cub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int thread_id = threadIdx.x; int block_id = blockIdx.x; TopK<T, MAX_K> partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; #pragma unroll for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } #pragma unroll for(int elem_id = thread_id; elem_id < vocab_size; elem_id += THREADBLOCK_SIZE) { int index = elem_id + block_id * vocab_size; partial.insert(log_probs[index], index); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if(thread_id == 0) { begin_offset_buf[block_id] = offset_buf[block_id]; T sum_prob = (T)(0.0f); #pragma unroll for(int i = 0; i < MAX_K; i++) { sum_prob += total.u[i]; } if ((float)sum_prob >= p_threshold) { begin_offset_buf[block_id] += vocab_size; int index = block_id * vocab_size; #pragma unroll for(int i = 0; i < MAX_K; ++i) { topk_tmp_id_buf[index + i] = total.p[i]%vocab_size; topk_tmp_val_buf[index + i] = total.u[i]; } } } } template<typename T> void topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, const T* log_probs, const int* id_vals, int* offset_buf, int* begin_offset_buf, bool* finished_buf, curandState_t* curandstate, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, cudaStream_t stream, const int batch_size) { // Here, we put batch size as an argument because the batch size of initialization // and inference may be different due to pipelint parallelism. 
const int vocab_size = args.vocab_size_padded_; const int block_size = 256; int sorted_log_prob_buf_size = batch_size * vocab_size; // type T int sorted_id_vals_buf_size = batch_size * vocab_size; // type int sorted_log_prob_buf_size = (int)(ceil(sorted_log_prob_buf_size / 4.)) * 4; sorted_id_vals_buf_size = (int)(ceil(sorted_id_vals_buf_size / 4.)) * 4; void *cub_temp_storage = workspace; T* sorted_log_probs = (T*)((char*)cub_temp_storage + args.cub_temp_storage_size_); int* sorted_id_vals = (int*)(sorted_log_probs + sorted_log_prob_buf_size); if(workspace == nullptr) { cub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, args.cub_temp_storage_size_, log_probs, (T*)nullptr, id_vals, (int*)nullptr, vocab_size * batch_size, batch_size, begin_offset_buf, offset_buf + 1, 0, // begin_bit sizeof(T)*8, // end_bit = sizeof(KeyT) * 8 stream); // cudaStream_t args.cub_temp_storage_size_ = (int)(ceil(args.cub_temp_storage_size_ / 4.)) * 4; workspace_size = sizeof(T) * sorted_log_prob_buf_size + sizeof(int) * sorted_id_vals_buf_size + args.cub_temp_storage_size_; } else { beam_topK_kernel_for_topP<T, 1, block_size><<<batch_size, block_size, 0, stream>>>(log_probs, \ sorted_id_vals, sorted_log_probs, vocab_size, offset_buf,begin_offset_buf, args.probability_threshold_); cub::DeviceSegmentedRadixSort::SortPairsDescending(cub_temp_storage, args.cub_temp_storage_size_, log_probs, sorted_log_probs, id_vals, sorted_id_vals, n * batch_size, batch_size, begin_offset_buf, offset_buf+1, 0, // begin_bit sizeof(T)*8, // end_bit = sizeof(KeyT) * 8 stream); // cudaStream_t dim3 block(256); dim3 grid((int)(ceil(batch_size * 1.0 / 256))); top_p_sampling_v2<<<grid, block, 0, stream>>>(sorted_log_probs, sorted_id_vals, output_ids, sequence_length, finished_buf, n, curandstate, args.probability_threshold_, args.end_id_, batch_size); } } template void topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, const float* log_probs, const int* id_vals, int* 
offset_buf, int* begin_offset_buf, bool* finished_buf, curandState_t* curandstate, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, cudaStream_t stream, const int batch_size); template void topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, const half* log_probs, const int* id_vals, int* offset_buf, int* begin_offset_buf, bool* finished_buf, curandState_t* curandstate, DecodingSamplingArguments& args, int* output_ids, int* sequence_length, const int n, cudaStream_t stream, const int batch_size); template<typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void topK_topP_sampling_kernel(int* output_ids, const T* logits, const int vocab_size, const int random_num, const float prob_threshold, T diversity_rate) { typedef cub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int thread_id = threadIdx.x; int block_id = blockIdx.x; TopK<T, MAX_K> partial; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; #pragma unroll for(int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } #pragma unroll for(int elem_id = thread_id; elem_id < vocab_size; elem_id += THREADBLOCK_SIZE) { int index = elem_id + block_id * vocab_size; partial.insert(logits[index], index); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if(thread_id == 0) { // float sum = 0.0f; T sum = (T)(0.0f); T max_val = total.u[0]; #pragma unroll for(int i = 0; i < MAX_K; i++) { total.u[i] = total.u[i] + diversity_rate * (T)i; // diversely sampling penalty total.u[i] = (T)__expf((float)(total.u[i] - max_val)); sum += total.u[i]; } curandState_t local_state; curand_init((T)random_num, 0, block_id, &local_state); T rand_num = (T)curand_uniform(&local_state) * (T)prob_threshold * sum; output_ids[block_id] = total.p[0] % vocab_size; #pragma unroll for(int i = 0; i < MAX_K; i++) { rand_num = rand_num - total.u[i]; if(rand_num <= (T)0.0f){ output_ids[block_id] = total.p[i] % vocab_size; break; } } } } #define CASE_K(K) \ case K : \ topK_topP_sampling_kernel<T, K, block_size><<<batch_size, block_size, 0, stream>>>(output_ids, logits, \ vocab_size, random_num, prob_threshold, 0.0f); \ break; \ template<typename T> void topK_topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, int* output_ids, const T* logits, const int random_num, DecodingSamplingArguments& args, cudaStream_t stream, const int batch_size) { if(workspace == nullptr) { workspace_size = 0; } else { const int vocab_size = args.vocab_size_padded_; const int block_size = 256; const T prob_threshold = args.probability_threshold_; switch(args.candidate_num_) { CASE_K(1); CASE_K(2); CASE_K(4); CASE_K(16); CASE_K(64); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", args.candidate_num_); exit(0); break; } } } #undef CASE_K template<typename T, int BLOCK_SIZE_, int BLOCKS_PER_BEAM_> __global__ void 
topk_topp_sampling_kernel_v2(const int* __restrict topk_tmp_id_buf, T* topk_tmp_val_buf, T* topk_tmp2_val_buf, int* ids, int* sequence_length, bool* finished_buf, const int k, const T prob_threshold, curandState_t* curandstate, const int end_id, const int vocab_size) { const int size = k * BLOCKS_PER_BEAM_; const int tid = threadIdx.x; const int batch_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; typedef cub::BlockReduce<TopK_2<float>, BLOCK_SIZE_> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; extern __shared__ char array[]; __shared__ float rand_num; __shared__ float s_max; __shared__ float s_sum; T *s_val = topk_tmp_val_buf + batch_id * size; int *s_id = (int*)(array); s_max = 0.0f; s_sum = 0.0f; TopK_2<float> partial; for(int index = tid; index < size; index += BLOCK_SIZE_) { topk_tmp2_val_buf[batch_id * size + index] = topk_tmp_val_buf[batch_id * size + index]; } __syncthreads(); T *s_val2 = topk_tmp2_val_buf + batch_id * size; for(int ite = 0; ite < k; ite++) { partial.init(); #pragma unroll for(int i = tid; i < size; i+= BLOCK_SIZE_) { partial.insert((float)s_val[i], i); } TopK_2<float> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op_2<float>); if(ite == 0) s_max = total.u; if(tid == 0) { s_id[ite] = total.p; s_val[total.p] = -MAX_T_VAL; total.u = __expf(total.u - s_max); s_val2[total.p] = (T)total.u; s_sum += total.u; } __syncthreads(); } if(tid == 0) { rand_num = (float)curand_uniform(curandstate + blockIdx.x) * (float)prob_threshold * s_sum; for(int i = 0; i < k; i++) { rand_num = rand_num - (float)s_val2[s_id[i]]; if(rand_num <= 0.0f) { ids[batch_id] = topk_tmp_id_buf[batch_id * size + s_id[i]] % vocab_size; break; } } if(finished_buf != nullptr) { finished_buf[batch_id] = ids[batch_id] == end_id ? 1 : 0; if(sequence_length != nullptr) { sequence_length[batch_id] = finished_buf[batch_id] ? 
sequence_length[batch_id] : sequence_length[batch_id] + 1; } } } } #define CASE_K(K_MIN, K_MAX ,BLOCK_SIZE_1_, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_) \ case K_MIN ... K_MAX: \ topk_stage_1_opt3<T, BLOCK_SIZE_1_, BLOCKS_PER_BEAM_><<<batch_size * BLOCKS_PER_BEAM_, BLOCK_SIZE_1_, 0, stream>>>( \ logits, \ temp_logits, \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ finished_buf, \ candidate_num, vocab_size, end_id); \ topk_topp_sampling_kernel_v2<T, BLOCK_SIZE_2_, BLOCKS_PER_BEAM_><<<batch_size, BLOCK_SIZE_2_, K_MAX * sizeof(int) , stream>>>( \ topk_tmp_id_buf, \ topk_tmp_val_buf, \ topk_tmp2_val_buf, \ output_ids, \ nullptr, \ finished_buf, \ candidate_num, \ prob_threshold, \ curandstate, \ end_id, \ vocab_size); \ break; \ template <typename T> void topK_topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, int* output_ids, const T* logits, bool* finished_buf, curandState_t* curandstate, DecodingSamplingArguments& args, cudaStream_t stream, const int batch_size) { // Here, we put batch size as an argument because the batch size of initialization // and inference may be different due to pipelint parallelism. 
const int candidate_num = args.candidate_num_; const int vocab_size = args.vocab_size_padded_; const int end_id = args.end_id_; const T prob_threshold = args.probability_threshold_; const int max_block_per_beam = 8; int temp_logits_buf_size = batch_size * vocab_size; // type float int topk_tmp_ids_buf_size = batch_size * candidate_num * max_block_per_beam; // type int int topk_tmp_val_buf_size = batch_size * candidate_num * max_block_per_beam; // type float // prevent memory misalinged address temp_logits_buf_size = (int)(ceil(temp_logits_buf_size / 4.)) * 4; topk_tmp_ids_buf_size = (int)(ceil(topk_tmp_ids_buf_size / 4.)) * 4; topk_tmp_val_buf_size = (int)(ceil(topk_tmp_val_buf_size / 4.)) * 4; if(workspace == nullptr) { workspace_size = sizeof(T) * temp_logits_buf_size + sizeof(int) * topk_tmp_ids_buf_size + 2 * sizeof(T) * topk_tmp_val_buf_size; return; } else { T* temp_logits = (T*)workspace; int* topk_tmp_id_buf = (int*)(temp_logits + temp_logits_buf_size); T* topk_tmp_val_buf = (T*)(topk_tmp_id_buf + topk_tmp_ids_buf_size); T* topk_tmp2_val_buf = (T*)(topk_tmp_val_buf + topk_tmp_val_buf_size); switch(candidate_num) { CASE_K(1,16,128,128,8); CASE_K(17,32,256,128,8); CASE_K(33,64,256,256,8); default: printf("[ERROR] Topk kernel does not support candidate_num = %d \n", candidate_num); exit(0); break; } return; } } #undef CASE_K template void topK_topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, int* output_ids, const float* logits, const int random_num, DecodingSamplingArguments& args, cudaStream_t stream, const int batch_size); template void topK_topP_sampling_kernel_kernelLauncher(void* workspace, size_t& workspace_size, int* output_ids, const half* logits, const int random_num, DecodingSamplingArguments& args, cudaStream_t stream, const int batch_size); template void topK_topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, int* output_ids, const float* logits, bool* finished_buf, curandState_t* 
curandstate, DecodingSamplingArguments& args, cudaStream_t stream, const int batch_size); template void topK_topP_sampling_kernel_kernelLauncher_v2(void* workspace, size_t& workspace_size, int* output_ids, const half* logits, bool* finished_buf, curandState_t* curandstate, DecodingSamplingArguments& args, cudaStream_t stream, const int batch_size); } // end of namespace fastertransformer
da8f07e2ef64f1affb3a8f90ef690875fb133c8d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "hip/hip_runtime_api.h" #include "gtest/gtest.h" #include <utilities/base_fixture.hpp> #include <utilities/test_utilities.hpp> #include <rmm/exec_policy.hpp> #include <cugraph/algorithms.hpp> #include <raft/handle.hpp> #include <topology/topology.cuh> #include <algorithm> #include <iterator> #include <limits> #include <numeric> #include <tuple> #include <utilities/high_res_timer.hpp> #include <vector> namespace topo = cugraph::topology; struct RandomWalks_Usecase { std::string graph_file_full_path{}; bool test_weighted{false}; RandomWalks_Usecase(std::string const& graph_file_path, bool test_weighted) : test_weighted(test_weighted) { if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) { graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path; } else { graph_file_full_path = graph_file_path; } }; }; class Tests_RWSegSort : public ::testing::TestWithParam<RandomWalks_Usecase> { public: Tests_RWSegSort() {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} template <typename vertex_t, typename edge_t, typename weight_t> void run_current_test(RandomWalks_Usecase const& target) { raft::handle_t handle{}; // debuf info: // // std::cout << "read graph file: " << configuration.graph_file_full_path << std::endl; 
cugraph::graph_t<vertex_t, edge_t, weight_t, false, false> graph(handle); std::tie(graph, std::ignore) = cugraph::test::read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, false, false>( handle, target.graph_file_full_path, target.test_weighted, false); size_t num_vertices = graph.get_number_of_vertices(); size_t num_edges = graph.get_number_of_edges(); topo::segment_sorter_by_weights_t seg_sort(handle, num_vertices, num_edges); auto graph_view = graph.view(); // NOTE: barring a graph.sort() method, // this const_cast<> is the only way to test // segmented weight sort for a graph; // edge_t* offsets = const_cast<edge_t*>(graph_view.get_matrix_partition_view().get_offsets()); vertex_t* indices = const_cast<vertex_t*>(graph_view.get_matrix_partition_view().get_indices()); weight_t* values = const_cast<weight_t*>(*(graph_view.get_matrix_partition_view().get_weights())); HighResTimer hr_timer; std::string label{}; label = std::string("Biased RW: CUB Segmented Sort."); hr_timer.start(label); hipProfilerStart(); auto [d_srt_indices, d_srt_weights] = seg_sort(offsets, indices, values); hipProfilerStop(); hr_timer.stop(); bool check_seg_sort = topo::check_segmented_sort(handle, offsets, d_srt_weights.data(), num_vertices, num_edges); ASSERT_TRUE(check_seg_sort); try { auto runtime = hr_timer.get_average_runtime(label); std::cout << "Segmented Sort for Biased RW:\n"; } catch (std::exception const& ex) { std::cerr << ex.what() << '\n'; return; } catch (...) { std::cerr << "ERROR: Unknown exception on timer label search." 
<< '\n'; return; } hr_timer.display(std::cout); } }; TEST_P(Tests_RWSegSort, Initialize_i32_i32_f) { run_current_test<int32_t, int32_t, float>(GetParam()); } INSTANTIATE_TEST_SUITE_P( simple_test, Tests_RWSegSort, ::testing::Values(RandomWalks_Usecase("test/datasets/karate.mtx", true), RandomWalks_Usecase("test/datasets/web-Google.mtx", true), RandomWalks_Usecase("test/datasets/ljournal-2008.mtx", true), RandomWalks_Usecase("test/datasets/webbase-1M.mtx", true))); CUGRAPH_TEST_PROGRAM_MAIN()
da8f07e2ef64f1affb3a8f90ef690875fb133c8d.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cuda_profiler_api.h" #include "gtest/gtest.h" #include <utilities/base_fixture.hpp> #include <utilities/test_utilities.hpp> #include <rmm/exec_policy.hpp> #include <cugraph/algorithms.hpp> #include <raft/handle.hpp> #include <topology/topology.cuh> #include <algorithm> #include <iterator> #include <limits> #include <numeric> #include <tuple> #include <utilities/high_res_timer.hpp> #include <vector> namespace topo = cugraph::topology; struct RandomWalks_Usecase { std::string graph_file_full_path{}; bool test_weighted{false}; RandomWalks_Usecase(std::string const& graph_file_path, bool test_weighted) : test_weighted(test_weighted) { if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) { graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path; } else { graph_file_full_path = graph_file_path; } }; }; class Tests_RWSegSort : public ::testing::TestWithParam<RandomWalks_Usecase> { public: Tests_RWSegSort() {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} template <typename vertex_t, typename edge_t, typename weight_t> void run_current_test(RandomWalks_Usecase const& target) { raft::handle_t handle{}; // debuf info: // // std::cout << "read graph file: " << configuration.graph_file_full_path << std::endl; cugraph::graph_t<vertex_t, edge_t, weight_t, false, false> 
graph(handle); std::tie(graph, std::ignore) = cugraph::test::read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, false, false>( handle, target.graph_file_full_path, target.test_weighted, false); size_t num_vertices = graph.get_number_of_vertices(); size_t num_edges = graph.get_number_of_edges(); topo::segment_sorter_by_weights_t seg_sort(handle, num_vertices, num_edges); auto graph_view = graph.view(); // NOTE: barring a graph.sort() method, // this const_cast<> is the only way to test // segmented weight sort for a graph; // edge_t* offsets = const_cast<edge_t*>(graph_view.get_matrix_partition_view().get_offsets()); vertex_t* indices = const_cast<vertex_t*>(graph_view.get_matrix_partition_view().get_indices()); weight_t* values = const_cast<weight_t*>(*(graph_view.get_matrix_partition_view().get_weights())); HighResTimer hr_timer; std::string label{}; label = std::string("Biased RW: CUB Segmented Sort."); hr_timer.start(label); cudaProfilerStart(); auto [d_srt_indices, d_srt_weights] = seg_sort(offsets, indices, values); cudaProfilerStop(); hr_timer.stop(); bool check_seg_sort = topo::check_segmented_sort(handle, offsets, d_srt_weights.data(), num_vertices, num_edges); ASSERT_TRUE(check_seg_sort); try { auto runtime = hr_timer.get_average_runtime(label); std::cout << "Segmented Sort for Biased RW:\n"; } catch (std::exception const& ex) { std::cerr << ex.what() << '\n'; return; } catch (...) { std::cerr << "ERROR: Unknown exception on timer label search." << '\n'; return; } hr_timer.display(std::cout); } }; TEST_P(Tests_RWSegSort, Initialize_i32_i32_f) { run_current_test<int32_t, int32_t, float>(GetParam()); } INSTANTIATE_TEST_SUITE_P( simple_test, Tests_RWSegSort, ::testing::Values(RandomWalks_Usecase("test/datasets/karate.mtx", true), RandomWalks_Usecase("test/datasets/web-Google.mtx", true), RandomWalks_Usecase("test/datasets/ljournal-2008.mtx", true), RandomWalks_Usecase("test/datasets/webbase-1M.mtx", true))); CUGRAPH_TEST_PROGRAM_MAIN()
21ca0b15a24d069780c5e9cc5feba559c69c2801.hip
// !!! This is a file automatically generated by hipify!!! /** * dropc kerenel testing code */ #include <cassert> #include <iostream> #include <string> #include "libs/matrix.h" #include "libs/nvmatrix.cuh" #include "libs/cuda_common.hpp" #include "dropc_host.hpp" #include "dropc_dev.hpp" //#define PRINT_MATRIX using namespace std; //--------------------------------------------------------------------- // test util functions //--------------------------------------------------------------------- Matrix rand_col_matrix( int r, int c ) { Matrix m( r, c ); m.randomizeUniform(); m.setTrans( true ); // col major matrix return m; } void print_matrix( const Matrix& m, const string& text ) { m.printShape( text.c_str() ); cout << m.isTrans() << endl; m.print(); } void print_matrix( const NVMatrix& m, const string& text ) { m.printShape( text.c_str() ); cout << m.isTrans() << endl; int r = m.getNumRows(); int c = m.getNumCols(); m.print( r, c ); } void compare_matrix( Matrix& lm, Matrix& rm, const string& text ) { assert( lm.getNumRows() == rm.getNumRows() ); assert( lm.getNumCols() == rm.getNumCols() ); assert( lm.isTrans() == rm.isTrans() ); int r = lm.getNumRows(); int c = lm.getNumCols(); bool isTrans = lm.isTrans(); Matrix diff = rand_col_matrix( r,c ); diff.setTrans( isTrans ); lm.subtract( rm, diff ); #ifdef PRINT_MATRIX print_matrix( diff, text ); #endif cout << text << endl; cout << "mean : " << diff.sum()/(r*c) << endl; cout << "max : " << diff.max() << endl; cout << "min : " << diff.min() << endl; } hipEvent_t start, stop; void startDevTimer() { checkCuda( hipEventCreate( &start ) ); checkCuda( hipEventCreate( &stop ) ); checkCuda( hipEventRecord( start, 0 ) ); } float stopDevTimer() { checkCuda( hipEventRecord( stop, 0 ) ); checkCuda( hipEventSynchronize( stop ) ); float elapsedTime; checkCuda( hipEventElapsedTime( &elapsedTime, start, stop ) ); checkCuda( hipEventDestroy( start ) ); checkCuda( hipEventDestroy( stop ) ); return elapsedTime; } 
//--------------------------------------------------------------------- void test_fprop() { cout << " Drop Connection fprop: " << endl; //int m = 5; //int n = 8; //int d = 3; int m = 1024; int n = 1024; int d = 128; //int m = 900; //int n = 807; //int d = 110; //----------------------------------------- // host code //----------------------------------------- // input Matrix x = rand_col_matrix( d,n ); //print_matrix( x, "x matrix" ); Matrix w = rand_col_matrix( n, m ); //print_matrix( w, "w matrix" ); Matrix b = rand_col_matrix( 1, m ); //print_matrix( b, "b matrix" ); b.setTrans( false ); // masks Matrix mw = rand_col_matrix( n, m*d); mw.biggerThanScalar( 0.5 ); //print_matrix( mw, "wb matrix" ); Matrix mb = rand_col_matrix( d, m ); mb.biggerThanScalar( 0.5 ); //print_matrix( mb, "mb matrix" ); // output Matrix y = rand_col_matrix( d, m ); Matrix y_prev(y); y_prev.setTrans(true); y.copy( y_prev ); //print_matrix( y, "y before host compute" ); //--------------- // start gpu timer //--------------- startDevTimer(); // call fprop computeFCDropC_fprop_h( x.getData(), w.getData(), b.getData(), // input matrix m, n, d, // dims mw.getData(), mb.getData(), // masks y.getData() // output ); //--------------- // stop gpu timer //--------------- float elapsedTime_host = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_host << "ms" << endl; // print y #ifdef PRINT_MATRIX print_matrix( y, "y after host compute" ); #endif //----------------------------------------- // dev code //----------------------------------------- // input NVMatrix x_dev( d, n, true ); x_dev.copyFromHost( x ); NVMatrix w_dev( n, m, true ); w_dev.copyFromHost( w ); NVMatrix b_dev( 1, m, false ); b_dev.copyFromHost( b ); // masks NVMatrix mw_dev( n, m*d, true ); mw_dev.copyFromHost( mw ); NVMatrix mb_dev( d, m, true ); mb_dev.copyFromHost( mb ); // output NVMatrix y_dev( d, m, true ); y_dev.copyFromHost( y_prev ); //print_matrix( y_dev, "y_dev before dev compute" ); //--------------- // start gpu 
timer //--------------- startDevTimer(); //-------------- // call fprop //-------------- computeFCDropC_fprop_d( x_dev.getDevData(), w_dev.getDevData(), b_dev.getDevData(), // input matrix m, n, d, // dims mw_dev.getDevData(), mb_dev.getDevData(), // masks y_dev.getDevData() // output ); //--------------- // stop gpu timer //--------------- float elapsedTime_dev = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_dev << "ms" << endl; // host output //print_matrix( y_dev, "y_dev after dev compute" ); //y_dev.print( 0, d, 0, m ); Matrix y2 = rand_col_matrix( d, m ); y_dev.copyToHost( y2 ); #ifdef PRINT_MATRIX print_matrix( y2, "y2 after dev compute" ); #endif // diff compare_matrix( y, y2, "y-y2" ); // print speed up cout << "speed up: " << elapsedTime_host/elapsedTime_dev << endl; } void test_bpropa() { cout << " Drop Connection bprop acts: " << endl; //int m = 5; //int n = 8; //int d = 3; int m = 1024; int n = 1024; int d = 128; //int m = 900; //int n = 807; //int d = 110; //----------------------------------------- // host code //----------------------------------------- // input Matrix v = rand_col_matrix( d,m ); Matrix w = rand_col_matrix( n,m ); // masks Matrix mw = rand_col_matrix( n, m*d); mw.biggerThanScalar( 0.5 ); // output Matrix da = rand_col_matrix(d,n); Matrix da_prev( da ); da_prev.setTrans(true); da.copy( da_prev ); //--------------- // start gpu timer //--------------- startDevTimer(); // call bpropa computeFCDropC_bpropActs_h( v.getData(), w.getData(), m, n, d, 1, mw.getData(), da.getData(), 1 ); //--------------- // stop gpu timer //--------------- float elapsedTime_host = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_host << "ms" << endl; // print output #ifdef PRINT_MATRIX print_matrix( da, "da after host compute" ); #endif //----------------------------------------- // dev code //----------------------------------------- // input NVMatrix v_dev( d, m, true ); v_dev.copyFromHost( v ); NVMatrix w_dev( n, m, true ); 
w_dev.copyFromHost( w ); // masks NVMatrix mw_dev( n, m*d, true ); mw_dev.copyFromHost( mw ); // output NVMatrix da_dev( d, n, true ); da_dev.copyFromHost( da_prev ); //--------------- // start gpu timer //--------------- startDevTimer(); //--------------- // call bpropa //--------------- computeFCDropC_bpropActs_d( v_dev.getDevData(), w_dev.getDevData(), m, n, d, 1, mw_dev.getDevData(), da_dev.getDevData(), 1 ); //--------------- // stop gpu timer //--------------- float elapsedTime_dev = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_dev << "ms" << endl; Matrix da2 = rand_col_matrix( d, n ); da_dev.copyToHost( da2 ); #ifdef PRINT_MATRIX print_matrix( da2, "da after dev compute" ); #endif // diff compare_matrix( da, da2, "da-da2" ); // print speed up cout << "speed up: " << elapsedTime_host/elapsedTime_dev << endl; } void test_bpropw() { //int m = 5; //int n = 8; //int d = 3; int m = 1024; int n = 1024; int d = 128; //int m = 900; //int n = 807; //int d = 110; //----------------------------------------- // host code //----------------------------------------- // input Matrix v = rand_col_matrix( d, m ) ; Matrix a = rand_col_matrix( d, n ); // masks Matrix mw = rand_col_matrix( n, m*d); mw.biggerThanScalar( 0.5 ); // output Matrix dw = rand_col_matrix( n, m ); Matrix dw_prev( dw ); dw_prev.setTrans(true); dw.copy( dw_prev ); //--------------- // start gpu timer //--------------- startDevTimer(); // call bpropw computeFCDropC_bpropWeights_h( a.getData(), v.getData(), m, n, d, 1, mw.getData(), dw.getData(), 1 ); //--------------- // stop gpu timer //--------------- float elapsedTime_host = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_host << "ms" << endl; // print output #ifdef PRINT_MATRIX print_matrix( dw, "dw after host compute" ); #endif //----------------------------------------- // dev code //----------------------------------------- // input NVMatrix v_dev( d, m, true ); v_dev.copyFromHost( v ); NVMatrix a_dev( d, n, true ); 
a_dev.copyFromHost( a ); // masks NVMatrix mw_dev( n, m*d, true ); mw_dev.copyFromHost( mw ); // output NVMatrix dw_dev( n, m, true ); dw_dev.copyFromHost( dw_prev ); //--------------- // start gpu timer //--------------- startDevTimer(); //--------------- // call bpropw //--------------- computeFCDropC_bpropWeights_d( a_dev.getDevData(), v_dev.getDevData(), m, n, d, 1, mw_dev.getDevData(), dw_dev.getDevData(), 1 ); //--------------- // stop gpu timer //--------------- float elapsedTime_dev = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_dev << "ms" << endl; Matrix dw2 = rand_col_matrix( n, m ); dw_dev.copyToHost( dw2 ); #ifdef PRINT_MATRIX print_matrix( dw2, "dw after dev compute" ); #endif // diff compare_matrix( dw, dw2, "dw-dw2" ); // print speed up cout << "speed up: " << elapsedTime_host/elapsedTime_dev << endl; } int main(int argc, char* argv[] ) { if( argc > 1 ) { int devId = atoi( argv[1] ); checkCuda( hipSetDevice( devId ) ); cout << "Manually Set Device: " << devId << endl; } test_fprop(); test_bpropa(); test_bpropw(); return 0; }
21ca0b15a24d069780c5e9cc5feba559c69c2801.cu
/** * dropc kerenel testing code */ #include <cassert> #include <iostream> #include <string> #include "libs/matrix.h" #include "libs/nvmatrix.cuh" #include "libs/cuda_common.hpp" #include "dropc_host.hpp" #include "dropc_dev.hpp" //#define PRINT_MATRIX using namespace std; //--------------------------------------------------------------------- // test util functions //--------------------------------------------------------------------- Matrix rand_col_matrix( int r, int c ) { Matrix m( r, c ); m.randomizeUniform(); m.setTrans( true ); // col major matrix return m; } void print_matrix( const Matrix& m, const string& text ) { m.printShape( text.c_str() ); cout << m.isTrans() << endl; m.print(); } void print_matrix( const NVMatrix& m, const string& text ) { m.printShape( text.c_str() ); cout << m.isTrans() << endl; int r = m.getNumRows(); int c = m.getNumCols(); m.print( r, c ); } void compare_matrix( Matrix& lm, Matrix& rm, const string& text ) { assert( lm.getNumRows() == rm.getNumRows() ); assert( lm.getNumCols() == rm.getNumCols() ); assert( lm.isTrans() == rm.isTrans() ); int r = lm.getNumRows(); int c = lm.getNumCols(); bool isTrans = lm.isTrans(); Matrix diff = rand_col_matrix( r,c ); diff.setTrans( isTrans ); lm.subtract( rm, diff ); #ifdef PRINT_MATRIX print_matrix( diff, text ); #endif cout << text << endl; cout << "mean : " << diff.sum()/(r*c) << endl; cout << "max : " << diff.max() << endl; cout << "min : " << diff.min() << endl; } cudaEvent_t start, stop; void startDevTimer() { checkCuda( cudaEventCreate( &start ) ); checkCuda( cudaEventCreate( &stop ) ); checkCuda( cudaEventRecord( start, 0 ) ); } float stopDevTimer() { checkCuda( cudaEventRecord( stop, 0 ) ); checkCuda( cudaEventSynchronize( stop ) ); float elapsedTime; checkCuda( cudaEventElapsedTime( &elapsedTime, start, stop ) ); checkCuda( cudaEventDestroy( start ) ); checkCuda( cudaEventDestroy( stop ) ); return elapsedTime; } //--------------------------------------------------------------------- 
void test_fprop() { cout << " Drop Connection fprop: " << endl; //int m = 5; //int n = 8; //int d = 3; int m = 1024; int n = 1024; int d = 128; //int m = 900; //int n = 807; //int d = 110; //----------------------------------------- // host code //----------------------------------------- // input Matrix x = rand_col_matrix( d,n ); //print_matrix( x, "x matrix" ); Matrix w = rand_col_matrix( n, m ); //print_matrix( w, "w matrix" ); Matrix b = rand_col_matrix( 1, m ); //print_matrix( b, "b matrix" ); b.setTrans( false ); // masks Matrix mw = rand_col_matrix( n, m*d); mw.biggerThanScalar( 0.5 ); //print_matrix( mw, "wb matrix" ); Matrix mb = rand_col_matrix( d, m ); mb.biggerThanScalar( 0.5 ); //print_matrix( mb, "mb matrix" ); // output Matrix y = rand_col_matrix( d, m ); Matrix y_prev(y); y_prev.setTrans(true); y.copy( y_prev ); //print_matrix( y, "y before host compute" ); //--------------- // start gpu timer //--------------- startDevTimer(); // call fprop computeFCDropC_fprop_h( x.getData(), w.getData(), b.getData(), // input matrix m, n, d, // dims mw.getData(), mb.getData(), // masks y.getData() // output ); //--------------- // stop gpu timer //--------------- float elapsedTime_host = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_host << "ms" << endl; // print y #ifdef PRINT_MATRIX print_matrix( y, "y after host compute" ); #endif //----------------------------------------- // dev code //----------------------------------------- // input NVMatrix x_dev( d, n, true ); x_dev.copyFromHost( x ); NVMatrix w_dev( n, m, true ); w_dev.copyFromHost( w ); NVMatrix b_dev( 1, m, false ); b_dev.copyFromHost( b ); // masks NVMatrix mw_dev( n, m*d, true ); mw_dev.copyFromHost( mw ); NVMatrix mb_dev( d, m, true ); mb_dev.copyFromHost( mb ); // output NVMatrix y_dev( d, m, true ); y_dev.copyFromHost( y_prev ); //print_matrix( y_dev, "y_dev before dev compute" ); //--------------- // start gpu timer //--------------- startDevTimer(); //-------------- // call fprop 
//-------------- computeFCDropC_fprop_d( x_dev.getDevData(), w_dev.getDevData(), b_dev.getDevData(), // input matrix m, n, d, // dims mw_dev.getDevData(), mb_dev.getDevData(), // masks y_dev.getDevData() // output ); //--------------- // stop gpu timer //--------------- float elapsedTime_dev = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_dev << "ms" << endl; // host output //print_matrix( y_dev, "y_dev after dev compute" ); //y_dev.print( 0, d, 0, m ); Matrix y2 = rand_col_matrix( d, m ); y_dev.copyToHost( y2 ); #ifdef PRINT_MATRIX print_matrix( y2, "y2 after dev compute" ); #endif // diff compare_matrix( y, y2, "y-y2" ); // print speed up cout << "speed up: " << elapsedTime_host/elapsedTime_dev << endl; } void test_bpropa() { cout << " Drop Connection bprop acts: " << endl; //int m = 5; //int n = 8; //int d = 3; int m = 1024; int n = 1024; int d = 128; //int m = 900; //int n = 807; //int d = 110; //----------------------------------------- // host code //----------------------------------------- // input Matrix v = rand_col_matrix( d,m ); Matrix w = rand_col_matrix( n,m ); // masks Matrix mw = rand_col_matrix( n, m*d); mw.biggerThanScalar( 0.5 ); // output Matrix da = rand_col_matrix(d,n); Matrix da_prev( da ); da_prev.setTrans(true); da.copy( da_prev ); //--------------- // start gpu timer //--------------- startDevTimer(); // call bpropa computeFCDropC_bpropActs_h( v.getData(), w.getData(), m, n, d, 1, mw.getData(), da.getData(), 1 ); //--------------- // stop gpu timer //--------------- float elapsedTime_host = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_host << "ms" << endl; // print output #ifdef PRINT_MATRIX print_matrix( da, "da after host compute" ); #endif //----------------------------------------- // dev code //----------------------------------------- // input NVMatrix v_dev( d, m, true ); v_dev.copyFromHost( v ); NVMatrix w_dev( n, m, true ); w_dev.copyFromHost( w ); // masks NVMatrix mw_dev( n, m*d, true ); 
mw_dev.copyFromHost( mw ); // output NVMatrix da_dev( d, n, true ); da_dev.copyFromHost( da_prev ); //--------------- // start gpu timer //--------------- startDevTimer(); //--------------- // call bpropa //--------------- computeFCDropC_bpropActs_d( v_dev.getDevData(), w_dev.getDevData(), m, n, d, 1, mw_dev.getDevData(), da_dev.getDevData(), 1 ); //--------------- // stop gpu timer //--------------- float elapsedTime_dev = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_dev << "ms" << endl; Matrix da2 = rand_col_matrix( d, n ); da_dev.copyToHost( da2 ); #ifdef PRINT_MATRIX print_matrix( da2, "da after dev compute" ); #endif // diff compare_matrix( da, da2, "da-da2" ); // print speed up cout << "speed up: " << elapsedTime_host/elapsedTime_dev << endl; } void test_bpropw() { //int m = 5; //int n = 8; //int d = 3; int m = 1024; int n = 1024; int d = 128; //int m = 900; //int n = 807; //int d = 110; //----------------------------------------- // host code //----------------------------------------- // input Matrix v = rand_col_matrix( d, m ) ; Matrix a = rand_col_matrix( d, n ); // masks Matrix mw = rand_col_matrix( n, m*d); mw.biggerThanScalar( 0.5 ); // output Matrix dw = rand_col_matrix( n, m ); Matrix dw_prev( dw ); dw_prev.setTrans(true); dw.copy( dw_prev ); //--------------- // start gpu timer //--------------- startDevTimer(); // call bpropw computeFCDropC_bpropWeights_h( a.getData(), v.getData(), m, n, d, 1, mw.getData(), dw.getData(), 1 ); //--------------- // stop gpu timer //--------------- float elapsedTime_host = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_host << "ms" << endl; // print output #ifdef PRINT_MATRIX print_matrix( dw, "dw after host compute" ); #endif //----------------------------------------- // dev code //----------------------------------------- // input NVMatrix v_dev( d, m, true ); v_dev.copyFromHost( v ); NVMatrix a_dev( d, n, true ); a_dev.copyFromHost( a ); // masks NVMatrix mw_dev( n, m*d, true ); 
mw_dev.copyFromHost( mw ); // output NVMatrix dw_dev( n, m, true ); dw_dev.copyFromHost( dw_prev ); //--------------- // start gpu timer //--------------- startDevTimer(); //--------------- // call bpropw //--------------- computeFCDropC_bpropWeights_d( a_dev.getDevData(), v_dev.getDevData(), m, n, d, 1, mw_dev.getDevData(), dw_dev.getDevData(), 1 ); //--------------- // stop gpu timer //--------------- float elapsedTime_dev = stopDevTimer(); cout << "GPU timer time: " << elapsedTime_dev << "ms" << endl; Matrix dw2 = rand_col_matrix( n, m ); dw_dev.copyToHost( dw2 ); #ifdef PRINT_MATRIX print_matrix( dw2, "dw after dev compute" ); #endif // diff compare_matrix( dw, dw2, "dw-dw2" ); // print speed up cout << "speed up: " << elapsedTime_host/elapsedTime_dev << endl; } int main(int argc, char* argv[] ) { if( argc > 1 ) { int devId = atoi( argv[1] ); checkCuda( cudaSetDevice( devId ) ); cout << "Manually Set Device: " << devId << endl; } test_fprop(); test_bpropa(); test_bpropw(); return 0; }
772c374d517ce21db6ddc06896e072fe37672575.hip
// !!! This is a file automatically generated by hipify!!! // The MIT License (MIT) // // Copyright (c) 2014 WUSTL ZPLAB // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
// // Authors: Erik Hvatum <ice.rikh@gmail.com> #include "_SpatialFilter.h" #include "GilStateScopeOperators.h" #include <hip/hip_runtime.h> #include <hipfft.h> #include <helper_functions.h> #include <helper_cuda.h> #include <iostream> #include <stdexcept> #include <thrust/adjacent_difference.h> #include <thrust/advance.h> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/count.h> #include <thrust/device_allocator.h> #include <thrust/device_delete.h> #include <thrust/device_free.h> #include <thrust/device_malloc.h> #include <thrust/device_malloc_allocator.h> #include <thrust/device_new.h> #include <thrust/device_new_allocator.h> #include <thrust/device_ptr.h> #include <thrust/device_reference.h> #include <thrust/device_vector.h> #include <thrust/distance.h> #include <thrust/equal.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/fill.h> #include <thrust/find.h> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/gather.h> #include <thrust/generate.h> #include <thrust/host_vector.h> #include <thrust/inner_product.h> #include <thrust/logical.h> #include <thrust/memory.h> #include <thrust/merge.h> #include <thrust/mismatch.h> #include <thrust/pair.h> #include <thrust/partition.h> #include <thrust/random.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/replace.h> #include <thrust/reverse.h> #include <thrust/scan.h> #include <thrust/scatter.h> #include <thrust/sequence.h> #include <thrust/set_operations.h> #include <thrust/sort.h> #include <thrust/swap.h> #include <thrust/system_error.h> #include <thrust/tabulate.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> #include <thrust/transform_scan.h> #include <thrust/tuple.h> #include <thrust/uninitialized_copy.h> #include <thrust/uninitialized_fill.h> #include <thrust/unique.h> #include <thrust/version.h> using namespace thrust::placeholders; #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION 
#define PY_ARRAY_UNIQUE_SYMBOL wautofocuser_ARRAY_API #include <numpy/arrayobject.h> static void* do_static_init() { // import_array() is actually a macro that returns NULL if it fails, so it has to be wrapped in order to be called // from a constructor which necessarily does not return anything import_array(); findCudaDevice(0, nullptr); return reinterpret_cast<void*>(1); } static bool static_inited{false}; _SpatialFilter::_SpatialFilter(std::size_t w, std::size_t h) : m_w(w), m_h(h), m_s(w * h), m_filter(m_s), m_forward_plan(0), m_inverse_plan(0) { if(!static_inited) { do_static_init(); static_inited = true; } if(hipfftPlan2d(&m_forward_plan, m_h, m_w, HIPFFT_C2C) != HIPFFT_SUCCESS) { throw std::string("hipfftPlan2d(..) for forward transformation failed."); } if(hipfftPlan2d(&m_inverse_plan, m_h, m_w, HIPFFT_C2C) != HIPFFT_SUCCESS) { throw std::string("hipfftPlan2d(..) for inverse transformation failed."); } } _SpatialFilter::~_SpatialFilter() { if(m_forward_plan) { hipfftDestroy(m_forward_plan); m_forward_plan = 0; } if(m_inverse_plan) { hipfftDestroy(m_inverse_plan); m_inverse_plan = 0; } } std::size_t _SpatialFilter::get_w() const { return m_w; } std::size_t _SpatialFilter::get_h() const { return m_h; } PyObject* _SpatialFilter::get_filter() const { GilLocker gil_locker; PyObject* ret; npy_intp dims[] = {static_cast<npy_intp>(m_h), static_cast<npy_intp>(m_w)}; ret = PyArray_SimpleNew(2, dims, NPY_FLOAT32); thrust::host_vector<float> temp(m_filter); memcpy(PyArray_DATA(reinterpret_cast<PyArrayObject*>(ret)), (const void*)temp.data(), m_w * m_h * sizeof(float)); return ret; } void _SpatialFilter::set_filter(const float* filter) { m_filter.assign(filter, filter + m_w * m_h); } struct ComplexToReal { __device__ __host__ float operator () (const cuFloatComplex cf) const { return cf.x; } }; struct RealToComplex { __device__ __host__ cuFloatComplex operator () (const float f) const { cuFloatComplex cf; cf.x = f; cf.y = 0; return cf; } }; struct RealComplexMult { 
__device__ __host__ float2 operator () (const float& r, const cuFloatComplex& c) const { return {r * c.x, r * c.y}; } }; // struct MultiplyRealComponentAndReal // { // __device__ __host__ cuFloatComplex operator () (const cuFloatComplex& c, const float& r) const // { // return c.x * f; // } // }; // template<typename C, typename R> // struct RtoC // { // __device__ __host__ C operator () (const R& r) const // { // C c; // c.x = r; // c.y = 0; // return c; // } // }; // struct Functor // { // __device__ __host__ float operator () (const float& rhs) const // { // return rhs * 2; // } // }; PyObject* _SpatialFilter::apply(const float* image) const { // thrust::host_vector<float> im_h(image, image + m_w * m_h); // thrust::device_vector<float> im_d(im_h); // using namespace thrust::placeholders; // thrust::transform(thrust::device, im_h.begin(), im_h.end(), thrust::device_pointer_cast(im_d.data()), thrust::negate<float>()); // thrust::transform(im_h.begin(), im_h.end(), im_d.begin(), make_cuComplex); // RtoC<cuFloatComplex, float> rtoc; // thrust::transform(thrust::host, im_h.begin(), im_h.end(), im_d.begin(), rtoc); PyObject* ret; thrust::host_vector<cuFloatComplex> image_h(m_s); thrust::transform(image, image + m_s, image_h.begin(), RealToComplex()); thrust::device_vector<cuFloatComplex> image_d(image_h); thrust::device_vector<cuFloatComplex> fft_d(m_s); if(hipfftExecC2C(m_forward_plan, thrust::raw_pointer_cast(image_d.data()), thrust::raw_pointer_cast(fft_d.data()), HIPFFT_FORWARD) != HIPFFT_SUCCESS) { throw std::runtime_error("hipfftExecC2C(..) 
forward transformation failed."); } thrust::device_vector<cuFloatComplex> fft_t_d(m_s); thrust::transform(thrust::device, m_filter.begin(), m_filter.end(), fft_d.begin(), fft_t_d.begin(), RealComplexMult()); thrust::device_vector<cuFloatComplex> out_d(m_s); if(hipfftExecC2C(m_inverse_plan, thrust::raw_pointer_cast(fft_t_d.data()), thrust::raw_pointer_cast(out_d.data()), HIPFFT_BACKWARD) != HIPFFT_SUCCESS) { throw std::runtime_error("hipfftExecC2C(..) inverse transformation failed."); } thrust::host_vector<cuFloatComplex> out_h(out_d); { GilLocker gil_locker; npy_intp dims[] = {static_cast<npy_intp>(m_h), static_cast<npy_intp>(m_w)}; ret = PyArray_SimpleNew(2, dims, NPY_FLOAT32); thrust::transform(thrust::host, out_h.begin(), out_h.end(), reinterpret_cast<float*>(PyArray_DATA(reinterpret_cast<PyArrayObject*>(ret))), ComplexToReal()); } return ret; }
772c374d517ce21db6ddc06896e072fe37672575.cu
// The MIT License (MIT) // // Copyright (c) 2014 WUSTL ZPLAB // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
// // Authors: Erik Hvatum <ice.rikh@gmail.com> #include "_SpatialFilter.h" #include "GilStateScopeOperators.h" #include <cuda_runtime.h> #include <cufft.h> #include <helper_functions.h> #include <helper_cuda.h> #include <iostream> #include <stdexcept> #include <thrust/adjacent_difference.h> #include <thrust/advance.h> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/count.h> #include <thrust/device_allocator.h> #include <thrust/device_delete.h> #include <thrust/device_free.h> #include <thrust/device_malloc.h> #include <thrust/device_malloc_allocator.h> #include <thrust/device_new.h> #include <thrust/device_new_allocator.h> #include <thrust/device_ptr.h> #include <thrust/device_reference.h> #include <thrust/device_vector.h> #include <thrust/distance.h> #include <thrust/equal.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/fill.h> #include <thrust/find.h> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/gather.h> #include <thrust/generate.h> #include <thrust/host_vector.h> #include <thrust/inner_product.h> #include <thrust/logical.h> #include <thrust/memory.h> #include <thrust/merge.h> #include <thrust/mismatch.h> #include <thrust/pair.h> #include <thrust/partition.h> #include <thrust/random.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/replace.h> #include <thrust/reverse.h> #include <thrust/scan.h> #include <thrust/scatter.h> #include <thrust/sequence.h> #include <thrust/set_operations.h> #include <thrust/sort.h> #include <thrust/swap.h> #include <thrust/system_error.h> #include <thrust/tabulate.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> #include <thrust/transform_scan.h> #include <thrust/tuple.h> #include <thrust/uninitialized_copy.h> #include <thrust/uninitialized_fill.h> #include <thrust/unique.h> #include <thrust/version.h> using namespace thrust::placeholders; #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION 
#define PY_ARRAY_UNIQUE_SYMBOL wautofocuser_ARRAY_API #include <numpy/arrayobject.h> static void* do_static_init() { // import_array() is actually a macro that returns NULL if it fails, so it has to be wrapped in order to be called // from a constructor which necessarily does not return anything import_array(); findCudaDevice(0, nullptr); return reinterpret_cast<void*>(1); } static bool static_inited{false}; _SpatialFilter::_SpatialFilter(std::size_t w, std::size_t h) : m_w(w), m_h(h), m_s(w * h), m_filter(m_s), m_forward_plan(0), m_inverse_plan(0) { if(!static_inited) { do_static_init(); static_inited = true; } if(cufftPlan2d(&m_forward_plan, m_h, m_w, CUFFT_C2C) != CUFFT_SUCCESS) { throw std::string("cufftPlan2d(..) for forward transformation failed."); } if(cufftPlan2d(&m_inverse_plan, m_h, m_w, CUFFT_C2C) != CUFFT_SUCCESS) { throw std::string("cufftPlan2d(..) for inverse transformation failed."); } } _SpatialFilter::~_SpatialFilter() { if(m_forward_plan) { cufftDestroy(m_forward_plan); m_forward_plan = 0; } if(m_inverse_plan) { cufftDestroy(m_inverse_plan); m_inverse_plan = 0; } } std::size_t _SpatialFilter::get_w() const { return m_w; } std::size_t _SpatialFilter::get_h() const { return m_h; } PyObject* _SpatialFilter::get_filter() const { GilLocker gil_locker; PyObject* ret; npy_intp dims[] = {static_cast<npy_intp>(m_h), static_cast<npy_intp>(m_w)}; ret = PyArray_SimpleNew(2, dims, NPY_FLOAT32); thrust::host_vector<float> temp(m_filter); memcpy(PyArray_DATA(reinterpret_cast<PyArrayObject*>(ret)), (const void*)temp.data(), m_w * m_h * sizeof(float)); return ret; } void _SpatialFilter::set_filter(const float* filter) { m_filter.assign(filter, filter + m_w * m_h); } struct ComplexToReal { __device__ __host__ float operator () (const cuFloatComplex cf) const { return cf.x; } }; struct RealToComplex { __device__ __host__ cuFloatComplex operator () (const float f) const { cuFloatComplex cf; cf.x = f; cf.y = 0; return cf; } }; struct RealComplexMult { __device__ 
__host__ float2 operator () (const float& r, const cuFloatComplex& c) const { return {r * c.x, r * c.y}; } }; // struct MultiplyRealComponentAndReal // { // __device__ __host__ cuFloatComplex operator () (const cuFloatComplex& c, const float& r) const // { // return c.x * f; // } // }; // template<typename C, typename R> // struct RtoC // { // __device__ __host__ C operator () (const R& r) const // { // C c; // c.x = r; // c.y = 0; // return c; // } // }; // struct Functor // { // __device__ __host__ float operator () (const float& rhs) const // { // return rhs * 2; // } // }; PyObject* _SpatialFilter::apply(const float* image) const { // thrust::host_vector<float> im_h(image, image + m_w * m_h); // thrust::device_vector<float> im_d(im_h); // using namespace thrust::placeholders; // thrust::transform(thrust::device, im_h.begin(), im_h.end(), thrust::device_pointer_cast(im_d.data()), thrust::negate<float>()); // thrust::transform(im_h.begin(), im_h.end(), im_d.begin(), make_cuComplex); // RtoC<cuFloatComplex, float> rtoc; // thrust::transform(thrust::host, im_h.begin(), im_h.end(), im_d.begin(), rtoc); PyObject* ret; thrust::host_vector<cuFloatComplex> image_h(m_s); thrust::transform(image, image + m_s, image_h.begin(), RealToComplex()); thrust::device_vector<cuFloatComplex> image_d(image_h); thrust::device_vector<cuFloatComplex> fft_d(m_s); if(cufftExecC2C(m_forward_plan, thrust::raw_pointer_cast(image_d.data()), thrust::raw_pointer_cast(fft_d.data()), CUFFT_FORWARD) != CUFFT_SUCCESS) { throw std::runtime_error("cufftExecC2C(..) 
forward transformation failed."); } thrust::device_vector<cuFloatComplex> fft_t_d(m_s); thrust::transform(thrust::device, m_filter.begin(), m_filter.end(), fft_d.begin(), fft_t_d.begin(), RealComplexMult()); thrust::device_vector<cuFloatComplex> out_d(m_s); if(cufftExecC2C(m_inverse_plan, thrust::raw_pointer_cast(fft_t_d.data()), thrust::raw_pointer_cast(out_d.data()), CUFFT_INVERSE) != CUFFT_SUCCESS) { throw std::runtime_error("cufftExecC2C(..) inverse transformation failed."); } thrust::host_vector<cuFloatComplex> out_h(out_d); { GilLocker gil_locker; npy_intp dims[] = {static_cast<npy_intp>(m_h), static_cast<npy_intp>(m_w)}; ret = PyArray_SimpleNew(2, dims, NPY_FLOAT32); thrust::transform(thrust::host, out_h.begin(), out_h.end(), reinterpret_cast<float*>(PyArray_DATA(reinterpret_cast<PyArrayObject*>(ret))), ComplexToReal()); } return ret; }
12cdd195edc855fb2ee8ae91a4fc37af64d3b4f6.hip
// !!! This is a file automatically generated by hipify!!! // #CSCS CUDA Training // // #Example 6 - (block) matrix-matrix multiply with dynamically allocated shared memory // // #Author: Ugo Varetto // // #Goal: multiply two matrices make use of shared memory to accelerate the computation; // the size of the shared memory buffer must be specified at run-time // // #Rationale: shows how shared memory can be dynamically allocated at kernel launch and // used to accelerate matrix-matrix operations // #Solution: copy matrix blocks into shared memory and perform matrix-matrix multiply // on shared memory buffers // // #Code: 1) compute launch grid configuration // 2) allocate data on host(cpu) and device(gpu) // 3) initialize data directly on GPU // 4) read initialized data back from GPU so that we can use the same data on the CPU // 5) create events // 6) issue time record request on start event // 7) launch kernel specifying the amount of shared memory to use = 2 x block size bytes // 8) issue time record request on stop event // 9) synchronize stop event with end of kernel execution // 10) read data back and print upper left corner of result matrix // 11) perform computation on CPU and print upper left corner of result matrix // 12) [optional] compare results; to avoid using a big eps (>=10^-4) use double precision // // #Compilation: nvcc -arch=sm_13 6_matmul-dynamic-shared-mem.cu -o matmul-dynamic-shared-mem // // #Execution: ./matmul-dynamic-shared-mem // // #Note: kernel invocations ( foo<<<...>>>(...) 
) are *always* asynchronous and a call to // hipDeviceSynchronize() is required to wait for the end of kernel execution from // a host thread; in case of synchronous copy operations like hipMemcpy(...,cudaDeviceToHost) // kernel execution is guaranteed to be terminated before data are copied // // #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better // // #Note: -arch=sm_13 is the lowest architecture version that supports double precision // // #Note: the example can be extended to read configuration data and matrix size from the command line // // #Note: try on both GT200 and GF100 architectures to verify the impact of L1 cache //#include <hip/hip_runtime.h> // automatically added by nvcc #include <vector> #include <iostream> typedef float real_t; // return matrix element given block and indices of element in block __device__ real_t get_matrix_element( const real_t* m, //matrix int blockCol, //column index of output block int blockRow, //row index of output row int col, //local column index of block element int row, //local row index of block element int num_columns //number of columns of matrix 'm' ) { return m[ ( blockRow * blockDim.y + row ) * num_columns + blockCol * blockDim.x + col ]; } // shared memory: it is allowed to have only a single shared memory buffer // declared as a global variable; the size of such buffer // is specified at kernel launch as the third parameter // in the <<< >>> operator extern __shared__ real_t cache[]; // compute block matrix multiply: // - matrix block size == tile size == CUDA thread block size // - grid (blocks x threads per block) matches the output matrix layout // workflow: // 1) copy block from input matrices into local cache buffers // 2) wait until all threads are done copying // 3) identify output block location = C,R // 4) iterate over blocks on row R in matrix 1 and blocks on column C in matrix 2; // for each block: // 4.1) output element = c,r -> maps to current 
thread's x,y values // 4.2) add to output element scalar product of row r in local cache 1 (matrix 1) // and column c in local cache 2 (matrix 2) // 5) wait to perform next iteration until all block element have been computed // __global__ void block_matmul( const real_t* m1, const real_t* m2, real_t* mout, int m1_columns, int m2_columns ) { const int TILE_COLUMNS = blockDim.x; const int TILE_ROWS = blockDim.y; real_t* M1 = &cache[ 0 ]; real_t* M2 = &cache[ TILE_COLUMNS * TILE_ROWS]; const int blockRow = blockIdx.y; const int blockCol = blockIdx.x; const int row = threadIdx.y; const int col = threadIdx.x; real_t out = 0.f; for( int b = 0; b != m1_columns / TILE_COLUMNS; ++b ) { //copy data into shared memory M1[ row * TILE_COLUMNS + col ] = get_matrix_element( m1, b, blockRow, col, row, m1_columns ); M2[ row * TILE_COLUMNS + col ] = get_matrix_element( m2, blockCol, b, col, row, m2_columns ); __syncthreads(); // required to guarantee that data are computed before next step // where a thread accesses data computed by other threads for( int k = 0; k != TILE_COLUMNS; ++k ) { out += M1[ row * TILE_COLUMNS + k ] * M2[ k * TILE_COLUMNS + col ]; } __syncthreads(); // required to avoid that some threads start modifying // data in cache before all threads have exited for loop } mout[ ( blockRow * blockDim.y + row ) * m2_columns + blockCol * blockDim.x + col ] = out; } // simple matrix multiplication; grid layout matches output matrix; note that // although this method is slower than the block multiply, it is still much faster // than running on the cpu __global__ void matmul( const real_t* m1, const real_t* m2, real_t* mout, int m1_columns, int m2_columns ) { // m1_columns == m2_rows // mout = m1_rows x m2_columns const int row = blockIdx.y * blockDim.y + threadIdx.y; const int col = blockIdx.x * blockDim.x + threadIdx.x; real_t out = 0.f;//m1[ row * m1_columns + 0 ] * m2[ 0 * m2_columns + col ]; for( int k = 0; k != m1_columns; ++k ) { out += m1[ row * m1_columns + k ] * 
m2[ k * m2_columns + col ]; } mout[ row * m2_columns + col ] = out; } __global__ void init_matrix( real_t* m ) { const int c = threadIdx.x + blockDim.x * blockIdx.x; const int r = threadIdx.y + blockDim.y * blockIdx.y; const int idx = c + gridDim.x * blockDim.x * r; const real_t s = gridDim.x * gridDim.y; m[ idx ] = real_t( idx ) / s; } // standard matrix-matrix multiply void matmul_ref( const real_t* m1, const real_t* m2, real_t* mout, int m1_rows, int m1_columns, int m2_columns ) { for( int row = 0; row != m1_rows; ++row ) { for( int col = 0; col != m2_columns; ++col ) { mout[ row * m2_columns + col ] = 0.f; for( int k = 0; k != m1_columns; ++k ) { mout[ row * m2_columns + col ] += m1[ row * m1_columns + k ] * m2[ k * m2_columns + col ]; } } } } // compare floating point arrays bool compare( const real_t* v1, const real_t* v2, size_t N, real_t eps ) { for( int i = 0; i != N; ++i ) { if( ::fabs( v1[ i ] - v2[ i ] ) > eps ) return false; } return true; } // print matrix; 'stride' in case we want to print only a subset // of the matrix: in this case c != stride void print_matrix( const real_t* m, int r, int c, int stride ) { for( int i = 0; i != r; ++i ) { for( int j = 0; j != c; ++j ) std::cout << m[ i * stride + j ] << ' '; std::cout << '\n'; } std::cout << std::endl; } //------------------------------------------------------------------------------ int main(int argc, char** argv ) { //1024 x 1024 matrices const dim3 BLOCKS( 64, 64 ); const dim3 THREADS_PER_BLOCK( 16, 16 ); const int ROWS = BLOCKS.y * THREADS_PER_BLOCK.y; const int COLUMNS = BLOCKS.x * THREADS_PER_BLOCK.x; const size_t ARRAY_SIZE = ROWS * COLUMNS; const size_t BYTE_SIZE = ARRAY_SIZE * sizeof( real_t ); // allocate enough memory to store one block from matrix 1 and one block from matrix 2 const size_t SHARED_MEMORY_SIZE = 2 * THREADS_PER_BLOCK.x * THREADS_PER_BLOCK.y * sizeof( real_t ); // device storage for gpu computation real_t* dev_m1 = 0; real_t* dev_m2 = 0; real_t* dev_mout = 0; hipMalloc( 
&dev_m1, BYTE_SIZE ); hipMalloc( &dev_m2, BYTE_SIZE ); hipMalloc( &dev_mout, BYTE_SIZE ); //host storage for reading the output of gpu computation std::vector< real_t> host_mout( ARRAY_SIZE ); // host storage for cpu computation std::vector< real_t > m1( ARRAY_SIZE ); std::vector< real_t > m2( ARRAY_SIZE ); std::vector< real_t > mout( ARRAY_SIZE ); // initialize matrix with kernel; much faster than using // for loops on the cpu hipLaunchKernelGGL(( init_matrix), dim3(dim3( COLUMNS, ROWS )), dim3(1), 0, 0, dev_m1 ); hipLaunchKernelGGL(( init_matrix), dim3(dim3( COLUMNS, ROWS )), dim3(1), 0, 0, dev_m2 ); // copy initialized data into host arrays for further processing on the gpu hipMemcpy( &m1[ 0 ], dev_m1, BYTE_SIZE, hipMemcpyDeviceToHost ); hipMemcpy( &m2[ 0 ], dev_m2, BYTE_SIZE, hipMemcpyDeviceToHost ); // print upper 4x4 left corner of input matrix 1 std::cout << "INPUT MATRIX 1 - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl; print_matrix( &m1[ 0 ], 4, 4, COLUMNS ); // print upper 4x4 left corner of input matrix 2 std::cout << "INPUT MATRIX 2 - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl; print_matrix( &m2[ 0 ], 4, 4, COLUMNS ); // create events for timing execution hipEvent_t start = hipEvent_t(); hipEvent_t stop = hipEvent_t(); hipEventCreate( &start ); hipEventCreate( &stop ); // record time into start event hipEventRecord( start, 0 ); // 0 is the default stream id #ifdef BLOCK_MULTIPLY // execute kernel hipLaunchKernelGGL(( block_matmul), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), SHARED_MEMORY_SIZE , 0, dev_m1, dev_m2, dev_mout, COLUMNS, COLUMNS ); #else hipLaunchKernelGGL(( matmul), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_m1, dev_m2, dev_mout, COLUMNS, COLUMNS ); #endif // issue request to record time into stop event hipEventRecord( stop, 0 ); // synchronize stop event to wait for end of kernel execution on stream 0 hipEventSynchronize( stop ); // compute elapsed time (done by CUDA run-time) float elapsed = 0.f; 
hipEventElapsedTime( &elapsed, start, stop ); std::cout << "Elapsed time (ms): " << elapsed << std::endl; // copy output data from device(gpu) to host(cpu) hipMemcpy( &host_mout[ 0 ], dev_mout, BYTE_SIZE, hipMemcpyDeviceToHost ); // print upper 4x4 corner of output matrix std::cout << "\nGPU OUTPUT MATRIX - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl; print_matrix( &host_mout[ 0 ], 4, 4, COLUMNS ); // compute on cpu matmul_ref( &m1[ 0 ], &m2[ 0 ], &mout[ 0 ], ROWS, COLUMNS, COLUMNS ); // print upper 4x4 corner of output matrix std::cout << "\nCPU OUTPUT MATRIX - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl; print_matrix( &mout[ 0 ], 4, 4, COLUMNS ); #ifdef COMPARE_RESULTS // warning: requires real_t = double to pass std::cout << "Comparing... "; if( compare( &host_mout[ 0 ], &mout[ 0 ], ARRAY_SIZE, 0.00001 ) ) std::cout << "PASSED" << std::endl; else std::cout << "FAILED" << std::endl; #endif // free memory hipFree( dev_m1 ); hipFree( dev_m2 ); hipFree( dev_mout ); // release events hipEventDestroy( start ); hipEventDestroy( stop ); return 0; }
12cdd195edc855fb2ee8ae91a4fc37af64d3b4f6.cu
// #CSCS CUDA Training // // #Example 6 - (block) matrix-matrix multiply with dynamically allocated shared memory // // #Author: Ugo Varetto // // #Goal: multiply two matrices make use of shared memory to accelerate the computation; // the size of the shared memory buffer must be specified at run-time // // #Rationale: shows how shared memory can be dynamically allocated at kernel launch and // used to accelerate matrix-matrix operations // #Solution: copy matrix blocks into shared memory and perform matrix-matrix multiply // on shared memory buffers // // #Code: 1) compute launch grid configuration // 2) allocate data on host(cpu) and device(gpu) // 3) initialize data directly on GPU // 4) read initialized data back from GPU so that we can use the same data on the CPU // 5) create events // 6) issue time record request on start event // 7) launch kernel specifying the amount of shared memory to use = 2 x block size bytes // 8) issue time record request on stop event // 9) synchronize stop event with end of kernel execution // 10) read data back and print upper left corner of result matrix // 11) perform computation on CPU and print upper left corner of result matrix // 12) [optional] compare results; to avoid using a big eps (>=10^-4) use double precision // // #Compilation: nvcc -arch=sm_13 6_matmul-dynamic-shared-mem.cu -o matmul-dynamic-shared-mem // // #Execution: ./matmul-dynamic-shared-mem // // #Note: kernel invocations ( foo<<<...>>>(...) 
) are *always* asynchronous and a call to // cudaThreadSynchronize() is required to wait for the end of kernel execution from // a host thread; in case of synchronous copy operations like cudaMemcpy(...,cudaDeviceToHost) // kernel execution is guaranteed to be terminated before data are copied // // #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better // // #Note: -arch=sm_13 is the lowest architecture version that supports double precision // // #Note: the example can be extended to read configuration data and matrix size from the command line // // #Note: try on both GT200 and GF100 architectures to verify the impact of L1 cache //#include <cuda_runtime.h> // automatically added by nvcc #include <vector> #include <iostream> typedef float real_t; // return matrix element given block and indices of element in block __device__ real_t get_matrix_element( const real_t* m, //matrix int blockCol, //column index of output block int blockRow, //row index of output row int col, //local column index of block element int row, //local row index of block element int num_columns //number of columns of matrix 'm' ) { return m[ ( blockRow * blockDim.y + row ) * num_columns + blockCol * blockDim.x + col ]; } // shared memory: it is allowed to have only a single shared memory buffer // declared as a global variable; the size of such buffer // is specified at kernel launch as the third parameter // in the <<< >>> operator extern __shared__ real_t cache[]; // compute block matrix multiply: // - matrix block size == tile size == CUDA thread block size // - grid (blocks x threads per block) matches the output matrix layout // workflow: // 1) copy block from input matrices into local cache buffers // 2) wait until all threads are done copying // 3) identify output block location = C,R // 4) iterate over blocks on row R in matrix 1 and blocks on column C in matrix 2; // for each block: // 4.1) output element = c,r -> maps to current 
thread's x,y values // 4.2) add to output element scalar product of row r in local cache 1 (matrix 1) // and column c in local cache 2 (matrix 2) // 5) wait to perform next iteration until all block element have been computed // __global__ void block_matmul( const real_t* m1, const real_t* m2, real_t* mout, int m1_columns, int m2_columns ) { const int TILE_COLUMNS = blockDim.x; const int TILE_ROWS = blockDim.y; real_t* M1 = &cache[ 0 ]; real_t* M2 = &cache[ TILE_COLUMNS * TILE_ROWS]; const int blockRow = blockIdx.y; const int blockCol = blockIdx.x; const int row = threadIdx.y; const int col = threadIdx.x; real_t out = 0.f; for( int b = 0; b != m1_columns / TILE_COLUMNS; ++b ) { //copy data into shared memory M1[ row * TILE_COLUMNS + col ] = get_matrix_element( m1, b, blockRow, col, row, m1_columns ); M2[ row * TILE_COLUMNS + col ] = get_matrix_element( m2, blockCol, b, col, row, m2_columns ); __syncthreads(); // required to guarantee that data are computed before next step // where a thread accesses data computed by other threads for( int k = 0; k != TILE_COLUMNS; ++k ) { out += M1[ row * TILE_COLUMNS + k ] * M2[ k * TILE_COLUMNS + col ]; } __syncthreads(); // required to avoid that some threads start modifying // data in cache before all threads have exited for loop } mout[ ( blockRow * blockDim.y + row ) * m2_columns + blockCol * blockDim.x + col ] = out; } // simple matrix multiplication; grid layout matches output matrix; note that // although this method is slower than the block multiply, it is still much faster // than running on the cpu __global__ void matmul( const real_t* m1, const real_t* m2, real_t* mout, int m1_columns, int m2_columns ) { // m1_columns == m2_rows // mout = m1_rows x m2_columns const int row = blockIdx.y * blockDim.y + threadIdx.y; const int col = blockIdx.x * blockDim.x + threadIdx.x; real_t out = 0.f;//m1[ row * m1_columns + 0 ] * m2[ 0 * m2_columns + col ]; for( int k = 0; k != m1_columns; ++k ) { out += m1[ row * m1_columns + k ] * 
m2[ k * m2_columns + col ]; } mout[ row * m2_columns + col ] = out; } __global__ void init_matrix( real_t* m ) { const int c = threadIdx.x + blockDim.x * blockIdx.x; const int r = threadIdx.y + blockDim.y * blockIdx.y; const int idx = c + gridDim.x * blockDim.x * r; const real_t s = gridDim.x * gridDim.y; m[ idx ] = real_t( idx ) / s; } // standard matrix-matrix multiply void matmul_ref( const real_t* m1, const real_t* m2, real_t* mout, int m1_rows, int m1_columns, int m2_columns ) { for( int row = 0; row != m1_rows; ++row ) { for( int col = 0; col != m2_columns; ++col ) { mout[ row * m2_columns + col ] = 0.f; for( int k = 0; k != m1_columns; ++k ) { mout[ row * m2_columns + col ] += m1[ row * m1_columns + k ] * m2[ k * m2_columns + col ]; } } } } // compare floating point arrays bool compare( const real_t* v1, const real_t* v2, size_t N, real_t eps ) { for( int i = 0; i != N; ++i ) { if( std::fabs( v1[ i ] - v2[ i ] ) > eps ) return false; } return true; } // print matrix; 'stride' in case we want to print only a subset // of the matrix: in this case c != stride void print_matrix( const real_t* m, int r, int c, int stride ) { for( int i = 0; i != r; ++i ) { for( int j = 0; j != c; ++j ) std::cout << m[ i * stride + j ] << ' '; std::cout << '\n'; } std::cout << std::endl; } //------------------------------------------------------------------------------ int main(int argc, char** argv ) { //1024 x 1024 matrices const dim3 BLOCKS( 64, 64 ); const dim3 THREADS_PER_BLOCK( 16, 16 ); const int ROWS = BLOCKS.y * THREADS_PER_BLOCK.y; const int COLUMNS = BLOCKS.x * THREADS_PER_BLOCK.x; const size_t ARRAY_SIZE = ROWS * COLUMNS; const size_t BYTE_SIZE = ARRAY_SIZE * sizeof( real_t ); // allocate enough memory to store one block from matrix 1 and one block from matrix 2 const size_t SHARED_MEMORY_SIZE = 2 * THREADS_PER_BLOCK.x * THREADS_PER_BLOCK.y * sizeof( real_t ); // device storage for gpu computation real_t* dev_m1 = 0; real_t* dev_m2 = 0; real_t* dev_mout = 0; 
cudaMalloc( &dev_m1, BYTE_SIZE ); cudaMalloc( &dev_m2, BYTE_SIZE ); cudaMalloc( &dev_mout, BYTE_SIZE ); //host storage for reading the output of gpu computation std::vector< real_t> host_mout( ARRAY_SIZE ); // host storage for cpu computation std::vector< real_t > m1( ARRAY_SIZE ); std::vector< real_t > m2( ARRAY_SIZE ); std::vector< real_t > mout( ARRAY_SIZE ); // initialize matrix with kernel; much faster than using // for loops on the cpu init_matrix<<<dim3( COLUMNS, ROWS ), 1>>>( dev_m1 ); init_matrix<<<dim3( COLUMNS, ROWS ), 1>>>( dev_m2 ); // copy initialized data into host arrays for further processing on the gpu cudaMemcpy( &m1[ 0 ], dev_m1, BYTE_SIZE, cudaMemcpyDeviceToHost ); cudaMemcpy( &m2[ 0 ], dev_m2, BYTE_SIZE, cudaMemcpyDeviceToHost ); // print upper 4x4 left corner of input matrix 1 std::cout << "INPUT MATRIX 1 - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl; print_matrix( &m1[ 0 ], 4, 4, COLUMNS ); // print upper 4x4 left corner of input matrix 2 std::cout << "INPUT MATRIX 2 - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl; print_matrix( &m2[ 0 ], 4, 4, COLUMNS ); // create events for timing execution cudaEvent_t start = cudaEvent_t(); cudaEvent_t stop = cudaEvent_t(); cudaEventCreate( &start ); cudaEventCreate( &stop ); // record time into start event cudaEventRecord( start, 0 ); // 0 is the default stream id #ifdef BLOCK_MULTIPLY // execute kernel block_matmul<<<BLOCKS, THREADS_PER_BLOCK, SHARED_MEMORY_SIZE >>>( dev_m1, dev_m2, dev_mout, COLUMNS, COLUMNS ); #else matmul<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_m1, dev_m2, dev_mout, COLUMNS, COLUMNS ); #endif // issue request to record time into stop event cudaEventRecord( stop, 0 ); // synchronize stop event to wait for end of kernel execution on stream 0 cudaEventSynchronize( stop ); // compute elapsed time (done by CUDA run-time) float elapsed = 0.f; cudaEventElapsedTime( &elapsed, start, stop ); std::cout << "Elapsed time (ms): " << elapsed << std::endl; // copy output 
data from device(gpu) to host(cpu) cudaMemcpy( &host_mout[ 0 ], dev_mout, BYTE_SIZE, cudaMemcpyDeviceToHost ); // print upper 4x4 corner of output matrix std::cout << "\nGPU OUTPUT MATRIX - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl; print_matrix( &host_mout[ 0 ], 4, 4, COLUMNS ); // compute on cpu matmul_ref( &m1[ 0 ], &m2[ 0 ], &mout[ 0 ], ROWS, COLUMNS, COLUMNS ); // print upper 4x4 corner of output matrix std::cout << "\nCPU OUTPUT MATRIX - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl; print_matrix( &mout[ 0 ], 4, 4, COLUMNS ); #ifdef COMPARE_RESULTS // warning: requires real_t = double to pass std::cout << "Comparing... "; if( compare( &host_mout[ 0 ], &mout[ 0 ], ARRAY_SIZE, 0.00001 ) ) std::cout << "PASSED" << std::endl; else std::cout << "FAILED" << std::endl; #endif // free memory cudaFree( dev_m1 ); cudaFree( dev_m2 ); cudaFree( dev_mout ); // release events cudaEventDestroy( start ); cudaEventDestroy( stop ); return 0; }
157c831477cc17a69ca16add29257c9b4e36bf08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmgecsrmv.cu, normal z -> c, Sun Nov 20 20:20:40 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void cmgecsrmv_kernel( int num_rows, int num_cols, int num_vecs, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; extern __shared__ magmaFloatComplex dot[]; if( row<num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0); int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++ ){ int col = dcolind [ j ]; magmaFloatComplex val = dval[ j ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[ col + i*num_cols ]; } for( int i=0; i<num_vecs; i++ ) dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ] + beta * dy[ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is CSR. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] alpha magmaFloatComplex scalar multiplier @param[in] dval magmaFloatComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar multiplier @param[out] dy magmaFloatComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cmgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magmaFloatComplex alpha, magmaFloatComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaFloatComplex_ptr dx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaFloatComplex ); // num_vecs vectors hipLaunchKernelGGL(( cmgecsrmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream(), m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; }
157c831477cc17a69ca16add29257c9b4e36bf08.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmgecsrmv.cu, normal z -> c, Sun Nov 20 20:20:40 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void cmgecsrmv_kernel( int num_rows, int num_cols, int num_vecs, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; extern __shared__ magmaFloatComplex dot[]; if( row<num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0); int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++ ){ int col = dcolind [ j ]; magmaFloatComplex val = dval[ j ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[ col + i*num_cols ]; } for( int i=0; i<num_vecs; i++ ) dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ] + beta * dy[ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is CSR. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] alpha magmaFloatComplex scalar multiplier @param[in] dval magmaFloatComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar multiplier @param[out] dy magmaFloatComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cmgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magmaFloatComplex alpha, magmaFloatComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaFloatComplex_ptr dx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaFloatComplex ); // num_vecs vectors cmgecsrmv_kernel<<< grid, threads, MEM_SIZE, queue->cuda_stream()>>> (m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; }
b28d9fbff93828ec794383ef56ab00e9b3a61ee2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #define EIGEN_USE_GPU #include "ew_op_gpu.h" #include "cu_utils.h" // y = a*x + b template <typename T, int THREADS> __global__ void __launch_bounds__(THREADS) cwise_linear_axpb_forward( T* Y, const T* __restrict__ X, const float* __restrict__ A, const float* __restrict__ B, int CDHW, int DHW, bool bA, bool bB) { const int tid = threadIdx.x; const int c = blockIdx.x; const int n = blockIdx.y; int image_offset = n * CDHW + c * DHW; X += image_offset; Y += image_offset; float a = bA ? A[c] : 1.0f; float b = bB ? B[c] : 0.0f; for (int i = tid; i < DHW; i += THREADS) { float x = load(X, i); store(Y, a*x + b, i); } } // dx = a * dy // da = sum(dy * x) // db = sum(dy) template <typename TX, typename TY, int THREADS> __global__ void __launch_bounds__(THREADS) cwise_linear_axpb_backward( TY* DX, float* DA, float* DB, const TY* __restrict__ DY, const TX* __restrict__ X, const float* __restrict__ A, int CDHW, int NDHW, int DHW, int magic_DHW, int shift_DHW, bool bDB) { __shared__ float shareDA[THREADS>>5]; __shared__ float shareDB[THREADS>>5]; const int tid = threadIdx.x; const int c = blockIdx.x; int image_offset = c * DHW; DX += image_offset; DY += image_offset; X += image_offset; float a = A[c]; float da = 0.0f; float db = 0.0f; for (int ndhw = tid; ndhw < NDHW; ndhw += THREADS) { int n = div64(ndhw, magic_DHW, shift_DHW); int dhw = ndhw - n*DHW; int i = n * CDHW + dhw; float dy = load(DY, i); float x = load( X, i); da += dy * x; db += dy; store(DX, a*dy, i); } // reduce within warp #pragma unroll for (int i = 16; i > 0; i >>= 1) { da += shfl_xor(da, i); db += shfl_xor(db, i); } // first thread of each warp store to shared if ((tid & 31) == 0) { shareDA[tid >> 5] = da; shareDB[tid >> 5] = db; } __syncthreads(); if (tid < (THREADS>>5)) { // first warp loads all prior reductions da = shareDA[tid]; db = shareDB[tid]; // reduce within this last warp #pragma unroll for (int 
i = (THREADS>>6); i > 0; i >>= 1) { da += shfl_xor(da, i); db += shfl_xor(db, i); } // single thread outputs final reductions if (tid == 0) { DA[c] = da; if (bDB) DB[c] = db; } } } // db = sum(dy) template <typename T, int THREADS> __global__ void __launch_bounds__(THREADS) cwise_linear_xpb_backward( float* DB, const T* __restrict__ DY, int CDHW, int NDHW, int DHW, int magic_DHW, int shift_DHW) { __shared__ float shareDB[THREADS>>5]; const int tid = threadIdx.x; const int c = blockIdx.x; DY += c * DHW; float db = 0.0f; for (int ndhw = tid; ndhw < NDHW; ndhw += THREADS) { int n = div64(ndhw, magic_DHW, shift_DHW); int dhw = ndhw - n*DHW; int i = n * CDHW + dhw; float dy = load(DY, i); db += dy; } // reduce within warp #pragma unroll for (int i = 16; i > 0; i >>= 1) db += shfl_xor(db, i); // first thread of each warp store to shared if ((tid & 31) == 0) shareDB[tid >> 5] = db; __syncthreads(); if (tid < (THREADS>>5)) { // first warp loads all prior reductions db = shareDB[tid]; // reduce within this last warp #pragma unroll for (int i = (THREADS>>6); i > 0; i >>= 1) db += shfl_xor(db, i); // single thread outputs final reductions if (tid == 0) DB[c] = db; } } template <typename T> bool CWiseLinearAXPB_Forward(hipStream_t stream, T* y, const T* x, const float* a, const float* b, int N, int C, int DHW) { dim3 grid(C, N, 1); if (DHW < 128*8) hipLaunchKernelGGL(( cwise_linear_axpb_forward<T, 32>), dim3(grid), dim3(32), 0, stream, y, x, a, b, C*DHW, DHW, a!=0, b!=0); else if (DHW < 512*8) hipLaunchKernelGGL(( cwise_linear_axpb_forward<T,128>), dim3(grid), dim3(128), 0, stream, y, x, a, b, C*DHW, DHW, a!=0, b!=0); else hipLaunchKernelGGL(( cwise_linear_axpb_forward<T,512>), dim3(grid), dim3(512), 0, stream, y, x, a, b, C*DHW, DHW, a!=0, b!=0); return true; // TODO } template <typename TX, typename TY> bool CWiseLinearAXPB_Backward(hipStream_t stream, TY* dx, float* da, float* db, const TY* dy, const TX* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int 
shift_DHW) { dim3 grid(C, 1, 1); if (NDHW < 256*8) hipLaunchKernelGGL(( cwise_linear_axpb_backward<TX,TY, 64>), dim3(grid), dim3(64), 0, stream, dx, da, db, dy, x, a, C*DHW, NDHW, DHW, magic_DHW, shift_DHW, db!=0); else if (NDHW < 1024*8) hipLaunchKernelGGL(( cwise_linear_axpb_backward<TX,TY, 256>), dim3(grid), dim3(256), 0, stream, dx, da, db, dy, x, a, C*DHW, NDHW, DHW, magic_DHW, shift_DHW, db!=0); else hipLaunchKernelGGL(( cwise_linear_axpb_backward<TX,TY,1024>), dim3(grid), dim3(1024), 0, stream, dx, da, db, dy, x, a, C*DHW, NDHW, DHW, magic_DHW, shift_DHW, db!=0); return true; // TODO } template <typename T> bool CWiseLinearXPB_Backward(hipStream_t stream, float* db, const T* dy, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW) { dim3 grid(C, 1, 1); if (NDHW < 256*8) hipLaunchKernelGGL(( cwise_linear_xpb_backward<T, 64>), dim3(grid), dim3(64), 0, stream, db, dy, C*DHW, NDHW, DHW, magic_DHW, shift_DHW); else if (NDHW < 1024*8) hipLaunchKernelGGL(( cwise_linear_xpb_backward<T, 256>), dim3(grid), dim3(256), 0, stream, db, dy, C*DHW, NDHW, DHW, magic_DHW, shift_DHW); else hipLaunchKernelGGL(( cwise_linear_xpb_backward<T,1024>), dim3(grid), dim3(1024), 0, stream, db, dy, C*DHW, NDHW, DHW, magic_DHW, shift_DHW); return true; // TODO } template bool CWiseLinearAXPB_Forward<float>(hipStream_t stream, float* y, const float* x, const float* a, const float* b, int N, int C, int DHW); template bool CWiseLinearAXPB_Forward<ehalf>(hipStream_t stream, ehalf* y, const ehalf* x, const float* a, const float* b, int N, int C, int DHW); template bool CWiseLinearAXPB_Forward<bhalf>(hipStream_t stream, bhalf* y, const bhalf* x, const float* a, const float* b, int N, int C, int DHW); template bool CWiseLinearAXPB_Backward<float,float>(hipStream_t stream, float* dx, float* da, float* db, const float* dy, const float* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearAXPB_Backward<ehalf,ehalf>(hipStream_t stream, ehalf* dx, 
float* da, float* db, const ehalf* dy, const ehalf* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearAXPB_Backward<bhalf,bhalf>(hipStream_t stream, bhalf* dx, float* da, float* db, const bhalf* dy, const bhalf* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearAXPB_Backward<ehalf,float>(hipStream_t stream, float* dx, float* da, float* db, const float* dy, const ehalf* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearXPB_Backward<float>(hipStream_t stream, float* db, const float* dy, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearXPB_Backward<ehalf>(hipStream_t stream, float* db, const ehalf* dy, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearXPB_Backward<bhalf>(hipStream_t stream, float* db, const bhalf* dy, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); #endif
b28d9fbff93828ec794383ef56ab00e9b3a61ee2.cu
#if GOOGLE_CUDA #define EIGEN_USE_GPU #include "ew_op_gpu.h" #include "cu_utils.h" // y = a*x + b template <typename T, int THREADS> __global__ void __launch_bounds__(THREADS) cwise_linear_axpb_forward( T* Y, const T* __restrict__ X, const float* __restrict__ A, const float* __restrict__ B, int CDHW, int DHW, bool bA, bool bB) { const int tid = threadIdx.x; const int c = blockIdx.x; const int n = blockIdx.y; int image_offset = n * CDHW + c * DHW; X += image_offset; Y += image_offset; float a = bA ? A[c] : 1.0f; float b = bB ? B[c] : 0.0f; for (int i = tid; i < DHW; i += THREADS) { float x = load(X, i); store(Y, a*x + b, i); } } // dx = a * dy // da = sum(dy * x) // db = sum(dy) template <typename TX, typename TY, int THREADS> __global__ void __launch_bounds__(THREADS) cwise_linear_axpb_backward( TY* DX, float* DA, float* DB, const TY* __restrict__ DY, const TX* __restrict__ X, const float* __restrict__ A, int CDHW, int NDHW, int DHW, int magic_DHW, int shift_DHW, bool bDB) { __shared__ float shareDA[THREADS>>5]; __shared__ float shareDB[THREADS>>5]; const int tid = threadIdx.x; const int c = blockIdx.x; int image_offset = c * DHW; DX += image_offset; DY += image_offset; X += image_offset; float a = A[c]; float da = 0.0f; float db = 0.0f; for (int ndhw = tid; ndhw < NDHW; ndhw += THREADS) { int n = div64(ndhw, magic_DHW, shift_DHW); int dhw = ndhw - n*DHW; int i = n * CDHW + dhw; float dy = load(DY, i); float x = load( X, i); da += dy * x; db += dy; store(DX, a*dy, i); } // reduce within warp #pragma unroll for (int i = 16; i > 0; i >>= 1) { da += shfl_xor(da, i); db += shfl_xor(db, i); } // first thread of each warp store to shared if ((tid & 31) == 0) { shareDA[tid >> 5] = da; shareDB[tid >> 5] = db; } __syncthreads(); if (tid < (THREADS>>5)) { // first warp loads all prior reductions da = shareDA[tid]; db = shareDB[tid]; // reduce within this last warp #pragma unroll for (int i = (THREADS>>6); i > 0; i >>= 1) { da += shfl_xor(da, i); db += shfl_xor(db, i); } // 
single thread outputs final reductions if (tid == 0) { DA[c] = da; if (bDB) DB[c] = db; } } } // db = sum(dy) template <typename T, int THREADS> __global__ void __launch_bounds__(THREADS) cwise_linear_xpb_backward( float* DB, const T* __restrict__ DY, int CDHW, int NDHW, int DHW, int magic_DHW, int shift_DHW) { __shared__ float shareDB[THREADS>>5]; const int tid = threadIdx.x; const int c = blockIdx.x; DY += c * DHW; float db = 0.0f; for (int ndhw = tid; ndhw < NDHW; ndhw += THREADS) { int n = div64(ndhw, magic_DHW, shift_DHW); int dhw = ndhw - n*DHW; int i = n * CDHW + dhw; float dy = load(DY, i); db += dy; } // reduce within warp #pragma unroll for (int i = 16; i > 0; i >>= 1) db += shfl_xor(db, i); // first thread of each warp store to shared if ((tid & 31) == 0) shareDB[tid >> 5] = db; __syncthreads(); if (tid < (THREADS>>5)) { // first warp loads all prior reductions db = shareDB[tid]; // reduce within this last warp #pragma unroll for (int i = (THREADS>>6); i > 0; i >>= 1) db += shfl_xor(db, i); // single thread outputs final reductions if (tid == 0) DB[c] = db; } } template <typename T> bool CWiseLinearAXPB_Forward(CUstream stream, T* y, const T* x, const float* a, const float* b, int N, int C, int DHW) { dim3 grid(C, N, 1); if (DHW < 128*8) cwise_linear_axpb_forward<T, 32><<<grid, 32, 0, stream>>>(y, x, a, b, C*DHW, DHW, a!=0, b!=0); else if (DHW < 512*8) cwise_linear_axpb_forward<T,128><<<grid, 128, 0, stream>>>(y, x, a, b, C*DHW, DHW, a!=0, b!=0); else cwise_linear_axpb_forward<T,512><<<grid, 512, 0, stream>>>(y, x, a, b, C*DHW, DHW, a!=0, b!=0); return true; // TODO } template <typename TX, typename TY> bool CWiseLinearAXPB_Backward(CUstream stream, TY* dx, float* da, float* db, const TY* dy, const TX* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW) { dim3 grid(C, 1, 1); if (NDHW < 256*8) cwise_linear_axpb_backward<TX,TY, 64><<<grid, 64, 0, stream>>>(dx, da, db, dy, x, a, C*DHW, NDHW, DHW, magic_DHW, shift_DHW, db!=0); else if 
(NDHW < 1024*8) cwise_linear_axpb_backward<TX,TY, 256><<<grid, 256, 0, stream>>>(dx, da, db, dy, x, a, C*DHW, NDHW, DHW, magic_DHW, shift_DHW, db!=0); else cwise_linear_axpb_backward<TX,TY,1024><<<grid, 1024, 0, stream>>>(dx, da, db, dy, x, a, C*DHW, NDHW, DHW, magic_DHW, shift_DHW, db!=0); return true; // TODO } template <typename T> bool CWiseLinearXPB_Backward(CUstream stream, float* db, const T* dy, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW) { dim3 grid(C, 1, 1); if (NDHW < 256*8) cwise_linear_xpb_backward<T, 64><<<grid, 64, 0, stream>>>(db, dy, C*DHW, NDHW, DHW, magic_DHW, shift_DHW); else if (NDHW < 1024*8) cwise_linear_xpb_backward<T, 256><<<grid, 256, 0, stream>>>(db, dy, C*DHW, NDHW, DHW, magic_DHW, shift_DHW); else cwise_linear_xpb_backward<T,1024><<<grid, 1024, 0, stream>>>(db, dy, C*DHW, NDHW, DHW, magic_DHW, shift_DHW); return true; // TODO } template bool CWiseLinearAXPB_Forward<float>(CUstream stream, float* y, const float* x, const float* a, const float* b, int N, int C, int DHW); template bool CWiseLinearAXPB_Forward<ehalf>(CUstream stream, ehalf* y, const ehalf* x, const float* a, const float* b, int N, int C, int DHW); template bool CWiseLinearAXPB_Forward<bhalf>(CUstream stream, bhalf* y, const bhalf* x, const float* a, const float* b, int N, int C, int DHW); template bool CWiseLinearAXPB_Backward<float,float>(CUstream stream, float* dx, float* da, float* db, const float* dy, const float* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearAXPB_Backward<ehalf,ehalf>(CUstream stream, ehalf* dx, float* da, float* db, const ehalf* dy, const ehalf* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearAXPB_Backward<bhalf,bhalf>(CUstream stream, bhalf* dx, float* da, float* db, const bhalf* dy, const bhalf* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearAXPB_Backward<ehalf,float>(CUstream 
stream, float* dx, float* da, float* db, const float* dy, const ehalf* x, const float* a, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearXPB_Backward<float>(CUstream stream, float* db, const float* dy, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearXPB_Backward<ehalf>(CUstream stream, float* db, const ehalf* dy, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); template bool CWiseLinearXPB_Backward<bhalf>(CUstream stream, float* db, const bhalf* dy, int C, int NDHW, int DHW, int magic_DHW, int shift_DHW); #endif
7d4f0108631ffafeb796ec5d4709bb37a5875249.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdint> #include "oneflow/core/common/data_type.h" #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/constantpad3d_kernel_util.h" namespace oneflow { namespace user_op { template<typename IN_T> __global__ void DoCUDAConstantPad3d(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 5> index_helper, int64_t elem_num, int64_t n_channel, int64_t y_depth, int64_t y_height, int64_t y_width, int64_t x_depth, int64_t x_height, int64_t x_width, int64_t pad_front, int64_t pad_left, int64_t pad_top, const IN_T const_value) { DoConstantPad3d<IN_T>(src, dest, index_helper, elem_num, n_channel, y_depth, y_height, y_width, x_depth, x_height, x_width, pad_front, pad_left, pad_top, const_value); }; template<typename IN_T> __global__ void DoCUDAConstantPad3dGrad(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 5> index_helper, int64_t elem_num, int64_t n_channel, int64_t dy_depth, int64_t dy_height, int64_t dy_width, int64_t dx_depth, int64_t dx_height, int64_t dx_width, int64_t pad_front, int64_t pad_left, int64_t pad_top) { DoConstantPad3dGrad<IN_T>(src, dest, index_helper, elem_num, n_channel, dy_depth, dy_height, dy_width, dx_height, dx_depth, dx_width, pad_front, pad_left, pad_top); }; template<typename IN_T> struct ConstantPad3dFunctor<DeviceType::kGPU, 
IN_T> final { void operator()(DeviceCtx* ctx, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 5>& index_helper, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int64_t>& padding, IN_T constant_value) { const int64_t c_idx = 1; const int64_t d_idx = 2; const int64_t h_idx = 3; const int64_t w_idx = 4; hipLaunchKernelGGL(( DoCUDAConstantPad3d<IN_T>), dim3(BlocksNum4ThreadsNum(y_shape.Count(0))), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), src, dest, index_helper, y_shape.Count(0), y_shape.At(c_idx), y_shape.At(d_idx), y_shape.At(h_idx), y_shape.At(w_idx), x_shape.At(d_idx), x_shape.At(h_idx), x_shape.At(w_idx), padding[4], padding[0], padding[2], constant_value); } }; // float16 implementation template<> void ConstantPad3dFunctor<DeviceType::kGPU, float16>::operator()( DeviceCtx* ctx, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 5>& index_helper, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int64_t>& padding, float16 constant_value) { const int64_t c_idx = 1; const int64_t d_idx = 2; const int64_t h_idx = 3; const int64_t w_idx = 4; hipLaunchKernelGGL(( DoCUDAConstantPad3d<half>) , dim3(BlocksNum4ThreadsNum(y_shape.Count(0))), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, y_shape.Count(0), y_shape.At(c_idx), y_shape.At(d_idx), y_shape.At(h_idx), y_shape.At(w_idx), x_shape.At(d_idx), x_shape.At(h_idx), x_shape.At(w_idx), padding[4], padding[0], padding[2], static_cast<const half>(constant_value)); } template<typename IN_T> struct ConstantPad3dGradFunctor<DeviceType::kGPU, IN_T> final { void operator()(DeviceCtx* ctx, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 5>& index_helper, const ShapeView& dy_shape, const ShapeView& dx_shape, const std::vector<int64_t>& padding) { const int64_t c_idx = 1; const int64_t d_idx = 2; const int64_t h_idx = 3; const int64_t w_idx = 
4; hipLaunchKernelGGL(( DoCUDAConstantPad3dGrad<IN_T>), dim3(BlocksNum4ThreadsNum(dy_shape.Count(0))), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), src, dest, index_helper, dy_shape.Count(0), dy_shape.At(c_idx), dy_shape.At(d_idx), dy_shape.At(h_idx), dy_shape.At(w_idx), dx_shape.At(d_idx), dx_shape.At(h_idx), dx_shape.At(w_idx), padding[4], padding[0], padding[2]); } }; // float16 implementation template<> void ConstantPad3dGradFunctor<DeviceType::kGPU, float16>::operator()( DeviceCtx* ctx, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 5>& index_helper, const ShapeView& dy_shape, const ShapeView& dx_shape, const std::vector<int64_t>& padding) { const int64_t c_idx = 1; const int64_t d_idx = 2; const int64_t h_idx = 3; const int64_t w_idx = 4; hipLaunchKernelGGL(( DoCUDAConstantPad3dGrad<half>) , dim3(BlocksNum4ThreadsNum(dy_shape.Count(0))), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, dy_shape.Count(0), dy_shape.At(c_idx), dy_shape.At(d_idx), dy_shape.At(h_idx), dy_shape.At(w_idx), dx_shape.At(d_idx), dx_shape.At(h_idx), dx_shape.At(w_idx), padding[4], padding[0], padding[2]); } OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_CONSTANT_PAD3D_FUNCTOR, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kGPU), PADDING_DATA_TYPE_GPU_SEQ); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_CONSTANT_PAD3D_GRAD_FUNCTOR, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kGPU), PADDING_DATA_TYPE_GPU_SEQ); } // namespace user_op } // namespace oneflow
7d4f0108631ffafeb796ec5d4709bb37a5875249.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdint> #include "oneflow/core/common/data_type.h" #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/constantpad3d_kernel_util.h" namespace oneflow { namespace user_op { template<typename IN_T> __global__ void DoCUDAConstantPad3d(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 5> index_helper, int64_t elem_num, int64_t n_channel, int64_t y_depth, int64_t y_height, int64_t y_width, int64_t x_depth, int64_t x_height, int64_t x_width, int64_t pad_front, int64_t pad_left, int64_t pad_top, const IN_T const_value) { DoConstantPad3d<IN_T>(src, dest, index_helper, elem_num, n_channel, y_depth, y_height, y_width, x_depth, x_height, x_width, pad_front, pad_left, pad_top, const_value); }; template<typename IN_T> __global__ void DoCUDAConstantPad3dGrad(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 5> index_helper, int64_t elem_num, int64_t n_channel, int64_t dy_depth, int64_t dy_height, int64_t dy_width, int64_t dx_depth, int64_t dx_height, int64_t dx_width, int64_t pad_front, int64_t pad_left, int64_t pad_top) { DoConstantPad3dGrad<IN_T>(src, dest, index_helper, elem_num, n_channel, dy_depth, dy_height, dy_width, dx_height, dx_depth, dx_width, pad_front, pad_left, pad_top); }; template<typename IN_T> struct ConstantPad3dFunctor<DeviceType::kGPU, IN_T> final { void operator()(DeviceCtx* ctx, const IN_T* src, IN_T* dest, const 
NdIndexOffsetHelper<int64_t, 5>& index_helper, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int64_t>& padding, IN_T constant_value) { const int64_t c_idx = 1; const int64_t d_idx = 2; const int64_t h_idx = 3; const int64_t w_idx = 4; DoCUDAConstantPad3d<IN_T><<<BlocksNum4ThreadsNum(y_shape.Count(0)), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( src, dest, index_helper, y_shape.Count(0), y_shape.At(c_idx), y_shape.At(d_idx), y_shape.At(h_idx), y_shape.At(w_idx), x_shape.At(d_idx), x_shape.At(h_idx), x_shape.At(w_idx), padding[4], padding[0], padding[2], constant_value); } }; // float16 implementation template<> void ConstantPad3dFunctor<DeviceType::kGPU, float16>::operator()( DeviceCtx* ctx, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 5>& index_helper, const ShapeView& x_shape, const ShapeView& y_shape, const std::vector<int64_t>& padding, float16 constant_value) { const int64_t c_idx = 1; const int64_t d_idx = 2; const int64_t h_idx = 3; const int64_t w_idx = 4; DoCUDAConstantPad3d<half> <<<BlocksNum4ThreadsNum(y_shape.Count(0)), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, y_shape.Count(0), y_shape.At(c_idx), y_shape.At(d_idx), y_shape.At(h_idx), y_shape.At(w_idx), x_shape.At(d_idx), x_shape.At(h_idx), x_shape.At(w_idx), padding[4], padding[0], padding[2], static_cast<const half>(constant_value)); } template<typename IN_T> struct ConstantPad3dGradFunctor<DeviceType::kGPU, IN_T> final { void operator()(DeviceCtx* ctx, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 5>& index_helper, const ShapeView& dy_shape, const ShapeView& dx_shape, const std::vector<int64_t>& padding) { const int64_t c_idx = 1; const int64_t d_idx = 2; const int64_t h_idx = 3; const int64_t w_idx = 4; DoCUDAConstantPad3dGrad<IN_T><<<BlocksNum4ThreadsNum(dy_shape.Count(0)), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( src, dest, 
index_helper, dy_shape.Count(0), dy_shape.At(c_idx), dy_shape.At(d_idx), dy_shape.At(h_idx), dy_shape.At(w_idx), dx_shape.At(d_idx), dx_shape.At(h_idx), dx_shape.At(w_idx), padding[4], padding[0], padding[2]); } }; // float16 implementation template<> void ConstantPad3dGradFunctor<DeviceType::kGPU, float16>::operator()( DeviceCtx* ctx, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 5>& index_helper, const ShapeView& dy_shape, const ShapeView& dx_shape, const std::vector<int64_t>& padding) { const int64_t c_idx = 1; const int64_t d_idx = 2; const int64_t h_idx = 3; const int64_t w_idx = 4; DoCUDAConstantPad3dGrad<half> <<<BlocksNum4ThreadsNum(dy_shape.Count(0)), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, dy_shape.Count(0), dy_shape.At(c_idx), dy_shape.At(d_idx), dy_shape.At(h_idx), dy_shape.At(w_idx), dx_shape.At(d_idx), dx_shape.At(h_idx), dx_shape.At(w_idx), padding[4], padding[0], padding[2]); } OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_CONSTANT_PAD3D_FUNCTOR, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kGPU), PADDING_DATA_TYPE_GPU_SEQ); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_CONSTANT_PAD3D_GRAD_FUNCTOR, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kGPU), PADDING_DATA_TYPE_GPU_SEQ); } // namespace user_op } // namespace oneflow
172bb9385d01e58db4728ca0c744fc92df18ad99.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples #include <helper_timer.h> #include "cu_utils.h" #include "oddevensort.h" #include "cdpQuicksort.h" #include "cdpHeapsort.h" #define ARRAY_ELEMENTS_MAX 0x7fffffff //262144 int comp_int(const void *a, const void *b) { return *(int*)a - *(int*)b; } int main(const int argc, const char * argv[]) { unsigned int elemCount = 0; if (argc == 2) { elemCount = atoi(argv[1]); } else { printf("incorrect argument number.n"); return EXIT_FAILURE; } if (!(elemCount > 0 && elemCount <= ARRAY_ELEMENTS_MAX)) { printf("Supplied number of elements is out of bound %d.\n", ARRAY_ELEMENTS_MAX); return EXIT_FAILURE; } // device(0) : GTX 1080 // device(1) : GTX 750Ti hipSetDevice(0); int * A = new int[elemCount]; // A = (int*) malloc(sizeof(unsigned int) * elemCount ); if (A == NULL) { printf("malloc failed.\n"); fflush(stdout); return EXIT_FAILURE; } // setup dummy input srand(time(NULL)); for (unsigned int i = 0; i < elemCount; i++) { A[i] = rand() % 32; } if (elemCount <= 16) { for (unsigned int i = 0; i < elemCount; i++) { if (i < 70 || i == elemCount - 1) { printf("%4u ", i); } else if (i == elemCount - 2) { printf("... "); } } printf("\n"); } for (unsigned int i = 0; i < elemCount; i++) { if (i < 70 || i == elemCount - 1) { printf("%4d ", A[i]); } else if (i == elemCount - 2) { printf("... 
"); } } printf("\n"); printf("generated %u elements.\n\n", elemCount); fflush(stdout); // setup input copy on device mem int *devArray; unsigned devACapa = 128 * MAX(CDIV(elemCount,128),1); hipMalloc((void**)&devArray, sizeof(unsigned int) * devACapa); hipMemcpy(devArray, A, sizeof(unsigned int) * devACapa, hipMemcpyHostToDevice); cuStopWatch sw; printf("Sort by oddevensort_gmem..\n"); sw.reset(); sw.start(); cu_oddevensort_gmem(devArray, elemCount); hipDeviceSynchronize(); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); hipMemcpy(devArray, A, sizeof(unsigned int) * devACapa, hipMemcpyHostToDevice); printf("Sort by oddevensort_smem...\n"); sw.reset(); sw.start(); cu_oddevensort(devArray, elemCount); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); printf("Sort by qsort in stdlib...\n"); sw.reset(); sw.start(); qsort(A, elemCount, sizeof(int), comp_int); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); printf("Sort by cdp_qsort...\n"); hipMemcpy(devArray, A, sizeof(unsigned int) * devACapa, hipMemcpyHostToDevice); sw.reset(); sw.start(); cdp_qsort(devArray, elemCount); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); printf("Sort by cdp_heapsort...\n"); hipMemcpy(devArray, A, sizeof(unsigned int) * devACapa, hipMemcpyHostToDevice); sw.reset(); sw.start(); cu_makeheap(devArray, elemCount); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); hipMemcpy(A, devArray, sizeof(unsigned int) * devACapa, hipMemcpyDeviceToHost); int firstFailure = elemCount; for (unsigned int i = 0; i < elemCount; i++) { if (i < elemCount - 1) { if (A[i] > A[i + 1]) { firstFailure = i; } } if (i < 70 || i == elemCount - 1) { printf("%4u ", A[i]); } else if (i == elemCount - 2) { printf("... 
"); } } printf("\n"); if (firstFailure < elemCount) { printf("!!!Sort failure deteced at A[%d] = %d and A[%d] = %d!!!\n", firstFailure, A[firstFailure], firstFailure+1, A[firstFailure+1]); } hipFree(devArray); delete [] A; hipDeviceReset(); }
172bb9385d01e58db4728ca0c744fc92df18ad99.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples #include <helper_timer.h> #include "cu_utils.h" #include "oddevensort.h" #include "cdpQuicksort.h" #include "cdpHeapsort.h" #define ARRAY_ELEMENTS_MAX 0x7fffffff //262144 int comp_int(const void *a, const void *b) { return *(int*)a - *(int*)b; } int main(const int argc, const char * argv[]) { unsigned int elemCount = 0; if (argc == 2) { elemCount = atoi(argv[1]); } else { printf("incorrect argument number.n"); return EXIT_FAILURE; } if (!(elemCount > 0 && elemCount <= ARRAY_ELEMENTS_MAX)) { printf("Supplied number of elements is out of bound %d.\n", ARRAY_ELEMENTS_MAX); return EXIT_FAILURE; } // device(0) : GTX 1080 // device(1) : GTX 750Ti cudaSetDevice(0); int * A = new int[elemCount]; // A = (int*) malloc(sizeof(unsigned int) * elemCount ); if (A == NULL) { printf("malloc failed.\n"); fflush(stdout); return EXIT_FAILURE; } // setup dummy input srand(time(NULL)); for (unsigned int i = 0; i < elemCount; i++) { A[i] = rand() % 32; } if (elemCount <= 16) { for (unsigned int i = 0; i < elemCount; i++) { if (i < 70 || i == elemCount - 1) { printf("%4u ", i); } else if (i == elemCount - 2) { printf("... "); } } printf("\n"); } for (unsigned int i = 0; i < elemCount; i++) { if (i < 70 || i == elemCount - 1) { printf("%4d ", A[i]); } else if (i == elemCount - 2) { printf("... 
"); } } printf("\n"); printf("generated %u elements.\n\n", elemCount); fflush(stdout); // setup input copy on device mem int *devArray; unsigned devACapa = 128 * MAX(CDIV(elemCount,128),1); cudaMalloc((void**)&devArray, sizeof(unsigned int) * devACapa); cudaMemcpy(devArray, A, sizeof(unsigned int) * devACapa, cudaMemcpyHostToDevice); cuStopWatch sw; printf("Sort by oddevensort_gmem..\n"); sw.reset(); sw.start(); cu_oddevensort_gmem(devArray, elemCount); cudaDeviceSynchronize(); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); cudaMemcpy(devArray, A, sizeof(unsigned int) * devACapa, cudaMemcpyHostToDevice); printf("Sort by oddevensort_smem...\n"); sw.reset(); sw.start(); cu_oddevensort(devArray, elemCount); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); printf("Sort by qsort in stdlib...\n"); sw.reset(); sw.start(); qsort(A, elemCount, sizeof(int), comp_int); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); printf("Sort by cdp_qsort...\n"); cudaMemcpy(devArray, A, sizeof(unsigned int) * devACapa, cudaMemcpyHostToDevice); sw.reset(); sw.start(); cdp_qsort(devArray, elemCount); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); printf("Sort by cdp_heapsort...\n"); cudaMemcpy(devArray, A, sizeof(unsigned int) * devACapa, cudaMemcpyHostToDevice); sw.reset(); sw.start(); cu_makeheap(devArray, elemCount); sw.stop(); printf("Elapsed time %f msec.\n\n", (float)((int)(sw.timerValue() * 1000)) / 1000); cudaMemcpy(A, devArray, sizeof(unsigned int) * devACapa, cudaMemcpyDeviceToHost); int firstFailure = elemCount; for (unsigned int i = 0; i < elemCount; i++) { if (i < elemCount - 1) { if (A[i] > A[i + 1]) { firstFailure = i; } } if (i < 70 || i == elemCount - 1) { printf("%4u ", A[i]); } else if (i == elemCount - 2) { printf("... 
"); } } printf("\n"); if (firstFailure < elemCount) { printf("!!!Sort failure deteced at A[%d] = %d and A[%d] = %d!!!\n", firstFailure, A[firstFailure], firstFailure+1, A[firstFailure+1]); } cudaFree(devArray); delete [] A; cudaDeviceReset(); }
a027c6e4b49fb2e7c92c5f84e7a1ab23a813847b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "smoothcell.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in = NULL; hipMalloc(&in, XSIZE*YSIZE); float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( smoothcell), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( smoothcell), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( smoothcell), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a027c6e4b49fb2e7c92c5f84e7a1ab23a813847b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "smoothcell.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); smoothcell<<<gridBlock,threadBlock>>>(in,out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { smoothcell<<<gridBlock,threadBlock>>>(in,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { smoothcell<<<gridBlock,threadBlock>>>(in,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8fe4398fc3fc0b54d15800530c06e2268ce53795.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../gtest.h" #include <algorithm> #include <vector> #include <backends/gpu/reduce_by_key.hpp> #include "gpu_vector.hpp" using namespace arb; template <typename T, typename I> __global__ void reduce_kernel(const T* src, T* dst, const I* index, int n) { unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; unsigned mask = gpu::ballot(0xffffffff, tid<n); if (tid<n) { gpu::reduce_by_key(src[tid], dst, index[tid], mask); } } template <typename T> std::vector<T> reduce(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) { EXPECT_EQ(in.size(), index.size()); EXPECT_TRUE(std::is_sorted(index.begin(), index.end())); using array = gpu_vector<T>; using iarray = gpu_vector<int>; int n = in.size(); array src(in); iarray idx(index); array dst(std::vector<T>(n_out, 0)); unsigned grid_dim = (n-1)/block_dim + 1; hipLaunchKernelGGL(( reduce_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, src.data(), dst.data(), idx.data(), n); return dst.host_vector(); } TEST(reduce_by_key, no_repetitions) { int n = 64; std::vector<int> index(n); for (int i=0; i<n; ++i) index[i] = i; { std::vector<float> in(n, 1); auto out = reduce(in, n, index); for (auto o: out) EXPECT_EQ(o, 1.0f); } { std::vector<double> in(n, 1); auto out = reduce(in, n, index); for (auto o: out) EXPECT_EQ(o, 1.0); } } TEST(reduce_by_key, single_repeated_index) { // Perform reduction of a sequence of 1s of length n // The expected result is n for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) { std::vector<double> in(n, 1); std::vector<int> index(n, 0); auto out = reduce(in, 1, index, 32); EXPECT_EQ(double(n), out[0]); } // Perform reduction of an ascending sequence of {1,2,3,...,n} // The expected result is n*(n+1)/2 for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) { std::vector<double> in(n); for (int i=0; i<n; ++i) in[i] = i+1; std::vector<int> index(n, 0); auto out = reduce(in, 1, 
index); EXPECT_EQ(out[0], double((n+1)*n/2)); } } TEST(reduce_by_key, scatter) { // A monotonic sequence of keys with repetitions and gaps, for a reduction // onto an array of length 12. std::size_t n = 12; std::vector<int> index = {0,0,0,1,2,2,2,2,3,3,7,7,7,7,7,11}; std::vector<double> in(index.size(), 1); std::vector<double> expected = {3., 1., 4., 2., 0., 0., 0., 5., 0., 0., 0., 1.}; EXPECT_EQ(n, expected.size()); auto out = reduce(in, n, index); EXPECT_EQ(expected, out); // rerun with 7 threads per thread block, to test // * using more than one thread block // * thread blocks that are not a multiple of 32 // * thread blocks that are less than 32 out = reduce(in, n, index, 7); EXPECT_EQ(expected, out); } // Test kernels that perform more than one reduction in a single invokation. // Used to reproduce and test for synchronization issues on V100 GPUs. template <typename T, typename I> __global__ void reduce_twice_kernel(const T* src, T* dst, const I* index, int n) { unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; unsigned mask = gpu::ballot(0xffffffff, tid<n); if (tid<n) { gpu::reduce_by_key(src[tid], dst, index[tid], mask); gpu::reduce_by_key(src[tid], dst, index[tid], mask); } } template <typename T> std::vector<T> reduce_twice(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) { EXPECT_EQ(in.size(), index.size()); EXPECT_TRUE(std::is_sorted(index.begin(), index.end())); using array = gpu_vector<T>; using iarray = gpu_vector<int>; int n = in.size(); array src(in); iarray idx(index); array dst(std::vector<T>(n_out, 0)); unsigned grid_dim = (n-1)/block_dim + 1; hipLaunchKernelGGL(( reduce_twice_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, src.data(), dst.data(), idx.data(), n); return dst.host_vector(); } TEST(reduce_by_key, scatter_twice) { // A monotonic sequence of keys with repetitions and gaps, for a reduction // onto an array of length 12. 
std::size_t n = 12; std::vector<int> index = {0,0,0,1,2,2,3,7,7,7,11}; std::vector<double> in(index.size(), 1); std::vector<double> expected = {6., 2., 4., 2., 0., 0., 0., 6., 0., 0., 0., 2.}; EXPECT_EQ(n, expected.size()); auto out = reduce_twice(in, n, index); EXPECT_EQ(expected, out); // rerun with 7 threads per thread block, to test // * using more than one thread block // * thread blocks that are not a multiple of 32 // * thread blocks that are less than 32 out = reduce_twice(in, n, index, 7); EXPECT_EQ(expected, out); }
8fe4398fc3fc0b54d15800530c06e2268ce53795.cu
#include "../gtest.h" #include <algorithm> #include <vector> #include <backends/gpu/reduce_by_key.hpp> #include "gpu_vector.hpp" using namespace arb; template <typename T, typename I> __global__ void reduce_kernel(const T* src, T* dst, const I* index, int n) { unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; unsigned mask = gpu::ballot(0xffffffff, tid<n); if (tid<n) { gpu::reduce_by_key(src[tid], dst, index[tid], mask); } } template <typename T> std::vector<T> reduce(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) { EXPECT_EQ(in.size(), index.size()); EXPECT_TRUE(std::is_sorted(index.begin(), index.end())); using array = gpu_vector<T>; using iarray = gpu_vector<int>; int n = in.size(); array src(in); iarray idx(index); array dst(std::vector<T>(n_out, 0)); unsigned grid_dim = (n-1)/block_dim + 1; reduce_kernel<<<grid_dim, block_dim>>>(src.data(), dst.data(), idx.data(), n); return dst.host_vector(); } TEST(reduce_by_key, no_repetitions) { int n = 64; std::vector<int> index(n); for (int i=0; i<n; ++i) index[i] = i; { std::vector<float> in(n, 1); auto out = reduce(in, n, index); for (auto o: out) EXPECT_EQ(o, 1.0f); } { std::vector<double> in(n, 1); auto out = reduce(in, n, index); for (auto o: out) EXPECT_EQ(o, 1.0); } } TEST(reduce_by_key, single_repeated_index) { // Perform reduction of a sequence of 1s of length n // The expected result is n for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) { std::vector<double> in(n, 1); std::vector<int> index(n, 0); auto out = reduce(in, 1, index, 32); EXPECT_EQ(double(n), out[0]); } // Perform reduction of an ascending sequence of {1,2,3,...,n} // The expected result is n*(n+1)/2 for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) { std::vector<double> in(n); for (int i=0; i<n; ++i) in[i] = i+1; std::vector<int> index(n, 0); auto out = reduce(in, 1, index); EXPECT_EQ(out[0], double((n+1)*n/2)); } } TEST(reduce_by_key, scatter) { // A monotonic sequence of keys with 
repetitions and gaps, for a reduction // onto an array of length 12. std::size_t n = 12; std::vector<int> index = {0,0,0,1,2,2,2,2,3,3,7,7,7,7,7,11}; std::vector<double> in(index.size(), 1); std::vector<double> expected = {3., 1., 4., 2., 0., 0., 0., 5., 0., 0., 0., 1.}; EXPECT_EQ(n, expected.size()); auto out = reduce(in, n, index); EXPECT_EQ(expected, out); // rerun with 7 threads per thread block, to test // * using more than one thread block // * thread blocks that are not a multiple of 32 // * thread blocks that are less than 32 out = reduce(in, n, index, 7); EXPECT_EQ(expected, out); } // Test kernels that perform more than one reduction in a single invokation. // Used to reproduce and test for synchronization issues on V100 GPUs. template <typename T, typename I> __global__ void reduce_twice_kernel(const T* src, T* dst, const I* index, int n) { unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; unsigned mask = gpu::ballot(0xffffffff, tid<n); if (tid<n) { gpu::reduce_by_key(src[tid], dst, index[tid], mask); gpu::reduce_by_key(src[tid], dst, index[tid], mask); } } template <typename T> std::vector<T> reduce_twice(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) { EXPECT_EQ(in.size(), index.size()); EXPECT_TRUE(std::is_sorted(index.begin(), index.end())); using array = gpu_vector<T>; using iarray = gpu_vector<int>; int n = in.size(); array src(in); iarray idx(index); array dst(std::vector<T>(n_out, 0)); unsigned grid_dim = (n-1)/block_dim + 1; reduce_twice_kernel<<<grid_dim, block_dim>>>(src.data(), dst.data(), idx.data(), n); return dst.host_vector(); } TEST(reduce_by_key, scatter_twice) { // A monotonic sequence of keys with repetitions and gaps, for a reduction // onto an array of length 12. 
std::size_t n = 12; std::vector<int> index = {0,0,0,1,2,2,3,7,7,7,11}; std::vector<double> in(index.size(), 1); std::vector<double> expected = {6., 2., 4., 2., 0., 0., 0., 6., 0., 0., 0., 2.}; EXPECT_EQ(n, expected.size()); auto out = reduce_twice(in, n, index); EXPECT_EQ(expected, out); // rerun with 7 threads per thread block, to test // * using more than one thread block // * thread blocks that are not a multiple of 32 // * thread blocks that are less than 32 out = reduce_twice(in, n, index, 7); EXPECT_EQ(expected, out); }
0096c4460ce91db8e1f12af608da1cedbc5c7eec.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "ConvolutionRowGPU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *d_Dst = NULL; hipMalloc(&d_Dst, XSIZE*YSIZE); double *d_Src = NULL; hipMalloc(&d_Src, XSIZE*YSIZE); double *d_Filter = NULL; hipMalloc(&d_Filter, XSIZE*YSIZE); int imageW = 1; int imageH = 1; int filterR = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( ConvolutionRowGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,d_Filter,imageW,imageH,filterR); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( ConvolutionRowGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,d_Filter,imageW,imageH,filterR); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( ConvolutionRowGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,d_Filter,imageW,imageH,filterR); 
} auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0096c4460ce91db8e1f12af608da1cedbc5c7eec.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "ConvolutionRowGPU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *d_Dst = NULL; cudaMalloc(&d_Dst, XSIZE*YSIZE); double *d_Src = NULL; cudaMalloc(&d_Src, XSIZE*YSIZE); double *d_Filter = NULL; cudaMalloc(&d_Filter, XSIZE*YSIZE); int imageW = 1; int imageH = 1; int filterR = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); ConvolutionRowGPU<<<gridBlock,threadBlock>>>(d_Dst,d_Src,d_Filter,imageW,imageH,filterR); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { ConvolutionRowGPU<<<gridBlock,threadBlock>>>(d_Dst,d_Src,d_Filter,imageW,imageH,filterR); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { ConvolutionRowGPU<<<gridBlock,threadBlock>>>(d_Dst,d_Src,d_Filter,imageW,imageH,filterR); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
18b1bfacf3ae2223020f85c3da49e08e4a40c51b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void update_gpu( const float *r, float *du, float *u, float *u_sum, float *u_max) { *u += *du + alpha_cuda * (*r); *du = 0.0f; *u_sum += (*u) * (*u); *u_max = MAX(*u_max, *u); } // CUDA kernel function __global__ void op_cuda_update( const float *__restrict arg0, float *arg1, float *arg2, float *arg3, float *arg4, int set_size ) { float arg3_l[1]; for ( int d=0; d<1; d++ ){ arg3_l[d]=ZERO_float; } float arg4_l[1]; for ( int d=0; d<1; d++ ){ arg4_l[d]=arg4[d+blockIdx.x*1]; } //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call update_gpu(arg0+n*1, arg1+n*1, arg2+n*1, arg3_l, arg4_l); } //global reductions for ( int d=0; d<1; d++ ){ op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]); } for ( int d=0; d<1; d++ ){ op_reduction<OP_MAX>(&arg4[d+blockIdx.x*1],arg4_l[d]); } } //host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4){ float*arg3h = (float *)arg3.data; float*arg4h = (float *)arg4.data; int nargs = 5; op_arg args[5]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(1); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[1].name = name; OP_kernels[1].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: update"); } op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif int nblocks = 200; //transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); 
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg3.data = OP_reduct_h + reduct_bytes; arg3.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((float *)arg3.data)[d+b*1] = ZERO_float; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); arg4.data = OP_reduct_h + reduct_bytes; arg4.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((float *)arg4.data)[d+b*1] = arg4h[d]; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); mvReductArraysToDevice(reduct_bytes); int nshared = reduct_size*nthread; hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0, (float *) arg0.data_d, (float *) arg1.data_d, (float *) arg2.data_d, (float *) arg3.data_d, (float *) arg4.data_d, set->size ); //transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg3h[d] = arg3h[d] + ((float *)arg3.data)[d+b*1]; } } arg3.data = (char *)arg3h; op_mpi_reduce(&arg3,arg3h); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg4h[d] = MAX(arg4h[d],((float *)arg4.data)[d+b*1]); } } arg4.data = (char *)arg4h; op_mpi_reduce(&arg4,arg4h); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; OP_kernels[1].transfer += (float)set->size * arg0.size; OP_kernels[1].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f; }
18b1bfacf3ae2223020f85c3da49e08e4a40c51b.cu
// // auto-generated by op2.py // //user function __device__ void update_gpu( const float *r, float *du, float *u, float *u_sum, float *u_max) { *u += *du + alpha_cuda * (*r); *du = 0.0f; *u_sum += (*u) * (*u); *u_max = MAX(*u_max, *u); } // CUDA kernel function __global__ void op_cuda_update( const float *__restrict arg0, float *arg1, float *arg2, float *arg3, float *arg4, int set_size ) { float arg3_l[1]; for ( int d=0; d<1; d++ ){ arg3_l[d]=ZERO_float; } float arg4_l[1]; for ( int d=0; d<1; d++ ){ arg4_l[d]=arg4[d+blockIdx.x*1]; } //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call update_gpu(arg0+n*1, arg1+n*1, arg2+n*1, arg3_l, arg4_l); } //global reductions for ( int d=0; d<1; d++ ){ op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]); } for ( int d=0; d<1; d++ ){ op_reduction<OP_MAX>(&arg4[d+blockIdx.x*1],arg4_l[d]); } } //host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4){ float*arg3h = (float *)arg3.data; float*arg4h = (float *)arg4.data; int nargs = 5; op_arg args[5]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(1); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[1].name = name; OP_kernels[1].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: update"); } op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif int nblocks = 200; //transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = 
MAX(reduct_size,sizeof(float)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg3.data = OP_reduct_h + reduct_bytes; arg3.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((float *)arg3.data)[d+b*1] = ZERO_float; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); arg4.data = OP_reduct_h + reduct_bytes; arg4.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((float *)arg4.data)[d+b*1] = arg4h[d]; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); mvReductArraysToDevice(reduct_bytes); int nshared = reduct_size*nthread; op_cuda_update<<<nblocks,nthread,nshared>>>( (float *) arg0.data_d, (float *) arg1.data_d, (float *) arg2.data_d, (float *) arg3.data_d, (float *) arg4.data_d, set->size ); //transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg3h[d] = arg3h[d] + ((float *)arg3.data)[d+b*1]; } } arg3.data = (char *)arg3h; op_mpi_reduce(&arg3,arg3h); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg4h[d] = MAX(arg4h[d],((float *)arg4.data)[d+b*1]); } } arg4.data = (char *)arg4h; op_mpi_reduce(&arg4,arg4h); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; OP_kernels[1].transfer += (float)set->size * arg0.size; OP_kernels[1].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f; }
dc037d682fbacffcbaa4f64f92264023ebe588c7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "concat.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); float *input1 = NULL; hipMalloc(&input1, XSIZE*YSIZE); float *input2 = NULL; hipMalloc(&input2, XSIZE*YSIZE); float *input3 = NULL; hipMalloc(&input3, XSIZE*YSIZE); float *input4 = NULL; hipMalloc(&input4, XSIZE*YSIZE); const int size = 1; const int in_channel1 = 1; const int in_channel2 = 1; const int in_channel3 = 1; const int in_channel4 = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( concat), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input1,input2,input3,input4,size,in_channel1,in_channel2,in_channel3,in_channel4); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( concat), dim3(gridBlock),dim3(threadBlock), 0, 0, 
output,input1,input2,input3,input4,size,in_channel1,in_channel2,in_channel3,in_channel4); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( concat), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input1,input2,input3,input4,size,in_channel1,in_channel2,in_channel3,in_channel4); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
dc037d682fbacffcbaa4f64f92264023ebe588c7.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "concat.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); float *input1 = NULL; cudaMalloc(&input1, XSIZE*YSIZE); float *input2 = NULL; cudaMalloc(&input2, XSIZE*YSIZE); float *input3 = NULL; cudaMalloc(&input3, XSIZE*YSIZE); float *input4 = NULL; cudaMalloc(&input4, XSIZE*YSIZE); const int size = 1; const int in_channel1 = 1; const int in_channel2 = 1; const int in_channel3 = 1; const int in_channel4 = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); concat<<<gridBlock,threadBlock>>>(output,input1,input2,input3,input4,size,in_channel1,in_channel2,in_channel3,in_channel4); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { concat<<<gridBlock,threadBlock>>>(output,input1,input2,input3,input4,size,in_channel1,in_channel2,in_channel3,in_channel4); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { 
concat<<<gridBlock,threadBlock>>>(output,input1,input2,input3,input4,size,in_channel1,in_channel2,in_channel3,in_channel4); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
33b048b203fd140018a08e83d3b0d4e1d8c989f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_mass_flux_y; int xdim0_advec_mom_kernel_mass_flux_y_h = -1; __constant__ int ydim0_advec_mom_kernel_mass_flux_y; int ydim0_advec_mom_kernel_mass_flux_y_h = -1; __constant__ int xdim1_advec_mom_kernel_mass_flux_y; int xdim1_advec_mom_kernel_mass_flux_y_h = -1; __constant__ int ydim1_advec_mom_kernel_mass_flux_y; int ydim1_advec_mom_kernel_mass_flux_y_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_mom_kernel_mass_flux_y * (y) + \ xdim0_advec_mom_kernel_mass_flux_y * ydim0_advec_mom_kernel_mass_flux_y * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_mom_kernel_mass_flux_y * (y) + \ xdim1_advec_mom_kernel_mass_flux_y * ydim1_advec_mom_kernel_mass_flux_y * \ (z)) // user function __device__ inline void advec_mom_kernel_mass_flux_y(double *node_flux, const double *mass_flux_y) { node_flux[OPS_ACC0(0, 0, 0)] = 0.125 * (mass_flux_y[OPS_ACC1(-1, 0, 0)] + mass_flux_y[OPS_ACC1(0, 0, 0)] + mass_flux_y[OPS_ACC1(-1, 1, 0)] + mass_flux_y[OPS_ACC1(0, 1, 0)] + mass_flux_y[OPS_ACC1(-1, 0, -1)] + mass_flux_y[OPS_ACC1(0, 0, -1)] + mass_flux_y[OPS_ACC1(-1, 1, -1)] + mass_flux_y[OPS_ACC1(0, 1, -1)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_advec_mom_kernel_mass_flux_y(double *__restrict arg0, const double *__restrict arg1, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_mass_flux_y + idx_z * 1 * 1 * xdim0_advec_mom_kernel_mass_flux_y * ydim0_advec_mom_kernel_mass_flux_y; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_mass_flux_y + idx_z * 1 * 1 * xdim1_advec_mom_kernel_mass_flux_y * ydim1_advec_mom_kernel_mass_flux_y; if (idx_x < size0 && idx_y < size1 && 
idx_z < size2) { advec_mom_kernel_mass_flux_y(arg0, arg1); } } // host stub function void ops_par_loop_advec_mom_kernel_mass_flux_y(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 2, range, 29)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(29, "advec_mom_kernel_mass_flux_y"); OPS_kernels[29].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_mass_flux_y_h || ydim0 != ydim0_advec_mom_kernel_mass_flux_y_h || xdim1 != xdim1_advec_mom_kernel_mass_flux_y_h || ydim1 != ydim1_advec_mom_kernel_mass_flux_y_h) { hipMemcpyToSymbol(xdim0_advec_mom_kernel_mass_flux_y, &xdim0, sizeof(int)); xdim0_advec_mom_kernel_mass_flux_y_h = xdim0; hipMemcpyToSymbol(ydim0_advec_mom_kernel_mass_flux_y, &ydim0, 
sizeof(int)); ydim0_advec_mom_kernel_mass_flux_y_h = ydim0; hipMemcpyToSymbol(xdim1_advec_mom_kernel_mass_flux_y, &xdim1, sizeof(int)); xdim1_advec_mom_kernel_mass_flux_y_h = xdim1; hipMemcpyToSymbol(ydim1_advec_mom_kernel_mass_flux_y, &ydim1, sizeof(int)); ydim1_advec_mom_kernel_mass_flux_y_h = ydim1; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[2]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[29].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data 
hipLaunchKernelGGL(( ops_advec_mom_kernel_mass_flux_y), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[29].time += t1 - t2; } ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[29].mpi_time += t2 - t1; OPS_kernels[29].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[29].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
33b048b203fd140018a08e83d3b0d4e1d8c989f9.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_mass_flux_y; int xdim0_advec_mom_kernel_mass_flux_y_h = -1; __constant__ int ydim0_advec_mom_kernel_mass_flux_y; int ydim0_advec_mom_kernel_mass_flux_y_h = -1; __constant__ int xdim1_advec_mom_kernel_mass_flux_y; int xdim1_advec_mom_kernel_mass_flux_y_h = -1; __constant__ int ydim1_advec_mom_kernel_mass_flux_y; int ydim1_advec_mom_kernel_mass_flux_y_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_mom_kernel_mass_flux_y * (y) + \ xdim0_advec_mom_kernel_mass_flux_y * ydim0_advec_mom_kernel_mass_flux_y * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_mom_kernel_mass_flux_y * (y) + \ xdim1_advec_mom_kernel_mass_flux_y * ydim1_advec_mom_kernel_mass_flux_y * \ (z)) // user function __device__ inline void advec_mom_kernel_mass_flux_y(double *node_flux, const double *mass_flux_y) { node_flux[OPS_ACC0(0, 0, 0)] = 0.125 * (mass_flux_y[OPS_ACC1(-1, 0, 0)] + mass_flux_y[OPS_ACC1(0, 0, 0)] + mass_flux_y[OPS_ACC1(-1, 1, 0)] + mass_flux_y[OPS_ACC1(0, 1, 0)] + mass_flux_y[OPS_ACC1(-1, 0, -1)] + mass_flux_y[OPS_ACC1(0, 0, -1)] + mass_flux_y[OPS_ACC1(-1, 1, -1)] + mass_flux_y[OPS_ACC1(0, 1, -1)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_advec_mom_kernel_mass_flux_y(double *__restrict arg0, const double *__restrict arg1, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_mass_flux_y + idx_z * 1 * 1 * xdim0_advec_mom_kernel_mass_flux_y * ydim0_advec_mom_kernel_mass_flux_y; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_mass_flux_y + idx_z * 1 * 1 * xdim1_advec_mom_kernel_mass_flux_y * ydim1_advec_mom_kernel_mass_flux_y; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_mass_flux_y(arg0, arg1); } } // host stub function void 
ops_par_loop_advec_mom_kernel_mass_flux_y(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 2, range, 29)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(29, "advec_mom_kernel_mass_flux_y"); OPS_kernels[29].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_mass_flux_y_h || ydim0 != ydim0_advec_mom_kernel_mass_flux_y_h || xdim1 != xdim1_advec_mom_kernel_mass_flux_y_h || ydim1 != ydim1_advec_mom_kernel_mass_flux_y_h) { cudaMemcpyToSymbol(xdim0_advec_mom_kernel_mass_flux_y, &xdim0, sizeof(int)); xdim0_advec_mom_kernel_mass_flux_y_h = xdim0; cudaMemcpyToSymbol(ydim0_advec_mom_kernel_mass_flux_y, &ydim0, sizeof(int)); ydim0_advec_mom_kernel_mass_flux_y_h = ydim0; 
cudaMemcpyToSymbol(xdim1_advec_mom_kernel_mass_flux_y, &xdim1, sizeof(int)); xdim1_advec_mom_kernel_mass_flux_y_h = xdim1; cudaMemcpyToSymbol(ydim1_advec_mom_kernel_mass_flux_y, &ydim1, sizeof(int)); ydim1_advec_mom_kernel_mass_flux_y_h = ydim1; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[2]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[29].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_advec_mom_kernel_mass_flux_y<<<grid, tblock>>>( (double 
*)p_a[0], (double *)p_a[1], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[29].time += t1 - t2; } ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[29].mpi_time += t2 - t1; OPS_kernels[29].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[29].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
e4948c5e87bad37c790879e65806551f8d7c782d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Modifications Copyright 2017 H2O.ai, Inc. */ // original code from https://github.com/NVIDIA/kmeans (Apache V2.0 License) #include "kmeans_labels.h" #include <rocblas.h> #include <cfloat> #include <unistd.h> #include "kmeans_general.h" hipStream_t cuda_stream[MAX_NGPUS]; namespace kmeans { namespace detail { template<typename T> struct absolute_value { __host__ __device__ void operator()(T &x) const { x = (x > 0 ? x : -x); } }; hipblasHandle_t cublas_handle[MAX_NGPUS]; void labels_init() { hipblasStatus_t stat; hipError_t err; int dev_num; safe_cuda(hipGetDevice(&dev_num)); stat = hipblasCreate(&detail::cublas_handle[dev_num]); if (stat != HIPBLAS_STATUS_SUCCESS) { std::cout << "CUBLAS initialization failed" << std::endl; exit(1); } err = safe_cuda(hipStreamCreate(&cuda_stream[dev_num])); if (err != hipSuccess) { std::cout << "Stream creation failed" << std::endl; } hipblasSetStream(cublas_handle[dev_num], cuda_stream[dev_num]); mycub::cub_init(dev_num); } void labels_close() { int dev_num; safe_cuda(hipGetDevice(&dev_num)); safe_cublas(hipblasDestroy(cublas_handle[dev_num])); safe_cuda(hipStreamDestroy(cuda_stream[dev_num])); mycub::cub_close(dev_num); } void streamsync(int dev_num) { hipStreamSynchronize(cuda_stream[dev_num]); } /** * Matrix multiplication: alpha * A^T * B + beta * C * Optimized for tall and skinny matrices * * @tparam float_t * @param A * @param B * @param C * @param alpha * @param beta * @param n * @param d * @param k * @param max_block_rows * @return */ template<typename float_t> __global__ void matmul(const float_t *A, const float_t *B, float_t *C, const float_t alpha, const float_t beta, int n, int d, int k, int max_block_rows) { extern __shared__ __align__(sizeof(float_t)) unsigned char my_smem[]; float_t *shared = reinterpret_cast<float_t *>(my_smem); float_t *s_A = shared; float_t *s_B = shared + max_block_rows * d; for (int i = threadIdx.x; i < 
d * k; i += blockDim.x) { s_B[i] = B[i]; } int block_start_row_index = blockIdx.x * max_block_rows; int block_rows = max_block_rows; if (blockIdx.x == gridDim.x - 1 && n % max_block_rows != 0) { block_rows = n % max_block_rows; } for (int i = threadIdx.x; i < d * block_rows; i += blockDim.x) { s_A[i] = alpha * A[d * block_start_row_index + i]; } __syncthreads(); float_t elem_c = 0; int col_c = threadIdx.x % k; int abs_row_c = block_start_row_index + threadIdx.x / k; int row_c = threadIdx.x / k; // Thread/Block combination either too far for data array // Or is calculating for index that should be calculated in a different blocks - in some edge cases // "col_c * n + abs_row_c" can yield same result in different thread/block combinations if (abs_row_c >= n || threadIdx.x >= block_rows * k) { return; } for (int i = 0; i < d; i++) { elem_c += s_B[d * col_c + i] * s_A[d * row_c + i]; } C[col_c * n + abs_row_c] = beta * C[col_c * n + abs_row_c] + elem_c; } template<> void calculate_distances<double>(int verbose, int q, size_t n, int d, int k, thrust::device_vector<double> &data, size_t data_offset, thrust::device_vector<double> &centroids, thrust::device_vector<double> &data_dots, thrust::device_vector<double> &centroid_dots, thrust::device_vector<double> &pairwise_distances) { detail::make_self_dots(k, d, centroids, centroid_dots); detail::make_all_dots(n, k, data_offset, data_dots, centroid_dots, pairwise_distances); //||x-y||^2 = ||x||^2 + ||y||^2 - 2 x . 
y //pairwise_distances has ||x||^2 + ||y||^2, so beta = 1 //The dgemm calculates x.y for all x and y, so alpha = -2.0 double alpha = -2.0; double beta = 1.0; //If the data were in standard column major order, we'd do a //centroids * data ^ T //But the data is in row major order, so we have to permute //the arguments a little int dev_num; safe_cuda(hipGetDevice(&dev_num)); if (k <= 16 && d <= 64) { const int BLOCK_SIZE_MUL = 128; int block_rows = ::min((size_t)BLOCK_SIZE_MUL / k, n); int grid_size = ::ceil(static_cast<double>(n) / block_rows); int shared_size_B = d * k * sizeof(double); int shared_size_A = block_rows * d * sizeof(double); matmul << < grid_size, BLOCK_SIZE_MUL, shared_size_B + shared_size_A >> > ( thrust::raw_pointer_cast(data.data() + data_offset * d), thrust::raw_pointer_cast(centroids.data()), thrust::raw_pointer_cast(pairwise_distances.data()), alpha, beta, n, d, k, block_rows ); } else { hipblasStatus_t stat = safe_cublas(hipblasDgemm(detail::cublas_handle[dev_num], HIPBLAS_OP_T, HIPBLAS_OP_N, n, k, d, &alpha, thrust::raw_pointer_cast(data.data() + data_offset * d), d,//Has to be n or d thrust::raw_pointer_cast(centroids.data()), d,//Has to be k or d &beta, thrust::raw_pointer_cast(pairwise_distances.data()), n)); //Has to be n or k if (stat != HIPBLAS_STATUS_SUCCESS) { std::cout << "Invalid Dgemm" << std::endl; exit(1); } } thrust::for_each(pairwise_distances.begin(), pairwise_distances.end(), absolute_value<double>()); // in-place transformation to ensure all distances are positive indefinite #if(CHECK) gpuErrchk(hipGetLastError()); #endif } template<> void calculate_distances<float>(int verbose, int q, size_t n, int d, int k, thrust::device_vector<float> &data, size_t data_offset, thrust::device_vector<float> &centroids, thrust::device_vector<float> &data_dots, thrust::device_vector<float> &centroid_dots, thrust::device_vector<float> &pairwise_distances) { detail::make_self_dots(k, d, centroids, centroid_dots); detail::make_all_dots(n, k, 
data_offset, data_dots, centroid_dots, pairwise_distances); //||x-y||^2 = ||x||^2 + ||y||^2 - 2 x . y //pairwise_distances has ||x||^2 + ||y||^2, so beta = 1 //The dgemm calculates x.y for all x and y, so alpha = -2.0 float alpha = -2.0; float beta = 1.0; //If the data were in standard column major order, we'd do a //centroids * data ^ T //But the data is in row major order, so we have to permute //the arguments a little int dev_num; safe_cuda(hipGetDevice(&dev_num)); if (k <= 16 && d <= 64) { const int BLOCK_SIZE_MUL = 128; int block_rows = ::min((size_t)BLOCK_SIZE_MUL / k, n); int grid_size = ::ceil(static_cast<float>(n) / block_rows); int shared_size_B = d * k * sizeof(float); int shared_size_A = block_rows * d * sizeof(float); matmul << < grid_size, BLOCK_SIZE_MUL, shared_size_B + shared_size_A >> > ( thrust::raw_pointer_cast(data.data() + data_offset * d), thrust::raw_pointer_cast(centroids.data()), thrust::raw_pointer_cast(pairwise_distances.data()), alpha, beta, n, d, k, block_rows ); } else { hipblasStatus_t stat = safe_cublas(hipblasSgemm(detail::cublas_handle[dev_num], HIPBLAS_OP_T, HIPBLAS_OP_N, n, k, d, &alpha, thrust::raw_pointer_cast(data.data() + data_offset * d), d,//Has to be n or d thrust::raw_pointer_cast(centroids.data()), d,//Has to be k or d &beta, thrust::raw_pointer_cast(pairwise_distances.data()), n)); //Has to be n or k if (stat != HIPBLAS_STATUS_SUCCESS) { std::cout << "Invalid Sgemm" << std::endl; exit(1); } } thrust::for_each(pairwise_distances.begin(), pairwise_distances.end(), absolute_value<float>()); // in-place transformation to ensure all distances are positive indefinite #if(CHECK) gpuErrchk(hipGetLastError()); #endif } } } namespace mycub { void *d_key_alt_buf[MAX_NGPUS]; unsigned int key_alt_buf_bytes[MAX_NGPUS]; void *d_value_alt_buf[MAX_NGPUS]; unsigned int value_alt_buf_bytes[MAX_NGPUS]; void *d_temp_storage[MAX_NGPUS]; size_t temp_storage_bytes[MAX_NGPUS]; void *d_temp_storage2[MAX_NGPUS]; size_t 
temp_storage_bytes2[MAX_NGPUS]; bool cub_initted; void cub_init() { // std::cout <<"CUB init" << std::endl; for (int q = 0; q < MAX_NGPUS; q++) { d_key_alt_buf[q] = NULL; key_alt_buf_bytes[q] = 0; d_value_alt_buf[q] = NULL; value_alt_buf_bytes[q] = 0; d_temp_storage[q] = NULL; temp_storage_bytes[q] = 0; d_temp_storage2[q] = NULL; temp_storage_bytes2[q] = 0; } cub_initted = true; } void cub_init(int dev) { d_key_alt_buf[dev] = NULL; key_alt_buf_bytes[dev] = 0; d_value_alt_buf[dev] = NULL; value_alt_buf_bytes[dev] = 0; d_temp_storage[dev] = NULL; temp_storage_bytes[dev] = 0; d_temp_storage2[dev] = NULL; temp_storage_bytes2[dev] = 0; } void cub_close() { for (int q = 0; q < MAX_NGPUS; q++) { if (d_key_alt_buf[q]) safe_cuda(hipFree(d_key_alt_buf[q])); if (d_value_alt_buf[q]) safe_cuda(hipFree(d_value_alt_buf[q])); if (d_temp_storage[q]) safe_cuda(hipFree(d_temp_storage[q])); if (d_temp_storage2[q]) safe_cuda(hipFree(d_temp_storage2[q])); d_temp_storage[q] = NULL; d_temp_storage2[q] = NULL; } cub_initted = false; } void cub_close(int dev) { if (d_key_alt_buf[dev]) safe_cuda(hipFree(d_key_alt_buf[dev])); if (d_value_alt_buf[dev]) safe_cuda(hipFree(d_value_alt_buf[dev])); if (d_temp_storage[dev]) safe_cuda(hipFree(d_temp_storage[dev])); if (d_temp_storage2[dev]) safe_cuda(hipFree(d_temp_storage2[dev])); d_temp_storage[dev] = NULL; d_temp_storage2[dev] = NULL; } void sort_by_key_int(thrust::device_vector<int> &keys, thrust::device_vector<int> &values) { int dev_num; safe_cuda(hipGetDevice(&dev_num)); hipStream_t this_stream = cuda_stream[dev_num]; int SIZE = keys.size(); //int *d_key_alt_buf, *d_value_alt_buf; if (key_alt_buf_bytes[dev_num] < sizeof(int) * SIZE) { if (d_key_alt_buf[dev_num]) safe_cuda(hipFree(d_key_alt_buf[dev_num])); safe_cuda(hipMalloc(&d_key_alt_buf[dev_num], sizeof(int) * SIZE)); key_alt_buf_bytes[dev_num] = sizeof(int) * SIZE; } if (value_alt_buf_bytes[dev_num] < sizeof(int) * SIZE) { if (d_value_alt_buf[dev_num]) 
safe_cuda(hipFree(d_value_alt_buf[dev_num])); safe_cuda(hipMalloc(&d_value_alt_buf[dev_num], sizeof(int) * SIZE)); value_alt_buf_bytes[dev_num] = sizeof(int) * SIZE; } cub::DoubleBuffer<int> d_keys(thrust::raw_pointer_cast(keys.data()), (int *) d_key_alt_buf[dev_num]); cub::DoubleBuffer<int> d_values(thrust::raw_pointer_cast(values.data()), (int *) d_value_alt_buf[dev_num]); // Determine temporary device storage requirements for sorting operation if (!d_temp_storage[dev_num]) { hipcub::DeviceRadixSort::SortPairs(d_temp_storage[dev_num], temp_storage_bytes[dev_num], d_keys, d_values, SIZE, 0, sizeof(int) * 8, this_stream); // Allocate temporary storage for sorting operation safe_cuda(hipMalloc(&d_temp_storage[dev_num], temp_storage_bytes[dev_num])); } // Run sorting operation hipcub::DeviceRadixSort::SortPairs(d_temp_storage[dev_num], temp_storage_bytes[dev_num], d_keys, d_values, SIZE, 0, sizeof(int) * 8, this_stream); // Sorted keys and values are referenced by d_keys.Current() and d_values.Current() keys.data() = thrust::device_pointer_cast(d_keys.Current()); values.data() = thrust::device_pointer_cast(d_values.Current()); } }
e4948c5e87bad37c790879e65806551f8d7c782d.cu
/*! * Modifications Copyright 2017 H2O.ai, Inc. */ // original code from https://github.com/NVIDIA/kmeans (Apache V2.0 License) #include "kmeans_labels.h" #include <cublas_v2.h> #include <cfloat> #include <unistd.h> #include "kmeans_general.h" cudaStream_t cuda_stream[MAX_NGPUS]; namespace kmeans { namespace detail { template<typename T> struct absolute_value { __host__ __device__ void operator()(T &x) const { x = (x > 0 ? x : -x); } }; cublasHandle_t cublas_handle[MAX_NGPUS]; void labels_init() { cublasStatus_t stat; cudaError_t err; int dev_num; safe_cuda(cudaGetDevice(&dev_num)); stat = cublasCreate(&detail::cublas_handle[dev_num]); if (stat != CUBLAS_STATUS_SUCCESS) { std::cout << "CUBLAS initialization failed" << std::endl; exit(1); } err = safe_cuda(cudaStreamCreate(&cuda_stream[dev_num])); if (err != cudaSuccess) { std::cout << "Stream creation failed" << std::endl; } cublasSetStream(cublas_handle[dev_num], cuda_stream[dev_num]); mycub::cub_init(dev_num); } void labels_close() { int dev_num; safe_cuda(cudaGetDevice(&dev_num)); safe_cublas(cublasDestroy(cublas_handle[dev_num])); safe_cuda(cudaStreamDestroy(cuda_stream[dev_num])); mycub::cub_close(dev_num); } void streamsync(int dev_num) { cudaStreamSynchronize(cuda_stream[dev_num]); } /** * Matrix multiplication: alpha * A^T * B + beta * C * Optimized for tall and skinny matrices * * @tparam float_t * @param A * @param B * @param C * @param alpha * @param beta * @param n * @param d * @param k * @param max_block_rows * @return */ template<typename float_t> __global__ void matmul(const float_t *A, const float_t *B, float_t *C, const float_t alpha, const float_t beta, int n, int d, int k, int max_block_rows) { extern __shared__ __align__(sizeof(float_t)) unsigned char my_smem[]; float_t *shared = reinterpret_cast<float_t *>(my_smem); float_t *s_A = shared; float_t *s_B = shared + max_block_rows * d; for (int i = threadIdx.x; i < d * k; i += blockDim.x) { s_B[i] = B[i]; } int block_start_row_index = blockIdx.x * 
max_block_rows; int block_rows = max_block_rows; if (blockIdx.x == gridDim.x - 1 && n % max_block_rows != 0) { block_rows = n % max_block_rows; } for (int i = threadIdx.x; i < d * block_rows; i += blockDim.x) { s_A[i] = alpha * A[d * block_start_row_index + i]; } __syncthreads(); float_t elem_c = 0; int col_c = threadIdx.x % k; int abs_row_c = block_start_row_index + threadIdx.x / k; int row_c = threadIdx.x / k; // Thread/Block combination either too far for data array // Or is calculating for index that should be calculated in a different blocks - in some edge cases // "col_c * n + abs_row_c" can yield same result in different thread/block combinations if (abs_row_c >= n || threadIdx.x >= block_rows * k) { return; } for (int i = 0; i < d; i++) { elem_c += s_B[d * col_c + i] * s_A[d * row_c + i]; } C[col_c * n + abs_row_c] = beta * C[col_c * n + abs_row_c] + elem_c; } template<> void calculate_distances<double>(int verbose, int q, size_t n, int d, int k, thrust::device_vector<double> &data, size_t data_offset, thrust::device_vector<double> &centroids, thrust::device_vector<double> &data_dots, thrust::device_vector<double> &centroid_dots, thrust::device_vector<double> &pairwise_distances) { detail::make_self_dots(k, d, centroids, centroid_dots); detail::make_all_dots(n, k, data_offset, data_dots, centroid_dots, pairwise_distances); //||x-y||^2 = ||x||^2 + ||y||^2 - 2 x . 
y //pairwise_distances has ||x||^2 + ||y||^2, so beta = 1 //The dgemm calculates x.y for all x and y, so alpha = -2.0 double alpha = -2.0; double beta = 1.0; //If the data were in standard column major order, we'd do a //centroids * data ^ T //But the data is in row major order, so we have to permute //the arguments a little int dev_num; safe_cuda(cudaGetDevice(&dev_num)); if (k <= 16 && d <= 64) { const int BLOCK_SIZE_MUL = 128; int block_rows = std::min((size_t)BLOCK_SIZE_MUL / k, n); int grid_size = std::ceil(static_cast<double>(n) / block_rows); int shared_size_B = d * k * sizeof(double); int shared_size_A = block_rows * d * sizeof(double); matmul << < grid_size, BLOCK_SIZE_MUL, shared_size_B + shared_size_A >> > ( thrust::raw_pointer_cast(data.data() + data_offset * d), thrust::raw_pointer_cast(centroids.data()), thrust::raw_pointer_cast(pairwise_distances.data()), alpha, beta, n, d, k, block_rows ); } else { cublasStatus_t stat = safe_cublas(cublasDgemm(detail::cublas_handle[dev_num], CUBLAS_OP_T, CUBLAS_OP_N, n, k, d, &alpha, thrust::raw_pointer_cast(data.data() + data_offset * d), d,//Has to be n or d thrust::raw_pointer_cast(centroids.data()), d,//Has to be k or d &beta, thrust::raw_pointer_cast(pairwise_distances.data()), n)); //Has to be n or k if (stat != CUBLAS_STATUS_SUCCESS) { std::cout << "Invalid Dgemm" << std::endl; exit(1); } } thrust::for_each(pairwise_distances.begin(), pairwise_distances.end(), absolute_value<double>()); // in-place transformation to ensure all distances are positive indefinite #if(CHECK) gpuErrchk(cudaGetLastError()); #endif } template<> void calculate_distances<float>(int verbose, int q, size_t n, int d, int k, thrust::device_vector<float> &data, size_t data_offset, thrust::device_vector<float> &centroids, thrust::device_vector<float> &data_dots, thrust::device_vector<float> &centroid_dots, thrust::device_vector<float> &pairwise_distances) { detail::make_self_dots(k, d, centroids, centroid_dots); detail::make_all_dots(n, k, 
data_offset, data_dots, centroid_dots, pairwise_distances); //||x-y||^2 = ||x||^2 + ||y||^2 - 2 x . y //pairwise_distances has ||x||^2 + ||y||^2, so beta = 1 //The dgemm calculates x.y for all x and y, so alpha = -2.0 float alpha = -2.0; float beta = 1.0; //If the data were in standard column major order, we'd do a //centroids * data ^ T //But the data is in row major order, so we have to permute //the arguments a little int dev_num; safe_cuda(cudaGetDevice(&dev_num)); if (k <= 16 && d <= 64) { const int BLOCK_SIZE_MUL = 128; int block_rows = std::min((size_t)BLOCK_SIZE_MUL / k, n); int grid_size = std::ceil(static_cast<float>(n) / block_rows); int shared_size_B = d * k * sizeof(float); int shared_size_A = block_rows * d * sizeof(float); matmul << < grid_size, BLOCK_SIZE_MUL, shared_size_B + shared_size_A >> > ( thrust::raw_pointer_cast(data.data() + data_offset * d), thrust::raw_pointer_cast(centroids.data()), thrust::raw_pointer_cast(pairwise_distances.data()), alpha, beta, n, d, k, block_rows ); } else { cublasStatus_t stat = safe_cublas(cublasSgemm(detail::cublas_handle[dev_num], CUBLAS_OP_T, CUBLAS_OP_N, n, k, d, &alpha, thrust::raw_pointer_cast(data.data() + data_offset * d), d,//Has to be n or d thrust::raw_pointer_cast(centroids.data()), d,//Has to be k or d &beta, thrust::raw_pointer_cast(pairwise_distances.data()), n)); //Has to be n or k if (stat != CUBLAS_STATUS_SUCCESS) { std::cout << "Invalid Sgemm" << std::endl; exit(1); } } thrust::for_each(pairwise_distances.begin(), pairwise_distances.end(), absolute_value<float>()); // in-place transformation to ensure all distances are positive indefinite #if(CHECK) gpuErrchk(cudaGetLastError()); #endif } } } namespace mycub { void *d_key_alt_buf[MAX_NGPUS]; unsigned int key_alt_buf_bytes[MAX_NGPUS]; void *d_value_alt_buf[MAX_NGPUS]; unsigned int value_alt_buf_bytes[MAX_NGPUS]; void *d_temp_storage[MAX_NGPUS]; size_t temp_storage_bytes[MAX_NGPUS]; void *d_temp_storage2[MAX_NGPUS]; size_t 
temp_storage_bytes2[MAX_NGPUS]; bool cub_initted; void cub_init() { // std::cout <<"CUB init" << std::endl; for (int q = 0; q < MAX_NGPUS; q++) { d_key_alt_buf[q] = NULL; key_alt_buf_bytes[q] = 0; d_value_alt_buf[q] = NULL; value_alt_buf_bytes[q] = 0; d_temp_storage[q] = NULL; temp_storage_bytes[q] = 0; d_temp_storage2[q] = NULL; temp_storage_bytes2[q] = 0; } cub_initted = true; } void cub_init(int dev) { d_key_alt_buf[dev] = NULL; key_alt_buf_bytes[dev] = 0; d_value_alt_buf[dev] = NULL; value_alt_buf_bytes[dev] = 0; d_temp_storage[dev] = NULL; temp_storage_bytes[dev] = 0; d_temp_storage2[dev] = NULL; temp_storage_bytes2[dev] = 0; } void cub_close() { for (int q = 0; q < MAX_NGPUS; q++) { if (d_key_alt_buf[q]) safe_cuda(cudaFree(d_key_alt_buf[q])); if (d_value_alt_buf[q]) safe_cuda(cudaFree(d_value_alt_buf[q])); if (d_temp_storage[q]) safe_cuda(cudaFree(d_temp_storage[q])); if (d_temp_storage2[q]) safe_cuda(cudaFree(d_temp_storage2[q])); d_temp_storage[q] = NULL; d_temp_storage2[q] = NULL; } cub_initted = false; } void cub_close(int dev) { if (d_key_alt_buf[dev]) safe_cuda(cudaFree(d_key_alt_buf[dev])); if (d_value_alt_buf[dev]) safe_cuda(cudaFree(d_value_alt_buf[dev])); if (d_temp_storage[dev]) safe_cuda(cudaFree(d_temp_storage[dev])); if (d_temp_storage2[dev]) safe_cuda(cudaFree(d_temp_storage2[dev])); d_temp_storage[dev] = NULL; d_temp_storage2[dev] = NULL; } void sort_by_key_int(thrust::device_vector<int> &keys, thrust::device_vector<int> &values) { int dev_num; safe_cuda(cudaGetDevice(&dev_num)); cudaStream_t this_stream = cuda_stream[dev_num]; int SIZE = keys.size(); //int *d_key_alt_buf, *d_value_alt_buf; if (key_alt_buf_bytes[dev_num] < sizeof(int) * SIZE) { if (d_key_alt_buf[dev_num]) safe_cuda(cudaFree(d_key_alt_buf[dev_num])); safe_cuda(cudaMalloc(&d_key_alt_buf[dev_num], sizeof(int) * SIZE)); key_alt_buf_bytes[dev_num] = sizeof(int) * SIZE; } if (value_alt_buf_bytes[dev_num] < sizeof(int) * SIZE) { if (d_value_alt_buf[dev_num]) 
safe_cuda(cudaFree(d_value_alt_buf[dev_num])); safe_cuda(cudaMalloc(&d_value_alt_buf[dev_num], sizeof(int) * SIZE)); value_alt_buf_bytes[dev_num] = sizeof(int) * SIZE; } cub::DoubleBuffer<int> d_keys(thrust::raw_pointer_cast(keys.data()), (int *) d_key_alt_buf[dev_num]); cub::DoubleBuffer<int> d_values(thrust::raw_pointer_cast(values.data()), (int *) d_value_alt_buf[dev_num]); // Determine temporary device storage requirements for sorting operation if (!d_temp_storage[dev_num]) { cub::DeviceRadixSort::SortPairs(d_temp_storage[dev_num], temp_storage_bytes[dev_num], d_keys, d_values, SIZE, 0, sizeof(int) * 8, this_stream); // Allocate temporary storage for sorting operation safe_cuda(cudaMalloc(&d_temp_storage[dev_num], temp_storage_bytes[dev_num])); } // Run sorting operation cub::DeviceRadixSort::SortPairs(d_temp_storage[dev_num], temp_storage_bytes[dev_num], d_keys, d_values, SIZE, 0, sizeof(int) * 8, this_stream); // Sorted keys and values are referenced by d_keys.Current() and d_values.Current() keys.data() = thrust::device_pointer_cast(d_keys.Current()); values.data() = thrust::device_pointer_cast(d_values.Current()); } }
03e421ee9f4066e91e209a88526cb4cab6e3598b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/Activation.cuh" #include <vector> /* ---------------------------------------------- maxGPU Parameters: a - double b - double Finds max of a and b and returns it Returns: max(a, b) ---------------------------------------------- */ __device__ double maxGPU(double a, double b) { bool sel = (a <= b); return (double)(sel) * b + (double)(1 - sel) * a; } // end maxGPU /* ---------------------------------------------- binaryStep Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies binaryStep to every element of x ---------------------------------------------- */ __global__ void binaryStep(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = (int)(x[idx] >= 0) % 2; // if val is 0, its 1 ; if < 0, its 0 } // end binaryStep /* ---------------------------------------------- binaryStepGPU Parameters: z - vector to apply activation to, can be matrix in row-major form calls binaryStep cuda kernel on z.data() Returns: a - activated z, (f(z)) ---------------------------------------------- */ std::vector<double> binaryStepGPU(std::vector<double>& z) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 
512 : z.size(); hipMalloc((void **) &d_z, z.size() * sizeof(double)); hipMemcpy(d_z, z.data(), z.size() * sizeof(double), hipMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); hipLaunchKernelGGL(( binaryStep), dim3(GRID), dim3(BLOCK), 0, 0, d_z, z.size()); hipDeviceSynchronize(); hipMemcpy(a.data(), d_z, z.size() * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_z); return a; } // binaryStepGPU /* ---------------------------------------------- sigmoid Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies sigmoid (1/(1 + exp(-x))) to every element of x ---------------------------------------------- */ // exp(x) returns e^x ; its a cuda library function __global__ void sigmoid(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = 1 / (1 + exp(-1 * x[idx])); } // end sigmoid /* ---------------------------------------------- sigmoid_prime Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies sigmoidPrime (exp(-x)(1 + exp(-x))^(-2)) to every element of x ---------------------------------------------- */ __global__ void sigmoid_prime(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = exp(-1 * x[idx]) / ((1 + exp(-1 * x[idx])) * (1 + exp(-1 * x[idx]))); } // end sigmoid_prime /* ---------------------------------------------- sigmoidGPU Parameters: z - vector to apply activation to, can be matrix in row-major form diff - bool determining whether to applu sig or sig_prime calls sigmoid or sigmoid_prime cuda kernel on z.data() Returns: a - activated z, (f(z) or f'(z)) ---------------------------------------------- */ std::vector<double> sigmoidGPU(std::vector<double>& z, bool diff) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 
512 : z.size(); hipMalloc((void **) &d_z, z.size() * sizeof(double)); hipMemcpy(d_z, z.data(), z.size() * sizeof(double), hipMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); if (!diff) hipLaunchKernelGGL(( sigmoid), dim3(GRID), dim3(BLOCK), 0, 0, d_z, z.size()); else hipLaunchKernelGGL(( sigmoid_prime), dim3(GRID), dim3(BLOCK), 0, 0, d_z, z.size()); hipDeviceSynchronize(); hipMemcpy(a.data(), d_z, z.size() * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_z); return a; } // end sigmoidGPU /* ---------------------------------------------- relu Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies relu (x if x > 0, else 0) to every element of x ---------------------------------------------- */ __global__ void relu(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = maxGPU(x[idx], 0); // if val > 0, return itself ; if 0, return 0 } // end relu /* ---------------------------------------------- relu_prime Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies relu_prime (1 if x > 0, else 0) to every element of x ---------------------------------------------- */ __global__ void relu_prime(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = (x[idx] > 0); // if val > 0, 1 ; if 0, 0 } // end relu /* ---------------------------------------------- reluGPU Parameters: z - vector to apply activation to, can be matrix in row-major form diff - bool determining whether to applu sig or sig_prime calls relu or relu_prime cuda kernel on z.data() Returns: a - activated z, (f(z) or f'(z)) ---------------------------------------------- */ std::vector<double> reluGPU(std::vector<double>& z, bool diff) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 
512 : z.size(); hipMalloc((void **) &d_z, z.size() * sizeof(double)); hipMemcpy(d_z, z.data(), z.size() * sizeof(double), hipMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); if (!diff) hipLaunchKernelGGL(( relu), dim3(GRID), dim3(BLOCK), 0, 0, d_z, z.size()); else hipLaunchKernelGGL(( relu_prime), dim3(GRID), dim3(BLOCK), 0, 0, d_z, z.size()); hipDeviceSynchronize(); hipMemcpy(a.data(), d_z, z.size() * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_z); return a; } // end reluGPU /* ---------------------------------------------- leakyRelu Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies leakyRelu (x if x > 0, else 0.05x) to every element of x ---------------------------------------------- */ __global__ void leakyRelu(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = maxGPU(x[idx], 0.05 * x[idx]); } // end leakyRelu /* ---------------------------------------------- leakyRelu_prime Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies leakyRelu_prime (1 if x > 0, else 0.05) to every element of x ---------------------------------------------- */ __global__ void leakyRelu_prime(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; bool size = x[idx] > 0; x[idx] = size * x[idx] + (1 - size) * 0.05; } // end leakyRelu /* ---------------------------------------------- leakyReluGPU Parameters: z - vector to apply activation to, can be matrix in row-major form diff - bool determining whether to applu sig or sig_prime calls leakyRelu or leakyRelu_prime cuda kernel on z.data() Returns: a - activated z, (f(z) or f'(z)) ---------------------------------------------- */ std::vector<double> leakyReluGPU(std::vector<double>& z, bool diff) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 
512 : z.size(); hipMalloc((void **) &d_z, z.size() * sizeof(double)); hipMemcpy(d_z, z.data(), z.size() * sizeof(double), hipMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); if (!diff) hipLaunchKernelGGL(( leakyRelu), dim3(GRID), dim3(BLOCK), 0, 0, d_z, z.size()); else hipLaunchKernelGGL(( leakyRelu_prime), dim3(GRID), dim3(BLOCK), 0, 0, d_z, z.size()); hipDeviceSynchronize(); hipMemcpy(a.data(), d_z, z.size() * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_z); return a; } // end leakyReluGPU __global__ void exponential(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = exp(x[idx]); } // end exponential std::vector<double> exponentialGPU(std::vector<double>& z, bool diff) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 512 : z.size(); hipMalloc((void **) &d_z, z.size() * sizeof(double)); hipMemcpy(d_z, z.data(), z.size() * sizeof(double), hipMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); if (!diff) hipLaunchKernelGGL(( exponential), dim3(GRID), dim3(BLOCK), 0, 0, d_z, z.size()); else hipLaunchKernelGGL(( exponential), dim3(GRID), dim3(BLOCK), 0, 0, d_z, z.size()); hipDeviceSynchronize(); hipMemcpy(a.data(), d_z, z.size() * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_z); return a; } // end exponentialGPU
03e421ee9f4066e91e209a88526cb4cab6e3598b.cu
#include "../include/Activation.cuh" #include <vector> /* ---------------------------------------------- maxGPU Parameters: a - double b - double Finds max of a and b and returns it Returns: max(a, b) ---------------------------------------------- */ __device__ double maxGPU(double a, double b) { bool sel = (a <= b); return (double)(sel) * b + (double)(1 - sel) * a; } // end maxGPU /* ---------------------------------------------- binaryStep Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies binaryStep to every element of x ---------------------------------------------- */ __global__ void binaryStep(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = (int)(x[idx] >= 0) % 2; // if val is ≥ 0, its 1 ; if < 0, its 0 } // end binaryStep /* ---------------------------------------------- binaryStepGPU Parameters: z - vector to apply activation to, can be matrix in row-major form calls binaryStep cuda kernel on z.data() Returns: a - activated z, (f(z)) ---------------------------------------------- */ std::vector<double> binaryStepGPU(std::vector<double>& z) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 
512 : z.size(); cudaMalloc((void **) &d_z, z.size() * sizeof(double)); cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); binaryStep<<<GRID, BLOCK, 0>>>(d_z, z.size()); cudaDeviceSynchronize(); cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_z); return a; } // binaryStepGPU /* ---------------------------------------------- sigmoid Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies sigmoid (1/(1 + exp(-x))) to every element of x ---------------------------------------------- */ // exp(x) returns e^x ; its a cuda library function __global__ void sigmoid(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = 1 / (1 + exp(-1 * x[idx])); } // end sigmoid /* ---------------------------------------------- sigmoid_prime Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies sigmoidPrime (exp(-x)(1 + exp(-x))^(-2)) to every element of x ---------------------------------------------- */ __global__ void sigmoid_prime(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = exp(-1 * x[idx]) / ((1 + exp(-1 * x[idx])) * (1 + exp(-1 * x[idx]))); } // end sigmoid_prime /* ---------------------------------------------- sigmoidGPU Parameters: z - vector to apply activation to, can be matrix in row-major form diff - bool determining whether to applu sig or sig_prime calls sigmoid or sigmoid_prime cuda kernel on z.data() Returns: a - activated z, (f(z) or f'(z)) ---------------------------------------------- */ std::vector<double> sigmoidGPU(std::vector<double>& z, bool diff) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 
512 : z.size(); cudaMalloc((void **) &d_z, z.size() * sizeof(double)); cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); if (!diff) sigmoid<<<GRID, BLOCK, 0>>>(d_z, z.size()); else sigmoid_prime<<<GRID, BLOCK, 0>>>(d_z, z.size()); cudaDeviceSynchronize(); cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_z); return a; } // end sigmoidGPU /* ---------------------------------------------- relu Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies relu (x if x > 0, else 0) to every element of x ---------------------------------------------- */ __global__ void relu(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = maxGPU(x[idx], 0); // if val > 0, return itself ; if ≤ 0, return 0 } // end relu /* ---------------------------------------------- relu_prime Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies relu_prime (1 if x > 0, else 0) to every element of x ---------------------------------------------- */ __global__ void relu_prime(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = (x[idx] > 0); // if val > 0, 1 ; if ≤ 0, 0 } // end relu /* ---------------------------------------------- reluGPU Parameters: z - vector to apply activation to, can be matrix in row-major form diff - bool determining whether to applu sig or sig_prime calls relu or relu_prime cuda kernel on z.data() Returns: a - activated z, (f(z) or f'(z)) ---------------------------------------------- */ std::vector<double> reluGPU(std::vector<double>& z, bool diff) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 
512 : z.size(); cudaMalloc((void **) &d_z, z.size() * sizeof(double)); cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); if (!diff) relu<<<GRID, BLOCK, 0>>>(d_z, z.size()); else relu_prime<<<GRID, BLOCK, 0>>>(d_z, z.size()); cudaDeviceSynchronize(); cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_z); return a; } // end reluGPU /* ---------------------------------------------- leakyRelu Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies leakyRelu (x if x > 0, else 0.05x) to every element of x ---------------------------------------------- */ __global__ void leakyRelu(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = maxGPU(x[idx], 0.05 * x[idx]); } // end leakyRelu /* ---------------------------------------------- leakyRelu_prime Parameters: x - vector to apply activation to, can be matrix in row-major form len - length of x Applies leakyRelu_prime (1 if x > 0, else 0.05) to every element of x ---------------------------------------------- */ __global__ void leakyRelu_prime(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; bool size = x[idx] > 0; x[idx] = size * x[idx] + (1 - size) * 0.05; } // end leakyRelu /* ---------------------------------------------- leakyReluGPU Parameters: z - vector to apply activation to, can be matrix in row-major form diff - bool determining whether to applu sig or sig_prime calls leakyRelu or leakyRelu_prime cuda kernel on z.data() Returns: a - activated z, (f(z) or f'(z)) ---------------------------------------------- */ std::vector<double> leakyReluGPU(std::vector<double>& z, bool diff) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 
512 : z.size(); cudaMalloc((void **) &d_z, z.size() * sizeof(double)); cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); if (!diff) leakyRelu<<<GRID, BLOCK, 0>>>(d_z, z.size()); else leakyRelu_prime<<<GRID, BLOCK, 0>>>(d_z, z.size()); cudaDeviceSynchronize(); cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_z); return a; } // end leakyReluGPU __global__ void exponential(double *x, int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; x[idx] = exp(x[idx]); } // end exponential std::vector<double> exponentialGPU(std::vector<double>& z, bool diff) { double *d_z; std::vector<double> a(z.size()); int BLOCKSIZE = z.size() >= 512 ? 512 : z.size(); cudaMalloc((void **) &d_z, z.size() * sizeof(double)); cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice); dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE); dim3 BLOCK(BLOCKSIZE); if (!diff) exponential<<<GRID, BLOCK, 0>>>(d_z, z.size()); else exponential<<<GRID, BLOCK, 0>>>(d_z, z.size()); cudaDeviceSynchronize(); cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_z); return a; } // end exponentialGPU
bb5cea1d8640c1c64aaaa4ea302a75ddf58816d9.hip
// !!! This is a file automatically generated by hipify!!! #include "matrix_hip.cuh" __device__ matrix_list_t* device_matrix_list_constructor(buffer_t* buffer, unsigned int num) { matrix_list_t* list = (matrix_list_t*)buffer_malloc(buffer, sizeof(matrix_list_t)); list->num = num; list->matrix_list = (matrix_t**)buffer_malloc(buffer, sizeof(matrix_t*) * num); return list; } __device__ matrix_list_t* device_matrix_list_add(buffer_t* buffer, matrix_list_t* m1, matrix_list_t* m2) { //assert(m1->num == m2->num); matrix_list_t* m = device_matrix_list_constructor(buffer, m1->num); int i; for(i=0; i<m1->num; i++) { m->matrix_list[i] = device_matrix_add(buffer, m1->matrix_list[i], m2->matrix_list[i]); } return m; } __device__ matrix_list_t* device_matrix_list_subtract(buffer_t* buffer, matrix_list_t* m1, matrix_list_t* m2) { //assert(m1->num == m2->num); matrix_list_t* m = device_matrix_list_constructor(buffer, m1->num); int i; for(i=0; i<m1->num; i++) { m->matrix_list[i] = device_matrix_subtract(buffer, m1->matrix_list[i], m2->matrix_list[i]); } return m; } __device__ matrix_list_t* device_matrix_list_scalar_multiply(buffer_t* buffer, matrix_list_t* m1, float scalar) { matrix_list_t* m = device_matrix_list_constructor(buffer, m1->num); int i; for(i=0; i<m1->num; i++) { m->matrix_list[i] = device_matrix_scalar_multiply(buffer, m1->matrix_list[i], scalar); } return m; } __device__ void device_free_matrix_list(matrix_list_t* m) { }
bb5cea1d8640c1c64aaaa4ea302a75ddf58816d9.cu
#include "matrix.cuh" __device__ matrix_list_t* device_matrix_list_constructor(buffer_t* buffer, unsigned int num) { matrix_list_t* list = (matrix_list_t*)buffer_malloc(buffer, sizeof(matrix_list_t)); list->num = num; list->matrix_list = (matrix_t**)buffer_malloc(buffer, sizeof(matrix_t*) * num); return list; } __device__ matrix_list_t* device_matrix_list_add(buffer_t* buffer, matrix_list_t* m1, matrix_list_t* m2) { //assert(m1->num == m2->num); matrix_list_t* m = device_matrix_list_constructor(buffer, m1->num); int i; for(i=0; i<m1->num; i++) { m->matrix_list[i] = device_matrix_add(buffer, m1->matrix_list[i], m2->matrix_list[i]); } return m; } __device__ matrix_list_t* device_matrix_list_subtract(buffer_t* buffer, matrix_list_t* m1, matrix_list_t* m2) { //assert(m1->num == m2->num); matrix_list_t* m = device_matrix_list_constructor(buffer, m1->num); int i; for(i=0; i<m1->num; i++) { m->matrix_list[i] = device_matrix_subtract(buffer, m1->matrix_list[i], m2->matrix_list[i]); } return m; } __device__ matrix_list_t* device_matrix_list_scalar_multiply(buffer_t* buffer, matrix_list_t* m1, float scalar) { matrix_list_t* m = device_matrix_list_constructor(buffer, m1->num); int i; for(i=0; i<m1->num; i++) { m->matrix_list[i] = device_matrix_scalar_multiply(buffer, m1->matrix_list[i], scalar); } return m; } __device__ void device_free_matrix_list(matrix_list_t* m) { }
110b8209d3620372e6bf93d56abe3b672015b395.hip
// !!! This is a file automatically generated by hipify!!! #include <unittest/unittest.h> #include <thrust/detail/device/generic/scalar/select.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <thrust/merge.h> template<typename Iterator1, typename Iterator2> struct select_functor { Iterator1 first1, last1; Iterator2 first2, last2; int k; select_functor(Iterator1 f1, Iterator1 l1, Iterator2 f2, Iterator2 l2, int kk) : first1(f1), last1(l1), first2(f2), last2(l2), k(kk) {} template<typename Dummy> __host__ __device__ typename thrust::iterator_value<Iterator1>::type operator()(Dummy) { typedef typename thrust::iterator_value<Iterator1>::type value_type; return thrust::detail::device::generic::scalar::select(first1, last1, first2, last2, k, thrust::less<value_type>()); } }; template<typename T> void TestSelect(const size_t n) { #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC && TORCH_HIP_VERSION < 3020 KNOWN_FAILURE; #else if(n == 0) return; typedef typename thrust::device_vector<T>::iterator iterator; thrust::host_vector<T> h_A = unittest::random_integers<T>(n); thrust::host_vector<T> h_B = unittest::random_integers<T>(n); // A and B must be sorted thrust::stable_sort(h_A.begin(), h_A.end()); thrust::stable_sort(h_B.begin(), h_B.end()); thrust::device_vector<T> d_A = h_A; thrust::device_vector<T> d_B = h_B; // merge A and B to create a reference thrust::host_vector<T> ref; ref.insert(ref.end(), h_A.begin(), h_A.end()); ref.insert(ref.end(), h_B.begin(), h_B.end()); thrust::merge(h_A.begin(), h_A.end(), h_B.begin(), h_B.end(), ref.begin()); // choose some interesting values for k const size_t n_k = 6; const int k[n_k] = {0, n-1, n/2, n, thrust::min(n+1, 2*n-1), 2*n-1}; for(size_t i = 0; i < n_k; ++i) { // test device thrust::device_vector<T> result(1); select_functor<iterator,iterator> f(d_A.begin(), d_A.end(), d_B.begin(), d_B.end(), k[i]); thrust::transform(thrust::make_counting_iterator(0u), thrust::make_counting_iterator(1u), 
result.begin(), f); ASSERT_EQUAL(ref[k[i]], (T) result[0]); } #endif } DECLARE_VARIABLE_UNITTEST(TestSelect); template<typename U> void TestSelectKeyValue(const size_t n) { #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC && TORCH_HIP_VERSION < 3020 KNOWN_FAILURE; #else if(n == 0) return; typedef key_value<U,U> T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::host_vector<U> h_keys_A = unittest::random_integers<U>(n); thrust::host_vector<U> h_values_A = unittest::random_integers<U>(n); thrust::host_vector<U> h_keys_B = unittest::random_integers<U>(n); thrust::host_vector<U> h_values_B = unittest::random_integers<U>(n); thrust::host_vector<T> h_A(n), h_B(n); for(size_t i = 0; i < n; ++i) { h_A[i] = T(h_keys_A[i], h_values_A[i]); h_B[i] = T(h_keys_B[i], h_values_B[i]); } // A and B must be sorted thrust::stable_sort(h_A.begin(), h_A.end()); thrust::stable_sort(h_B.begin(), h_B.end()); thrust::device_vector<T> d_A = h_A; thrust::device_vector<T> d_B = h_B; // merge A and B to create a reference thrust::host_vector<T> ref; ref.insert(ref.end(), h_A.begin(), h_A.end()); ref.insert(ref.end(), h_B.begin(), h_B.end()); thrust::merge(h_A.begin(), h_A.end(), h_B.begin(), h_B.end(), ref.begin()); // choose some interesting values for k const size_t n_k = 6; const int k[n_k] = {0, n-1, n/2, n, thrust::min(n+1, 2*n-1), 2*n-1}; for(size_t i = 0; i < n_k; ++i) { // test device thrust::device_vector<T> result(1); select_functor<iterator,iterator> f(d_A.begin(), d_A.end(), d_B.begin(), d_B.end(), k[i]); thrust::transform(thrust::make_counting_iterator(0u), thrust::make_counting_iterator(1u), result.begin(), f); ASSERT_EQUAL(ref[k[i]], (T) result[0]); } #endif } DECLARE_VARIABLE_UNITTEST(TestSelectKeyValue);
110b8209d3620372e6bf93d56abe3b672015b395.cu
#include <unittest/unittest.h> #include <thrust/detail/device/generic/scalar/select.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <thrust/merge.h> template<typename Iterator1, typename Iterator2> struct select_functor { Iterator1 first1, last1; Iterator2 first2, last2; int k; select_functor(Iterator1 f1, Iterator1 l1, Iterator2 f2, Iterator2 l2, int kk) : first1(f1), last1(l1), first2(f2), last2(l2), k(kk) {} template<typename Dummy> __host__ __device__ typename thrust::iterator_value<Iterator1>::type operator()(Dummy) { typedef typename thrust::iterator_value<Iterator1>::type value_type; return thrust::detail::device::generic::scalar::select(first1, last1, first2, last2, k, thrust::less<value_type>()); } }; template<typename T> void TestSelect(const size_t n) { #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC && CUDA_VERSION < 3020 KNOWN_FAILURE; #else if(n == 0) return; typedef typename thrust::device_vector<T>::iterator iterator; thrust::host_vector<T> h_A = unittest::random_integers<T>(n); thrust::host_vector<T> h_B = unittest::random_integers<T>(n); // A and B must be sorted thrust::stable_sort(h_A.begin(), h_A.end()); thrust::stable_sort(h_B.begin(), h_B.end()); thrust::device_vector<T> d_A = h_A; thrust::device_vector<T> d_B = h_B; // merge A and B to create a reference thrust::host_vector<T> ref; ref.insert(ref.end(), h_A.begin(), h_A.end()); ref.insert(ref.end(), h_B.begin(), h_B.end()); thrust::merge(h_A.begin(), h_A.end(), h_B.begin(), h_B.end(), ref.begin()); // choose some interesting values for k const size_t n_k = 6; const int k[n_k] = {0, n-1, n/2, n, thrust::min(n+1, 2*n-1), 2*n-1}; for(size_t i = 0; i < n_k; ++i) { // test device thrust::device_vector<T> result(1); select_functor<iterator,iterator> f(d_A.begin(), d_A.end(), d_B.begin(), d_B.end(), k[i]); thrust::transform(thrust::make_counting_iterator(0u), thrust::make_counting_iterator(1u), result.begin(), f); ASSERT_EQUAL(ref[k[i]], (T) result[0]); } #endif } 
DECLARE_VARIABLE_UNITTEST(TestSelect); template<typename U> void TestSelectKeyValue(const size_t n) { #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC && CUDA_VERSION < 3020 KNOWN_FAILURE; #else if(n == 0) return; typedef key_value<U,U> T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::host_vector<U> h_keys_A = unittest::random_integers<U>(n); thrust::host_vector<U> h_values_A = unittest::random_integers<U>(n); thrust::host_vector<U> h_keys_B = unittest::random_integers<U>(n); thrust::host_vector<U> h_values_B = unittest::random_integers<U>(n); thrust::host_vector<T> h_A(n), h_B(n); for(size_t i = 0; i < n; ++i) { h_A[i] = T(h_keys_A[i], h_values_A[i]); h_B[i] = T(h_keys_B[i], h_values_B[i]); } // A and B must be sorted thrust::stable_sort(h_A.begin(), h_A.end()); thrust::stable_sort(h_B.begin(), h_B.end()); thrust::device_vector<T> d_A = h_A; thrust::device_vector<T> d_B = h_B; // merge A and B to create a reference thrust::host_vector<T> ref; ref.insert(ref.end(), h_A.begin(), h_A.end()); ref.insert(ref.end(), h_B.begin(), h_B.end()); thrust::merge(h_A.begin(), h_A.end(), h_B.begin(), h_B.end(), ref.begin()); // choose some interesting values for k const size_t n_k = 6; const int k[n_k] = {0, n-1, n/2, n, thrust::min(n+1, 2*n-1), 2*n-1}; for(size_t i = 0; i < n_k; ++i) { // test device thrust::device_vector<T> result(1); select_functor<iterator,iterator> f(d_A.begin(), d_A.end(), d_B.begin(), d_B.end(), k[i]); thrust::transform(thrust::make_counting_iterator(0u), thrust::make_counting_iterator(1u), result.begin(), f); ASSERT_EQUAL(ref[k[i]], (T) result[0]); } #endif } DECLARE_VARIABLE_UNITTEST(TestSelectKeyValue);
a62b893c2bc14e601fda3fa788f286a8a1121e7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void HelloThreadIdx() { printf("Hello World from GPU! %d %d\n", blockIdx.x, threadIdx.x); } __global__ void HelloBlockThreadIdx() { printf("Hello World from GPU! %d %d\n", blockIdx.x, threadIdx.x); } int main() { hipLaunchKernelGGL(( HelloThreadIdx), dim3(2), dim3(4), 0, 0, ); hipLaunchKernelGGL(( HelloBlockThreadIdx), dim3(2), dim3(4), 0, 0, ); hipDeviceSynchronize(); return 0; }
a62b893c2bc14e601fda3fa788f286a8a1121e7c.cu
#include <stdio.h> __global__ void HelloThreadIdx() { printf("Hello World from GPU! %d %d\n", blockIdx.x, threadIdx.x); } __global__ void HelloBlockThreadIdx() { printf("Hello World from GPU! %d %d\n", blockIdx.x, threadIdx.x); } int main() { HelloThreadIdx<<<2, 4>>>(); HelloBlockThreadIdx<<<2, 4>>>(); cudaDeviceSynchronize(); return 0; }
faf5befc1e54e0b421ed85c2df703b9fe65ed0cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or * distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ #include <assert.h> #include <cutil_inline.h> #include "../benchmark_common.h" #include "convolutionSeparable_common.h" //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel[KERNEL_LENGTH]; extern "C" void setConvolutionKernel(float* h_Kernel, hipStream_t stream_app) { hipMemcpyToSymbolAsync(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float), 0, hipMemcpyHostToDevice, stream_app); } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 4 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel(float* d_Dst, float* d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[ROWS_BLOCKDIM_Y] [(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; // Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; // Main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = 
d_Src[i * ROWS_BLOCKDIM_X]; // Left halo for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } // Right halo for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } // Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } extern "C" void convolutionRowsGPU(float* d_Dst, float* d_Src, int imageW, int imageH, hipStream_t stream_app) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, stream_app, d_Dst, d_Src, imageW, imageH, imageW); cutilCheckMsg("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 4 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel(float* d_Dst, float* d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X] [(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; // Offset to the upper halo edge 
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; // Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; // Upper halo for (int i = 0; i < COLUMNS_HALO_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; // Lower halo for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; // Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } extern "C" void convolutionColumnsGPU(float* d_Dst, float* d_Src, int imageW, int imageH, hipStream_t stream_app) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, stream_app, d_Dst, d_Src, imageW, imageH, imageW); cutilCheckMsg("convolutionColumnsKernel() execution failed\n"); }
faf5befc1e54e0b421ed85c2df703b9fe65ed0cc.cu
/* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or * distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ #include <assert.h> #include <cutil_inline.h> #include "../benchmark_common.h" #include "convolutionSeparable_common.h" //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel[KERNEL_LENGTH]; extern "C" void setConvolutionKernel(float* h_Kernel, cudaStream_t stream_app) { cudaMemcpyToSymbolAsync(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float), 0, cudaMemcpyHostToDevice, stream_app); } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 4 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel(float* d_Dst, float* d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[ROWS_BLOCKDIM_Y] [(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; // Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; // Main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; // Left halo for (int i = 0; i < ROWS_HALO_STEPS; i++) { 
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } // Right halo for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } // Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } extern "C" void convolutionRowsGPU(float* d_Dst, float* d_Src, int imageW, int imageH, cudaStream_t stream_app) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); convolutionRowsKernel<<<blocks, threads, 0, stream_app>>>( d_Dst, d_Src, imageW, imageH, imageW); cutilCheckMsg("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 4 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel(float* d_Dst, float* d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X] [(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; // Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * 
COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; // Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; // Upper halo for (int i = 0; i < COLUMNS_HALO_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; // Lower halo for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; // Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } extern "C" void convolutionColumnsGPU(float* d_Dst, float* d_Src, int imageW, int imageH, cudaStream_t stream_app) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); convolutionColumnsKernel<<<blocks, threads, 0, stream_app>>>( d_Dst, d_Src, imageW, imageH, imageW); cutilCheckMsg("convolutionColumnsKernel() execution failed\n"); }
4a76c18c0641118c4035157fcf495a8fba56f29c.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> using std::cout; using std::endl; void printDeviceInfo(hipDeviceProp_t prop, int idx){ cout << "[" << idx << "]\n"; cout << " Name: " << prop.name << endl; cout << " Major: " << prop.major << endl; cout << " Minor: " << prop.major << endl; cout << " Total Global Memory: " << prop.totalGlobalMem << endl; cout << " Total Shared Memory per Block: " << prop.sharedMemPerBlock << endl; int i; int dim = 3; cout << " Maximun Block Dim: \n"; for(i=0; i<dim; i++){ cout << " Dim " << i << ": " << prop.maxThreadsDim[i] << endl; } cout << " Maximun Grid Dim: \n"; for(i=0; i<dim; i++){ cout << " Dim " << i << ": " << prop.maxGridSize[i] << endl; } cout << " Warp Size: " << prop.warpSize << endl; cout << " Maximun Threads per Block: " << prop.maxThreadsPerBlock << endl; cout << " Number of Multiprocessors: " << prop.multiProcessorCount << endl; } int main(int argc, char const *argv[]){ hipDeviceProp_t prop; int count = 0; int i; hipGetDeviceCount(&count); for(i=0; i<count ;i++ ){ hipGetDeviceProperties(&prop, i); printDeviceInfo(prop, i); cout << endl; } return 0; }
4a76c18c0641118c4035157fcf495a8fba56f29c.cu
#include <iostream> using std::cout; using std::endl; void printDeviceInfo(cudaDeviceProp prop, int idx){ cout << "[" << idx << "]\n"; cout << " Name: " << prop.name << endl; cout << " Major: " << prop.major << endl; cout << " Minor: " << prop.major << endl; cout << " Total Global Memory: " << prop.totalGlobalMem << endl; cout << " Total Shared Memory per Block: " << prop.sharedMemPerBlock << endl; int i; int dim = 3; cout << " Maximun Block Dim: \n"; for(i=0; i<dim; i++){ cout << " Dim " << i << ": " << prop.maxThreadsDim[i] << endl; } cout << " Maximun Grid Dim: \n"; for(i=0; i<dim; i++){ cout << " Dim " << i << ": " << prop.maxGridSize[i] << endl; } cout << " Warp Size: " << prop.warpSize << endl; cout << " Maximun Threads per Block: " << prop.maxThreadsPerBlock << endl; cout << " Number of Multiprocessors: " << prop.multiProcessorCount << endl; } int main(int argc, char const *argv[]){ cudaDeviceProp prop; int count = 0; int i; cudaGetDeviceCount(&count); for(i=0; i<count ;i++ ){ cudaGetDeviceProperties(&prop, i); printDeviceInfo(prop, i); cout << endl; } return 0; }
d02a856a71eecafd18a67773d8db34a17366a891.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, int Nx, int Ny){ unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; unsigned int idx = iy * Ny + ix; if (ix < Nx && iy < Ny){ C[idx] = A[idx] + B[idx]; } }
d02a856a71eecafd18a67773d8db34a17366a891.cu
#include "includes.h" __global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, int Nx, int Ny){ unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; unsigned int idx = iy * Ny + ix; if (ix < Nx && iy < Ny){ C[idx] = A[idx] + B[idx]; } }
27760660bda791a9087b68601110b3dd933831ef.hip
// !!! This is a file automatically generated by hipify!!! #include "Tally.hh" #ifdef __HIPCC__ #include <thrust/transform.h> #include <thrust/execution_policy.h> #endif namespace MonteRay{ void Tally::clear() { auto clearFunc = [] CUDADEVICE_CALLABLE_MEMBER (auto&&) { return 0; }; #ifdef __HIPCC__ thrust::transform(thrust::device, contributions_.begin(), contributions_.end(), contributions_.begin(), clearFunc); #else std::transform(contributions_.begin(), contributions_.end(), contributions_.begin(), clearFunc); #endif } void Tally::accumulate() { nSamples_++; if (not this->useStats()){ return; } else { auto accumulateFunc = [] CUDADEVICE_CALLABLE_MEMBER (auto&& contribution, auto&& sumAndSumSq) { return MeanAndStdDev{sumAndSumSq.sum() + contribution, sumAndSumSq.sumSq() + contribution*contribution}; }; auto clearFunc = [] CUDADEVICE_CALLABLE_MEMBER (auto&&) { return 0; }; #ifdef __HIPCC__ thrust::transform(thrust::device, contributions_.begin(), contributions_.end(), stats_.begin(), stats_.begin(), accumulateFunc); thrust::transform(thrust::device, contributions_.begin(), contributions_.end(), contributions_.begin(), clearFunc); #else std::transform(contributions_.begin(), contributions_.end(), stats_.begin(), stats_.begin(), accumulateFunc); std::transform(contributions_.begin(), contributions_.end(), contributions_.begin(), clearFunc); #endif } } void Tally::computeStats() { if (this->useStats()){ auto statsFunc = [nSamples_ = this->nSamples_] CUDADEVICE_CALLABLE_MEMBER (auto&& sumAndSumSq){ auto mean = sumAndSumSq.sum()/nSamples_; auto stdDev = nSamples_ > 1 ? 
Math::sqrt( (sumAndSumSq.sumSq()/nSamples_ - mean*mean)/(nSamples_ - 1) ) : 0; return MeanAndStdDev{mean, stdDev}; }; #ifdef __HIPCC__ thrust::transform(thrust::device, stats_.begin(), stats_.end(), stats_.begin(), statsFunc); #else std::transform(stats_.begin(), stats_.end(), stats_.begin(), statsFunc); #endif } else { auto statsFunc = [nSamples_ = this->nSamples_] CUDADEVICE_CALLABLE_MEMBER (auto&& contribution){ return contribution/nSamples_; }; #ifdef __HIPCC__ thrust::transform(thrust::device, contributions_.begin(), contributions_.end(), contributions_.begin(), statsFunc); #else std::transform(contributions_.begin(), contributions_.end(), contributions_.begin(), statsFunc); #endif } } void Tally::gatherImpl(int mpiRank, const MPI_Comm& mpiComm){ if (mpiComm != MPI_COMM_NULL){ if(mpiRank == 0){ MPI_Reduce(MPI_IN_PLACE, contributions_.data(), contributions_.size(), MPI_DOUBLE, MPI_SUM, 0, mpiComm); } else { MPI_Reduce( contributions_.data(), nullptr, contributions_.size(), MPI_DOUBLE, MPI_SUM, 0, mpiComm); std::memset( contributions_.data(), 0, contributions_.size()*sizeof( TallyFloat ) ); } } } // Gather inter-work group ranks void Tally::gather() { if( ! MonteRay::isWorkGroupMaster() ) return; const auto& PA = MonteRayParallelAssistant::getInstance(); this->gatherImpl(PA.getInterWorkGroupRank(), PA.getInterWorkGroupCommunicator()); } // Gather intra-work group ranks void Tally::gatherWorkGroup() { const auto& PA = MonteRayParallelAssistant::getInstance(); if( ! PA.isParallel() ) return; this->gatherImpl(PA.getWorkGroupRank(), PA.getWorkGroupCommunicator()); } }
27760660bda791a9087b68601110b3dd933831ef.cu
#include "Tally.hh" #ifdef __CUDACC__ #include <thrust/transform.h> #include <thrust/execution_policy.h> #endif namespace MonteRay{ void Tally::clear() { auto clearFunc = [] CUDADEVICE_CALLABLE_MEMBER (auto&&) { return 0; }; #ifdef __CUDACC__ thrust::transform(thrust::device, contributions_.begin(), contributions_.end(), contributions_.begin(), clearFunc); #else std::transform(contributions_.begin(), contributions_.end(), contributions_.begin(), clearFunc); #endif } void Tally::accumulate() { nSamples_++; if (not this->useStats()){ return; } else { auto accumulateFunc = [] CUDADEVICE_CALLABLE_MEMBER (auto&& contribution, auto&& sumAndSumSq) { return MeanAndStdDev{sumAndSumSq.sum() + contribution, sumAndSumSq.sumSq() + contribution*contribution}; }; auto clearFunc = [] CUDADEVICE_CALLABLE_MEMBER (auto&&) { return 0; }; #ifdef __CUDACC__ thrust::transform(thrust::device, contributions_.begin(), contributions_.end(), stats_.begin(), stats_.begin(), accumulateFunc); thrust::transform(thrust::device, contributions_.begin(), contributions_.end(), contributions_.begin(), clearFunc); #else std::transform(contributions_.begin(), contributions_.end(), stats_.begin(), stats_.begin(), accumulateFunc); std::transform(contributions_.begin(), contributions_.end(), contributions_.begin(), clearFunc); #endif } } void Tally::computeStats() { if (this->useStats()){ auto statsFunc = [nSamples_ = this->nSamples_] CUDADEVICE_CALLABLE_MEMBER (auto&& sumAndSumSq){ auto mean = sumAndSumSq.sum()/nSamples_; auto stdDev = nSamples_ > 1 ? 
Math::sqrt( (sumAndSumSq.sumSq()/nSamples_ - mean*mean)/(nSamples_ - 1) ) : 0; return MeanAndStdDev{mean, stdDev}; }; #ifdef __CUDACC__ thrust::transform(thrust::device, stats_.begin(), stats_.end(), stats_.begin(), statsFunc); #else std::transform(stats_.begin(), stats_.end(), stats_.begin(), statsFunc); #endif } else { auto statsFunc = [nSamples_ = this->nSamples_] CUDADEVICE_CALLABLE_MEMBER (auto&& contribution){ return contribution/nSamples_; }; #ifdef __CUDACC__ thrust::transform(thrust::device, contributions_.begin(), contributions_.end(), contributions_.begin(), statsFunc); #else std::transform(contributions_.begin(), contributions_.end(), contributions_.begin(), statsFunc); #endif } } void Tally::gatherImpl(int mpiRank, const MPI_Comm& mpiComm){ if (mpiComm != MPI_COMM_NULL){ if(mpiRank == 0){ MPI_Reduce(MPI_IN_PLACE, contributions_.data(), contributions_.size(), MPI_DOUBLE, MPI_SUM, 0, mpiComm); } else { MPI_Reduce( contributions_.data(), nullptr, contributions_.size(), MPI_DOUBLE, MPI_SUM, 0, mpiComm); std::memset( contributions_.data(), 0, contributions_.size()*sizeof( TallyFloat ) ); } } } // Gather inter-work group ranks void Tally::gather() { if( ! MonteRay::isWorkGroupMaster() ) return; const auto& PA = MonteRayParallelAssistant::getInstance(); this->gatherImpl(PA.getInterWorkGroupRank(), PA.getInterWorkGroupCommunicator()); } // Gather intra-work group ranks void Tally::gatherWorkGroup() { const auto& PA = MonteRayParallelAssistant::getInstance(); if( ! PA.isParallel() ) return; this->gatherImpl(PA.getWorkGroupRank(), PA.getWorkGroupCommunicator()); } }
aec07ca960ff481593a4889ac4c920d2f522ad0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * rovernet */ #include "cudaUtility.h" // gpuResample __global__ void gpuResize( float2 scale, float* input, int iPitch, float* output, int oPitch, int oWidth, int oHeight ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if( x >= oWidth || y >= oHeight ) return; const int dx = (float)x * scale.x; const int dy = (float)y * scale.y; const float px = input[ dy * iPitch + dx ]; output[y*oPitch+x] = px; } // cudaResize hipError_t cudaResize( float* input, size_t inputPitch, size_t inputWidth, size_t inputHeight, float* output, size_t outputPitch, size_t outputWidth, size_t outputHeight ) { if( !input || !output ) return hipErrorInvalidDevicePointer; if( inputPitch == 0 || outputPitch == 0 || inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ) return hipErrorInvalidValue; const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(outputWidth/blockDim.x, outputHeight/blockDim.y); hipLaunchKernelGGL(( gpuResize), dim3(gridDim), dim3(blockDim), 0, 0, scale, input, inputPitch, output, outputPitch, outputWidth, outputHeight); return CUDA(hipGetLastError()); }
aec07ca960ff481593a4889ac4c920d2f522ad0d.cu
/* * rovernet */ #include "cudaUtility.h" // gpuResample __global__ void gpuResize( float2 scale, float* input, int iPitch, float* output, int oPitch, int oWidth, int oHeight ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if( x >= oWidth || y >= oHeight ) return; const int dx = (float)x * scale.x; const int dy = (float)y * scale.y; const float px = input[ dy * iPitch + dx ]; output[y*oPitch+x] = px; } // cudaResize cudaError_t cudaResize( float* input, size_t inputPitch, size_t inputWidth, size_t inputHeight, float* output, size_t outputPitch, size_t outputWidth, size_t outputHeight ) { if( !input || !output ) return cudaErrorInvalidDevicePointer; if( inputPitch == 0 || outputPitch == 0 || inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ) return cudaErrorInvalidValue; const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(outputWidth/blockDim.x, outputHeight/blockDim.y); gpuResize<<<gridDim, blockDim>>>(scale, input, inputPitch, output, outputPitch, outputWidth, outputHeight); return CUDA(cudaGetLastError()); }
302cc54e0b2e947a0fefbab10c99a451439f521d.hip
// !!! This is a file automatically generated by hipify!!! extern "C" { #include <stdlib.h> #include <stdio.h> #include "neighborList.h" #include "tools.h" } #include "hip/hip_runtime_api.h" extern __constant__ struct sphere_param d_partParams; __global__ void CheckCriterion (double *foldedPos, double *nlistPos, int *nlist, int *renewalFlag) { double halfSkinDis2 = 0.25 * d_partParams.nlistRenewal * d_partParams.nlistRenewal; // (0.5*skin depth)^2 unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (index < d_partParams.num_beads) { int x = index*3; int y = x+1; int z = x+2; double dr[3]; double dr2; if (*renewalFlag==0) { if (d_partParams.wallFlag == 1) { dr[0] = n_image (foldedPos[x] - nlistPos[x], d_partParams.lx); dr[1] = foldedPos[y] - nlistPos[y]; dr[2] = n_image (foldedPos[z] - nlistPos[z], d_partParams.lz); } else if (d_partParams.wallFlag == 2) { dr[0] = n_image (foldedPos[x] - nlistPos[x], d_partParams.lx); dr[1] = foldedPos[y] - nlistPos[y]; dr[2] = foldedPos[z] - nlistPos[z]; } else { printf ("wall flag value is wrong in 'CheckCriterion'\n"); } dr2 = dr[0]*dr[0] + dr[1]*dr[1] + dr[2]*dr[2]; if (dr2 > halfSkinDis2) { *renewalFlag = 1; } } // keep foldedPos for next check and zero each node's variable of the # of neighbors // Should be done as the neighbor list has to be renewed. 
//nlistPos[x] = foldedPos[x]; //nlistPos[y] = foldedPos[y]; //nlistPos[z] = foldedPos[z]; //nlist[index*MAX_N] = 0; } } //extern "C" //void CheckCriterion_wrapper (struct sphere_param host_partParams, int *host_renewalFlag, struct sphere_param *dev_partParams, double *dev_foldedPos, double *dev_nlistPos, int *dev_nlist, int *dev_renewalFlag) { // // // hostPartParams is an extern variable // int threads_per_block = 64; // int blocks_per_grid_y = 4; // int blocks_per_grid_x = (host_partParams.num_beads + threads_per_block*blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y); // dim3 dim_grid = make_uint3 (blocks_per_grid_x, blocks_per_grid_y, 1); // // // Zero renewalFlag // *host_renewalFlag = 0; // hipMemcpy (dev_renewalFlag, host_renewalFlag, sizeof(int), hipMemcpyHostToDevice); // // // devPartParams and devRenewalFlag are extern // hipLaunchKernelGGL(( CheckCriterion) , dim3(dim_grid), dim3(threads_per_block), 0, 0, dev_partParams, dev_foldedPos, dev_nlistPos, dev_nlist, dev_renewalFlag); // // hipMemcpy (host_renewalFlag, dev_renewalFlag, sizeof(int), hipMemcpyDeviceToHost); // // //printf ("CheckCriterion has been done.\n"); //} extern "C" int RenewNeighborList_gpu (struct sphere_param h_params, double *h_foldedPos, double *h_nlistPos, int *h_numNeighbors, int *h_nlist, double *d_foldedPos, double *d_nlistPos, int *d_numNeighbors, int *d_nlist) { int frequency = 0; int h_renewalFlag = 0; int *d_renewalFlag; unsigned int numNodes = h_params.num_beads; // CheckCriterion_wrapper (host_partParams, &host_renewalFlag, dev_partParams, dev_foldedPos, dev_nlistPos, dev_nlist, dev_renewalFlag); int threads_per_block = 64; int blocks_per_grid_y = 4; int blocks_per_grid_x = (numNodes + threads_per_block*blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y); dim3 dim_grid = make_uint3 (blocks_per_grid_x, blocks_per_grid_y, 1); hipMalloc ((void**)&d_renewalFlag, sizeof(int)); hipMemcpy (d_renewalFlag, &h_renewalFlag, sizeof(int), 
hipMemcpyHostToDevice); hipLaunchKernelGGL(( CheckCriterion) , dim3(dim_grid), dim3(threads_per_block), 0, 0, d_foldedPos, d_nlistPos, d_nlist, d_renewalFlag); hipMemcpy (&h_renewalFlag, d_renewalFlag, sizeof(int), hipMemcpyDeviceToHost); hipFree (d_renewalFlag); //hipDeviceSynchronize(); // If the criterion is met, update the neighbor list if (h_renewalFlag == 1) { hipMemcpy (h_foldedPos, d_foldedPos, numNodes*3*sizeof(double), hipMemcpyDeviceToHost); ConstructNeighborList (h_params, h_foldedPos, h_nlistPos, h_numNeighbors, h_nlist); hipMemcpy (d_numNeighbors, h_numNeighbors, numNodes*sizeof(int), hipMemcpyHostToDevice); hipMemcpy (d_nlist, h_nlist, numNodes*MAX_N*sizeof(int), hipMemcpyHostToDevice); hipMemcpy (d_nlistPos, h_nlistPos, numNodes*3*sizeof(double), hipMemcpyHostToDevice); frequency = 1; } return frequency; }
302cc54e0b2e947a0fefbab10c99a451439f521d.cu
extern "C" { #include <stdlib.h> #include <stdio.h> #include "neighborList.h" #include "tools.h" } #include "cuda_runtime_api.h" extern __constant__ struct sphere_param d_partParams; __global__ void CheckCriterion (double *foldedPos, double *nlistPos, int *nlist, int *renewalFlag) { double halfSkinDis2 = 0.25 * d_partParams.nlistRenewal * d_partParams.nlistRenewal; // (0.5*skin depth)^2 unsigned int index = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (index < d_partParams.num_beads) { int x = index*3; int y = x+1; int z = x+2; double dr[3]; double dr2; if (*renewalFlag==0) { if (d_partParams.wallFlag == 1) { dr[0] = n_image (foldedPos[x] - nlistPos[x], d_partParams.lx); dr[1] = foldedPos[y] - nlistPos[y]; dr[2] = n_image (foldedPos[z] - nlistPos[z], d_partParams.lz); } else if (d_partParams.wallFlag == 2) { dr[0] = n_image (foldedPos[x] - nlistPos[x], d_partParams.lx); dr[1] = foldedPos[y] - nlistPos[y]; dr[2] = foldedPos[z] - nlistPos[z]; } else { printf ("wall flag value is wrong in 'CheckCriterion'\n"); } dr2 = dr[0]*dr[0] + dr[1]*dr[1] + dr[2]*dr[2]; if (dr2 > halfSkinDis2) { *renewalFlag = 1; } } // keep foldedPos for next check and zero each node's variable of the # of neighbors // Should be done as the neighbor list has to be renewed. 
//nlistPos[x] = foldedPos[x]; //nlistPos[y] = foldedPos[y]; //nlistPos[z] = foldedPos[z]; //nlist[index*MAX_N] = 0; } } //extern "C" //void CheckCriterion_wrapper (struct sphere_param host_partParams, int *host_renewalFlag, struct sphere_param *dev_partParams, double *dev_foldedPos, double *dev_nlistPos, int *dev_nlist, int *dev_renewalFlag) { // // // hostPartParams is an extern variable // int threads_per_block = 64; // int blocks_per_grid_y = 4; // int blocks_per_grid_x = (host_partParams.num_beads + threads_per_block*blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y); // dim3 dim_grid = make_uint3 (blocks_per_grid_x, blocks_per_grid_y, 1); // // // Zero renewalFlag // *host_renewalFlag = 0; // cudaMemcpy (dev_renewalFlag, host_renewalFlag, sizeof(int), cudaMemcpyHostToDevice); // // // devPartParams and devRenewalFlag are extern // CheckCriterion <<<dim_grid, threads_per_block>>> (dev_partParams, dev_foldedPos, dev_nlistPos, dev_nlist, dev_renewalFlag); // // cudaMemcpy (host_renewalFlag, dev_renewalFlag, sizeof(int), cudaMemcpyDeviceToHost); // // //printf ("CheckCriterion has been done.\n"); //} extern "C" int RenewNeighborList_gpu (struct sphere_param h_params, double *h_foldedPos, double *h_nlistPos, int *h_numNeighbors, int *h_nlist, double *d_foldedPos, double *d_nlistPos, int *d_numNeighbors, int *d_nlist) { int frequency = 0; int h_renewalFlag = 0; int *d_renewalFlag; unsigned int numNodes = h_params.num_beads; // CheckCriterion_wrapper (host_partParams, &host_renewalFlag, dev_partParams, dev_foldedPos, dev_nlistPos, dev_nlist, dev_renewalFlag); int threads_per_block = 64; int blocks_per_grid_y = 4; int blocks_per_grid_x = (numNodes + threads_per_block*blocks_per_grid_y - 1) / (threads_per_block * blocks_per_grid_y); dim3 dim_grid = make_uint3 (blocks_per_grid_x, blocks_per_grid_y, 1); cudaMalloc ((void**)&d_renewalFlag, sizeof(int)); cudaMemcpy (d_renewalFlag, &h_renewalFlag, sizeof(int), cudaMemcpyHostToDevice); CheckCriterion 
<<<dim_grid, threads_per_block>>> (d_foldedPos, d_nlistPos, d_nlist, d_renewalFlag); cudaMemcpy (&h_renewalFlag, d_renewalFlag, sizeof(int), cudaMemcpyDeviceToHost); cudaFree (d_renewalFlag); //cudaDeviceSynchronize(); // If the criterion is met, update the neighbor list if (h_renewalFlag == 1) { cudaMemcpy (h_foldedPos, d_foldedPos, numNodes*3*sizeof(double), cudaMemcpyDeviceToHost); ConstructNeighborList (h_params, h_foldedPos, h_nlistPos, h_numNeighbors, h_nlist); cudaMemcpy (d_numNeighbors, h_numNeighbors, numNodes*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy (d_nlist, h_nlist, numNodes*MAX_N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy (d_nlistPos, h_nlistPos, numNodes*3*sizeof(double), cudaMemcpyHostToDevice); frequency = 1; } return frequency; }
ac070fb9011e6211be425a1c9426191653fab8b7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> void print_array(float *A, int n, int m) { for(int i=0; i<n; i++) { for (int j=0; j<m; j++) { printf("%.1f ", A[i*m+j]); } printf("\n"); } } __global__ void process_kernel1(float *input, float *output, int n, int m) { // Code for i int i= blockIdx.y * blockDim .y+ threadIdx .y; int j= blockIdx.x * blockDim.x+ threadIdx.x; if ((i<n) && (j<m)) { for(int l=0; l<n; l++){ for (int k = 0; k < m; k+=2) { output[i*l+k] = input[i*l+k+1]; output[i*l+k+1] = input[i*l+k]; } } } } int main(void) { hipError_t err = hipSuccess; int test_cases; scanf("%d",&test_cases); int m, n; scanf("%d %d", &m, &n); size_t size = m*n*sizeof(float); float *h_input = (float *)malloc(size); float *h_output = (float *)malloc(size); if (h_input == NULL || h_output == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } for (int i = 0; i < n*m; ++i) { scanf("%f",&h_input[i]); } float *d_input = NULL; err = hipMalloc((void **)&d_input, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector d_input (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *d_output = NULL; err = hipMalloc((void **)&d_output, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector d_output (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_input, h_input, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector h_input from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //launching process_kernel1 int threadsPerBlock = 16; int blocksPerGrid = ((m*n)+threadsPerBlock-1)/threadsPerBlock; hipLaunchKernelGGL(( process_kernel1), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_input, d_output, n, m); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch 
process_kernel1 kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_output, d_output, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector d_output from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* // Verify that the result vectors are as expected for (int i = 0; i < numElements; ++i) { if (fabs(sinf(h_input1[i]) + cosf(h_input2[i]) - h_output1[i]) > 1e-5) { fprintf(stderr, "Result verification for h_output1 failed at element %d! value \n", i, h_input1[i]); exit(EXIT_FAILURE); } } */ print_array(h_output,n,m); err = hipFree(d_input); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector d_input (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_output); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector d_output (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } free(h_input); free(h_output); err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } return 0; }
ac070fb9011e6211be425a1c9426191653fab8b7.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> void print_array(float *A, int n, int m) { for(int i=0; i<n; i++) { for (int j=0; j<m; j++) { printf("%.1f ", A[i*m+j]); } printf("\n"); } } __global__ void process_kernel1(float *input, float *output, int n, int m) { // Code for i int i= blockIdx.y * blockDim .y+ threadIdx .y; int j= blockIdx.x * blockDim.x+ threadIdx.x; if ((i<n) && (j<m)) { for(int l=0; l<n; l++){ for (int k = 0; k < m; k+=2) { output[i*l+k] = input[i*l+k+1]; output[i*l+k+1] = input[i*l+k]; } } } } int main(void) { cudaError_t err = cudaSuccess; int test_cases; scanf("%d",&test_cases); int m, n; scanf("%d %d", &m, &n); size_t size = m*n*sizeof(float); float *h_input = (float *)malloc(size); float *h_output = (float *)malloc(size); if (h_input == NULL || h_output == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } for (int i = 0; i < n*m; ++i) { scanf("%f",&h_input[i]); } float *d_input = NULL; err = cudaMalloc((void **)&d_input, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector d_input (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *d_output = NULL; err = cudaMalloc((void **)&d_output, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector d_output (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_input, h_input, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector h_input from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //launching process_kernel1 int threadsPerBlock = 16; int blocksPerGrid = ((m*n)+threadsPerBlock-1)/threadsPerBlock; process_kernel1<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_output, n, m); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // 
printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector d_output from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* // Verify that the result vectors are as expected for (int i = 0; i < numElements; ++i) { if (fabs(sinf(h_input1[i]) + cosf(h_input2[i]) - h_output1[i]) > 1e-5) { fprintf(stderr, "Result verification for h_output1 failed at element %d! value \n", i, h_input1[i]); exit(EXIT_FAILURE); } } */ print_array(h_output,n,m); err = cudaFree(d_input); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector d_input (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_output); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector d_output (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } free(h_input); free(h_output); err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } return 0; }
46ef0582e858ba74c216c9a2e55310a137954834.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #define SIZE 32 /* Autores: * * Antonio J. Cabrera * Paul Gazel-Anthoine */ // STRUCTS typedef struct bmpFileHeader { /* 2 bytes de identificacin */ uint32_t size; /* Tamao del archivo */ uint16_t resv1; /* Reservado */ uint16_t resv2; /* Reservado */ uint32_t offset; /* Offset hasta hasta los datos de imagen */ } bmpFileHeader; typedef struct bmpInfoHeader { uint32_t headersize; /* Tamao de la cabecera */ uint32_t width; /* Ancho */ uint32_t height; /* Alto */ uint16_t planes; /* Planos de color (Siempre 1) */ uint16_t bpp; /* bits por pixel */ uint32_t compress; /* compresion */ uint32_t imgsize; /* tamao de los datos de imagen */ uint32_t bpmx; /* Resolucion X en bits por metro */ uint32_t bpmy; /* Resolucion Y en bits por metro */ uint32_t colors; /* colors used en la paleta */ uint32_t imxtcolors; /* Colores importantes. 
0 si son todos */ } bmpInfoHeader; // Rutinas BMP unsigned char *LoadBMP(char *filename, bmpInfoHeader *bInfoHeader) { FILE *f; bmpFileHeader header; /* cabecera */ unsigned char *imgdata; /* datos de imagen */ uint16_t type; /* 2 bytes identificativos */ f=fopen (filename, "r"); if (!f) { /* Si no podemos leer, no hay imagen */ printf("NO se puede abrir el fichero %s\n", filename); return NULL; } /* Leemos los dos primeros bytes y comprobamos el formato */ fread(&type, sizeof(uint16_t), 1, f); if (type !=0x4D42) { fclose(f); printf("%s NO es una imagen BMP\n", filename); return NULL; } /* Leemos la cabecera del fichero */ fread(&header, sizeof(bmpFileHeader), 1, f); printf("File size: %u\n", header.size); printf("Reservado: %u\n", header.resv1); printf("Reservado: %u\n", header.resv2); printf("Offset: %u\n", header.offset); /* Leemos la cabecera de informacin del BMP */ fread(bInfoHeader, sizeof(bmpInfoHeader), 1, f); /* Reservamos memoria para la imagen, lo que indique imgsize */ if (bInfoHeader->imgsize == 0) bInfoHeader->imgsize = ((bInfoHeader->width*3 +3) / 4) * 4 * bInfoHeader->height; imgdata = (unsigned char*) malloc(bInfoHeader->imgsize); if (imgdata == NULL) { printf("Fallo en el malloc, del fichero %s\n", filename); exit(0); } /* Nos situamos en donde empiezan los datos de imagen, lo indica el offset de la cabecera de fichero */ fseek(f, header.offset, SEEK_SET); /* Leemos los datos de la imagen, tantos bytes como imgsize */ fread(imgdata, bInfoHeader->imgsize,1, f); /* Cerramos el fichero */ fclose(f); /* Devolvemos la imagen */ return imgdata; } bmpInfoHeader *createInfoHeader(uint32_t width, uint32_t height, uint32_t ppp) { bmpInfoHeader *InfoHeader; bool IH; IH = malloc(sizeof(bmpInfoHeader)); if (!IH) return NULL; InfoHeader->headersize = sizeof(bmpInfoHeader); InfoHeader->width = width; InfoHeader->height = height; InfoHeader->planes = 1; InfoHeader->bpp = 24; InfoHeader->compress = 0; /* 3 bytes por pixel, width*height pixels, el tamao de las 
filas ha de ser multiplo de 4 */ InfoHeader->imgsize = ((width*3 + 3) / 4) * 4 * height; InfoHeader->bpmx = (unsigned) ((double)ppp*100/2.54); InfoHeader->bpmy= InfoHeader->bpmx; /* Misma resolucion vertical y horiontal */ InfoHeader->colors = 0; InfoHeader->imxtcolors = 0; return InfoHeader; } void SaveBMP(char *filename, bmpInfoHeader *InfoHeader, unsigned char *imgdata) { bmpFileHeader header; FILE *f; uint16_t type; f=fopen(filename, "w+"); header.size = InfoHeader->imgsize + sizeof(bmpFileHeader) + sizeof(bmpInfoHeader) +2;//2 header.resv1 = 0; header.resv2 = 0; /* El offset ser el tamao de las dos cabeceras + 2 (informacin de fichero)*/ header.offset=sizeof(bmpFileHeader)+sizeof(bmpInfoHeader) +2;//2 /* Escribimos la identificacin del archivo */ type=0x4D42; fwrite(&type, sizeof(type),1,f); /* Escribimos la cabecera de fichero */ fwrite(&header, sizeof(bmpFileHeader),1,f); /* Escribimos la informacin bsica de la imagen */ fwrite(InfoHeader, sizeof(bmpInfoHeader),1,f); /* Escribimos la imagen */ fwrite(imgdata, InfoHeader->imgsize, 1, f); fclose(f); } void DisplayInfo(char *FileName, bmpInfoHeader *InfoHeader) { printf("\n"); printf("Informacion de %s\n", FileName); printf("Tamao de la cabecera: %u bytes\n", InfoHeader->headersize); printf("Anchura: %d pixels\n", InfoHeader->width); printf("Altura: %d pixels\n", InfoHeader->height); printf("Planos (1): %d\n", InfoHeader->planes); printf("Bits por pixel: %d\n", InfoHeader->bpp); printf("Compresion: %d\n", InfoHeader->compress); printf("Tamao de la imagen: %u bytes\n", InfoHeader->imgsize); printf("Resolucion horizontal: %u px/m\n", InfoHeader->bpmx); printf("Resolucion vertical: %u px/m\n", InfoHeader->bpmy); if (InfoHeader->bpmx == 0) InfoHeader->bpmx = (unsigned) ((double)24*100/2.54); if (InfoHeader->bpmy == 0) InfoHeader->bpmy = (unsigned) ((double)24*100/2.54); printf("Colores en paleta: %d\n", InfoHeader->colors); printf("Colores importantes: %d\n", InfoHeader->imxtcolors); } /* 
------------------------------------------------ Nuestro Cdigo ------------------------------------------------ */ __global__ void KernelByN (int N, int M, unsigned char *A, int *S, int NS) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) A[row*N+col*3] = ((A[row*N+col*3] + A[row*N+col*3+1] + A[row*N+col*3+2])/3); } __global__ void KernelSobel1(int N,int M, unsigned char *A, int *S, int NS) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) { double magnitudX, magnitudY; if(col != 0 && row != 0 && col != NS-1 && row != M-1) { magnitudX = (double)(A[(row-1)*N+(col-1)*3]*(-1) + A[(row)*N+(col-1)*3]*(-2) + A[(row+1)*N+(col+1)*3]*(-1) + A[(row-1)*N+(col+1)*3] + A[row*N+(col+1)*3]*2 + A[(row+1)*N+(col+1)*3]); magnitudY = (double)(A[(row-1)*N+(col-1)*3]*(-1) + A[(row+1)*N+(col-1)*3] + A[(row-1)*N+col*3]*(-2) + A[(row+1)*N+col*3]*2 + A[(row-1)*N+(col+1)*3]*(-1) + A[(row+1)*N+(col+1)*3]); S[row*NS+col] = (int)sqrt(magnitudX*magnitudX + magnitudY*magnitudY); } else S[row*NS+col] = 0; } } __global__ void KernelReduction(int NT,int *S, int *oMin, int *oMax) { //Reduction __shared__ int sdataMax[SIZE*SIZE]; __shared__ int sdataMin[SIZE*SIZE]; unsigned int s; int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; sdataMax[tid] = 0; sdataMin[tid] = 0x7FFFFFFF; while(i< NT) { if (S[i] > -1) { if (sdataMax[tid] < S[i]) sdataMax[tid] = S[i]; if (sdataMin[tid] > S[i]) sdataMin[tid] = S[i]; } if (i+blockDim.x < NT && S[i+blockDim.x] > -1) { if (sdataMax[tid] < S[i+blockDim.x]) sdataMax[tid] = S[i+blockDim.x]; if (sdataMin[tid] > S[i+blockDim.x]) sdataMin[tid] = S[i+blockDim.x]; } i += gridSize; } __syncthreads(); for (s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { if (sdataMax[tid] < sdataMax[tid+s]) sdataMax[tid] = sdataMax[tid+s]; if (sdataMin[tid] > 
sdataMin[tid+s]) sdataMin[tid] = sdataMin[tid+s]; } __syncthreads(); } // desenrrollamos el ultimo warp activo if (tid < 32) { volatile int *smemMax = sdataMax; volatile int *smemMin = sdataMin; if (smemMax[tid] < smemMax[tid+32]) smemMax[tid] = smemMax[tid+32]; if (smemMax[tid] < smemMax[tid+16]) smemMax[tid] = smemMax[tid+16]; if (smemMax[tid] < smemMax[tid+8]) smemMax[tid] = smemMax[tid+8]; if (smemMax[tid] < smemMax[tid+4]) smemMax[tid] = smemMax[tid+4]; if (smemMax[tid] < smemMax[tid+2]) smemMax[tid] = smemMax[tid+2]; if (smemMax[tid] < smemMax[tid+1]) smemMax[tid] = smemMax[tid+1]; if (smemMin[tid] < smemMin[tid+32]) smemMin[tid] = smemMin[tid+32]; if (smemMin[tid] < smemMin[tid+16]) smemMin[tid] = smemMin[tid+16]; if (smemMin[tid] < smemMin[tid+8]) smemMin[tid] = smemMin[tid+8]; if (smemMin[tid] < smemMin[tid+4]) smemMin[tid] = smemMin[tid+4]; if (smemMin[tid] < smemMin[tid+2]) smemMin[tid] = smemMin[tid+2]; if (smemMin[tid] < smemMin[tid+1]) smemMin[tid] = smemMin[tid+1]; } // El thread 0 escribe el resultado de este bloque en la memoria global if (tid == 0) { oMax[blockIdx.x] = sdataMax[0]; oMin[blockIdx.x] = sdataMin[0]; } } __global__ void KernelSobel2 (int N,int M,unsigned char *A, int *S, int NS, float factor) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) { if(col != 0 && row != 0 && col != NS-1 && row != M-1) A[row*N+col*3] = A[row*N+col*3+1] = A[row*N+col*3+2] = (unsigned char)(S[row*NS+col] * factor); else A[row*N+col*3] = A[row*N+col*3+1] = A[row*N+col*3+2] = 0; } } int main(int argc, char** argv) { unsigned int N, M, NS; unsigned int numBytesA, numBytesS, numBytesR; unsigned int nBlocksX, nBlocksY, nBlocksR, nThreads; float TiempoTotal, TiempoKernel, factor; hipEvent_t E0, E1, E2, E3; unsigned char *d_A; int *d_S, *d_OutMax, *d_OutMin; if (argc != 3) { printf("Usage: ./exe img.bmp prefix\n"); exit(0); } bmpInfoHeader header; unsigned char *image; image = LoadBMP(argv[1], 
&header); unsigned int N3 = header.width * 3; N = (N3+3) & 0xFFFFFFFC; //fila multiplo de 4 (BMP) M = header.height; NS = header.width; // numero de Threads en cada dimension nThreads = SIZE; // numero de Blocks en cada dimension nBlocksX = (NS+nThreads-1)/nThreads; nBlocksY = (M+nThreads-1)/nThreads; numBytesA = N * M * sizeof(unsigned char); numBytesS = NS * M * sizeof(int); nBlocksR = ((NS * M)+(nThreads*nThreads-1)) / (nThreads*nThreads); numBytesR = nBlocksR *sizeof(int); dim3 dimGrid(nBlocksX, nBlocksY, 1); dim3 dimBlock(nThreads, nThreads, 1); dim3 dimGridR(nBlocksR, 1, 1); dim3 dimBlockR(nThreads * nThreads, 1, 1); hipEventCreate(&E0); hipEventCreate(&E1); hipEventCreate(&E2); hipEventCreate(&E3); hipEventRecord(E0, 0); hipEventSynchronize(E0); // Obtener Memoria en el device hipMalloc((unsigned char**)&d_A, numBytesA); hipMalloc((int**)&d_S, numBytesS); hipMalloc((int**)&d_OutMax, numBytesR); hipMalloc((int**)&d_OutMin, numBytesR); int *hmax = (int*)malloc(numBytesR); int *hmin = (int*)malloc(numBytesR); // Copiar datos del host al device hipMemcpy(d_A, image, numBytesA, hipMemcpyHostToDevice); hipEventRecord(E1, 0); hipEventSynchronize(E1); // Ejecutar kernels hipLaunchKernelGGL(( KernelByN), dim3(dimGrid), dim3(dimBlock), 0, 0, N, M, d_A, d_S, NS); hipLaunchKernelGGL(( KernelSobel1), dim3(dimGrid), dim3(dimBlock), 0, 0, N, M, d_A, d_S, NS); hipLaunchKernelGGL(( KernelReduction), dim3(dimGridR),dim3(dimBlockR), 0, 0, NS*M, d_S, d_OutMin, d_OutMax); hipMemcpy(hmax, d_OutMax, numBytesR, hipMemcpyDeviceToHost); hipMemcpy(hmin, d_OutMin, numBytesR, hipMemcpyDeviceToHost); int max = hmax[0]; int min = hmin[0]; for (int i=1; i < nBlocksR; i++) { if (hmax[i] > max) max = hmax[i]; if (hmin[i] < min) min = hmin[i]; } factor = (float)(255.0/(float)(max-min)); hipLaunchKernelGGL(( KernelSobel2), dim3(dimGrid), dim3(dimBlock), 0, 0, N, M, d_A, d_S, NS, factor); hipEventRecord(E2, 0); hipEventSynchronize(E2); // Obtener el resultado desde el host hipMemcpy(image, d_A, 
numBytesA, hipMemcpyDeviceToHost); // Liberar Memoria del device hipFree(d_A); hipFree(d_S); hipFree(d_OutMin); hipFree(d_OutMax); hipEventRecord(E3, 0); hipEventSynchronize(E3); hipEventElapsedTime(&TiempoTotal, E0, E3); hipEventElapsedTime(&TiempoKernel, E1, E2); printf("\nEJECUCION\n"); printf("Dimensiones imagen: %dx%d\n", NS, M); printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads * nThreads); printf("nBlocks: %dx%d (%d)\n", nBlocksX, nBlocksY, nBlocksX*nBlocksY); printf("nThreadsR: %dx%d (%d)\n", nThreads*nThreads, 1, nThreads * nThreads); printf("nBlocksR: %dx%d (%d)\n", nBlocksR, 1, nBlocksR); printf("Tiempo Global: %4.6f milseg\n", TiempoTotal); printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel); hipEventDestroy(E0); hipEventDestroy(E1); hipEventDestroy(E2); hipEventDestroy(E3); char nom[32]; strcpy(nom, argv[2]); strcat(nom, "_"); strcat(nom,argv[1]); SaveBMP(nom, &header, image); }
46ef0582e858ba74c216c9a2e55310a137954834.cu
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #define SIZE 32 /* Autores: * * Antonio J. Cabrera * Paul Gazel-Anthoine */ // STRUCTS typedef struct bmpFileHeader { /* 2 bytes de identificación */ uint32_t size; /* Tamaño del archivo */ uint16_t resv1; /* Reservado */ uint16_t resv2; /* Reservado */ uint32_t offset; /* Offset hasta hasta los datos de imagen */ } bmpFileHeader; typedef struct bmpInfoHeader { uint32_t headersize; /* Tamaño de la cabecera */ uint32_t width; /* Ancho */ uint32_t height; /* Alto */ uint16_t planes; /* Planos de color (Siempre 1) */ uint16_t bpp; /* bits por pixel */ uint32_t compress; /* compresion */ uint32_t imgsize; /* tamaño de los datos de imagen */ uint32_t bpmx; /* Resolucion X en bits por metro */ uint32_t bpmy; /* Resolucion Y en bits por metro */ uint32_t colors; /* colors used en la paleta */ uint32_t imxtcolors; /* Colores importantes. 0 si son todos */ } bmpInfoHeader; // Rutinas BMP unsigned char *LoadBMP(char *filename, bmpInfoHeader *bInfoHeader) { FILE *f; bmpFileHeader header; /* cabecera */ unsigned char *imgdata; /* datos de imagen */ uint16_t type; /* 2 bytes identificativos */ f=fopen (filename, "r"); if (!f) { /* Si no podemos leer, no hay imagen */ printf("NO se puede abrir el fichero %s\n", filename); return NULL; } /* Leemos los dos primeros bytes y comprobamos el formato */ fread(&type, sizeof(uint16_t), 1, f); if (type !=0x4D42) { fclose(f); printf("%s NO es una imagen BMP\n", filename); return NULL; } /* Leemos la cabecera del fichero */ fread(&header, sizeof(bmpFileHeader), 1, f); printf("File size: %u\n", header.size); printf("Reservado: %u\n", header.resv1); printf("Reservado: %u\n", header.resv2); printf("Offset: %u\n", header.offset); /* Leemos la cabecera de información del BMP */ fread(bInfoHeader, sizeof(bmpInfoHeader), 1, f); /* Reservamos memoria para la imagen, lo que indique imgsize */ if (bInfoHeader->imgsize == 0) bInfoHeader->imgsize = 
((bInfoHeader->width*3 +3) / 4) * 4 * bInfoHeader->height; imgdata = (unsigned char*) malloc(bInfoHeader->imgsize); if (imgdata == NULL) { printf("Fallo en el malloc, del fichero %s\n", filename); exit(0); } /* Nos situamos en donde empiezan los datos de imagen, lo indica el offset de la cabecera de fichero */ fseek(f, header.offset, SEEK_SET); /* Leemos los datos de la imagen, tantos bytes como imgsize */ fread(imgdata, bInfoHeader->imgsize,1, f); /* Cerramos el fichero */ fclose(f); /* Devolvemos la imagen */ return imgdata; } bmpInfoHeader *createInfoHeader(uint32_t width, uint32_t height, uint32_t ppp) { bmpInfoHeader *InfoHeader; bool IH; IH = malloc(sizeof(bmpInfoHeader)); if (!IH) return NULL; InfoHeader->headersize = sizeof(bmpInfoHeader); InfoHeader->width = width; InfoHeader->height = height; InfoHeader->planes = 1; InfoHeader->bpp = 24; InfoHeader->compress = 0; /* 3 bytes por pixel, width*height pixels, el tamaño de las filas ha de ser multiplo de 4 */ InfoHeader->imgsize = ((width*3 + 3) / 4) * 4 * height; InfoHeader->bpmx = (unsigned) ((double)ppp*100/2.54); InfoHeader->bpmy= InfoHeader->bpmx; /* Misma resolucion vertical y horiontal */ InfoHeader->colors = 0; InfoHeader->imxtcolors = 0; return InfoHeader; } void SaveBMP(char *filename, bmpInfoHeader *InfoHeader, unsigned char *imgdata) { bmpFileHeader header; FILE *f; uint16_t type; f=fopen(filename, "w+"); header.size = InfoHeader->imgsize + sizeof(bmpFileHeader) + sizeof(bmpInfoHeader) +2;//2 header.resv1 = 0; header.resv2 = 0; /* El offset será el tamaño de las dos cabeceras + 2 (información de fichero)*/ header.offset=sizeof(bmpFileHeader)+sizeof(bmpInfoHeader) +2;//2 /* Escribimos la identificación del archivo */ type=0x4D42; fwrite(&type, sizeof(type),1,f); /* Escribimos la cabecera de fichero */ fwrite(&header, sizeof(bmpFileHeader),1,f); /* Escribimos la información básica de la imagen */ fwrite(InfoHeader, sizeof(bmpInfoHeader),1,f); /* Escribimos la imagen */ fwrite(imgdata, 
InfoHeader->imgsize, 1, f); fclose(f); } void DisplayInfo(char *FileName, bmpInfoHeader *InfoHeader) { printf("\n"); printf("Informacion de %s\n", FileName); printf("Tamaño de la cabecera: %u bytes\n", InfoHeader->headersize); printf("Anchura: %d pixels\n", InfoHeader->width); printf("Altura: %d pixels\n", InfoHeader->height); printf("Planos (1): %d\n", InfoHeader->planes); printf("Bits por pixel: %d\n", InfoHeader->bpp); printf("Compresion: %d\n", InfoHeader->compress); printf("Tamaño de la imagen: %u bytes\n", InfoHeader->imgsize); printf("Resolucion horizontal: %u px/m\n", InfoHeader->bpmx); printf("Resolucion vertical: %u px/m\n", InfoHeader->bpmy); if (InfoHeader->bpmx == 0) InfoHeader->bpmx = (unsigned) ((double)24*100/2.54); if (InfoHeader->bpmy == 0) InfoHeader->bpmy = (unsigned) ((double)24*100/2.54); printf("Colores en paleta: %d\n", InfoHeader->colors); printf("Colores importantes: %d\n", InfoHeader->imxtcolors); } /* ------------------------------------------------ Nuestro Código ------------------------------------------------ */ __global__ void KernelByN (int N, int M, unsigned char *A, int *S, int NS) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) A[row*N+col*3] = ((A[row*N+col*3] + A[row*N+col*3+1] + A[row*N+col*3+2])/3); } __global__ void KernelSobel1(int N,int M, unsigned char *A, int *S, int NS) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) { double magnitudX, magnitudY; if(col != 0 && row != 0 && col != NS-1 && row != M-1) { magnitudX = (double)(A[(row-1)*N+(col-1)*3]*(-1) + A[(row)*N+(col-1)*3]*(-2) + A[(row+1)*N+(col+1)*3]*(-1) + A[(row-1)*N+(col+1)*3] + A[row*N+(col+1)*3]*2 + A[(row+1)*N+(col+1)*3]); magnitudY = (double)(A[(row-1)*N+(col-1)*3]*(-1) + A[(row+1)*N+(col-1)*3] + A[(row-1)*N+col*3]*(-2) + A[(row+1)*N+col*3]*2 + A[(row-1)*N+(col+1)*3]*(-1) + A[(row+1)*N+(col+1)*3]); S[row*NS+col] 
= (int)sqrt(magnitudX*magnitudX + magnitudY*magnitudY); } else S[row*NS+col] = 0; } } __global__ void KernelReduction(int NT,int *S, int *oMin, int *oMax) { //Reduction __shared__ int sdataMax[SIZE*SIZE]; __shared__ int sdataMin[SIZE*SIZE]; unsigned int s; int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; sdataMax[tid] = 0; sdataMin[tid] = 0x7FFFFFFF; while(i< NT) { if (S[i] > -1) { if (sdataMax[tid] < S[i]) sdataMax[tid] = S[i]; if (sdataMin[tid] > S[i]) sdataMin[tid] = S[i]; } if (i+blockDim.x < NT && S[i+blockDim.x] > -1) { if (sdataMax[tid] < S[i+blockDim.x]) sdataMax[tid] = S[i+blockDim.x]; if (sdataMin[tid] > S[i+blockDim.x]) sdataMin[tid] = S[i+blockDim.x]; } i += gridSize; } __syncthreads(); for (s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { if (sdataMax[tid] < sdataMax[tid+s]) sdataMax[tid] = sdataMax[tid+s]; if (sdataMin[tid] > sdataMin[tid+s]) sdataMin[tid] = sdataMin[tid+s]; } __syncthreads(); } // desenrrollamos el ultimo warp activo if (tid < 32) { volatile int *smemMax = sdataMax; volatile int *smemMin = sdataMin; if (smemMax[tid] < smemMax[tid+32]) smemMax[tid] = smemMax[tid+32]; if (smemMax[tid] < smemMax[tid+16]) smemMax[tid] = smemMax[tid+16]; if (smemMax[tid] < smemMax[tid+8]) smemMax[tid] = smemMax[tid+8]; if (smemMax[tid] < smemMax[tid+4]) smemMax[tid] = smemMax[tid+4]; if (smemMax[tid] < smemMax[tid+2]) smemMax[tid] = smemMax[tid+2]; if (smemMax[tid] < smemMax[tid+1]) smemMax[tid] = smemMax[tid+1]; if (smemMin[tid] < smemMin[tid+32]) smemMin[tid] = smemMin[tid+32]; if (smemMin[tid] < smemMin[tid+16]) smemMin[tid] = smemMin[tid+16]; if (smemMin[tid] < smemMin[tid+8]) smemMin[tid] = smemMin[tid+8]; if (smemMin[tid] < smemMin[tid+4]) smemMin[tid] = smemMin[tid+4]; if (smemMin[tid] < smemMin[tid+2]) smemMin[tid] = smemMin[tid+2]; if (smemMin[tid] < smemMin[tid+1]) smemMin[tid] = smemMin[tid+1]; } // El thread 0 escribe el resultado de este bloque en la memoria global 
if (tid == 0) { oMax[blockIdx.x] = sdataMax[0]; oMin[blockIdx.x] = sdataMin[0]; } } __global__ void KernelSobel2 (int N,int M,unsigned char *A, int *S, int NS, float factor) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < M && col < NS) { if(col != 0 && row != 0 && col != NS-1 && row != M-1) A[row*N+col*3] = A[row*N+col*3+1] = A[row*N+col*3+2] = (unsigned char)(S[row*NS+col] * factor); else A[row*N+col*3] = A[row*N+col*3+1] = A[row*N+col*3+2] = 0; } } int main(int argc, char** argv) { unsigned int N, M, NS; unsigned int numBytesA, numBytesS, numBytesR; unsigned int nBlocksX, nBlocksY, nBlocksR, nThreads; float TiempoTotal, TiempoKernel, factor; cudaEvent_t E0, E1, E2, E3; unsigned char *d_A; int *d_S, *d_OutMax, *d_OutMin; if (argc != 3) { printf("Usage: ./exe img.bmp prefix\n"); exit(0); } bmpInfoHeader header; unsigned char *image; image = LoadBMP(argv[1], &header); unsigned int N3 = header.width * 3; N = (N3+3) & 0xFFFFFFFC; //fila multiplo de 4 (BMP) M = header.height; NS = header.width; // numero de Threads en cada dimension nThreads = SIZE; // numero de Blocks en cada dimension nBlocksX = (NS+nThreads-1)/nThreads; nBlocksY = (M+nThreads-1)/nThreads; numBytesA = N * M * sizeof(unsigned char); numBytesS = NS * M * sizeof(int); nBlocksR = ((NS * M)+(nThreads*nThreads-1)) / (nThreads*nThreads); numBytesR = nBlocksR *sizeof(int); dim3 dimGrid(nBlocksX, nBlocksY, 1); dim3 dimBlock(nThreads, nThreads, 1); dim3 dimGridR(nBlocksR, 1, 1); dim3 dimBlockR(nThreads * nThreads, 1, 1); cudaEventCreate(&E0); cudaEventCreate(&E1); cudaEventCreate(&E2); cudaEventCreate(&E3); cudaEventRecord(E0, 0); cudaEventSynchronize(E0); // Obtener Memoria en el device cudaMalloc((unsigned char**)&d_A, numBytesA); cudaMalloc((int**)&d_S, numBytesS); cudaMalloc((int**)&d_OutMax, numBytesR); cudaMalloc((int**)&d_OutMin, numBytesR); int *hmax = (int*)malloc(numBytesR); int *hmin = (int*)malloc(numBytesR); // Copiar datos del host al 
device cudaMemcpy(d_A, image, numBytesA, cudaMemcpyHostToDevice); cudaEventRecord(E1, 0); cudaEventSynchronize(E1); // Ejecutar kernels KernelByN<<<dimGrid, dimBlock>>>(N, M, d_A, d_S, NS); KernelSobel1<<<dimGrid, dimBlock>>>(N, M, d_A, d_S, NS); KernelReduction<<<dimGridR,dimBlockR>>>(NS*M, d_S, d_OutMin, d_OutMax); cudaMemcpy(hmax, d_OutMax, numBytesR, cudaMemcpyDeviceToHost); cudaMemcpy(hmin, d_OutMin, numBytesR, cudaMemcpyDeviceToHost); int max = hmax[0]; int min = hmin[0]; for (int i=1; i < nBlocksR; i++) { if (hmax[i] > max) max = hmax[i]; if (hmin[i] < min) min = hmin[i]; } factor = (float)(255.0/(float)(max-min)); KernelSobel2<<<dimGrid, dimBlock>>>(N, M, d_A, d_S, NS, factor); cudaEventRecord(E2, 0); cudaEventSynchronize(E2); // Obtener el resultado desde el host cudaMemcpy(image, d_A, numBytesA, cudaMemcpyDeviceToHost); // Liberar Memoria del device cudaFree(d_A); cudaFree(d_S); cudaFree(d_OutMin); cudaFree(d_OutMax); cudaEventRecord(E3, 0); cudaEventSynchronize(E3); cudaEventElapsedTime(&TiempoTotal, E0, E3); cudaEventElapsedTime(&TiempoKernel, E1, E2); printf("\nEJECUCION\n"); printf("Dimensiones imagen: %dx%d\n", NS, M); printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads * nThreads); printf("nBlocks: %dx%d (%d)\n", nBlocksX, nBlocksY, nBlocksX*nBlocksY); printf("nThreadsR: %dx%d (%d)\n", nThreads*nThreads, 1, nThreads * nThreads); printf("nBlocksR: %dx%d (%d)\n", nBlocksR, 1, nBlocksR); printf("Tiempo Global: %4.6f milseg\n", TiempoTotal); printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel); cudaEventDestroy(E0); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3); char nom[32]; strcpy(nom, argv[2]); strcat(nom, "_"); strcat(nom,argv[1]); SaveBMP(nom, &header, image); }
5c5eeaaf751b8d5663c86f2a5daadd0bebdfffd3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stdio.h" #include <time.h> const int N=10000; const int NBLOCK=1024; __global__ void MatAdd( float *A, float *B, float *C, int N) { /*int j = blockIdx.x * blockDim.x + threadIdx.x; // Compute row index int i = blockIdx.y * blockDim.y + threadIdx.y; // Compute column index int index=i*N+j; // Compute global 1D index if (i < N && j < N) C[index] = A[index] + B[index]; // Compute C element*/ int idHebra = blockIdx.x * blockDim.x + threadIdx.x; if (idHebra < N){ //int inicio = idHebra * N; for (int i = 0; i < N; i++){ int index = idHebra * N + i; C[index] = A[index] + B[index]; } } } int main() { int i; const int NN=N*N; /* pointers to host memory */ /* Allocate arrays A, B and C on host*/ float * A = (float*) malloc(NN*sizeof(float)); float * B = (float*) malloc(NN*sizeof(float)); float * C = (float*) malloc(NN*sizeof(float)); /* pointers to device memory */ float *A_d, *B_d, *C_d; /* Allocate arrays a_d, b_d and c_d on device*/ hipMalloc ((void **) &A_d, sizeof(float)*NN); hipMalloc ((void **) &B_d, sizeof(float)*NN); hipMalloc ((void **) &C_d, sizeof(float)*NN); /* Initialize arrays a and b */ for (i=0; i<NN;i++) { A[i]= (float) 2; B[i]= (float) 2; } clock_t begin_time = clock(); /* Copy data from host memory to device memory */ hipMemcpy(A_d, A, sizeof(float)*NN, hipMemcpyHostToDevice); hipMemcpy(B_d, B, sizeof(float)*NN, hipMemcpyHostToDevice); /* Compute the execution configuration */ /*dim3 threadsPerBlock (16, 16); dim3 numBlocks( ceil ((float)(N)/threadsPerBlock.x), ceil ((float)(N)/threadsPerBlock.y) );*/hipLaunchKernelGGL(( MatAdd) , dim3(ceil((float)N/NBLOCK)), dim3(NBLOCK), 0, 0, A_d, B_d, C_d, N); /* Copy data from deveice memory to host memory */ hipMemcpy(C, C_d, sizeof(float)*NN, hipMemcpyDeviceToHost); double Tgpu = float(clock() - begin_time) / CLOCKS_PER_SEC; printf(" El tiempo consumido es de %f segundos", Tgpu); /* Print c */ /*for (i=0; i<NN;i++) 
printf(" c[%d]=%f\n",i,C[i]);*/ /* Free the memory */ free(A); free(B); free(C); hipFree(A_d); hipFree(B_d);hipFree(C_d); }
5c5eeaaf751b8d5663c86f2a5daadd0bebdfffd3.cu
#include "stdio.h" #include <time.h> const int N=10000; const int NBLOCK=1024; __global__ void MatAdd( float *A, float *B, float *C, int N) { /*int j = blockIdx.x * blockDim.x + threadIdx.x; // Compute row index int i = blockIdx.y * blockDim.y + threadIdx.y; // Compute column index int index=i*N+j; // Compute global 1D index if (i < N && j < N) C[index] = A[index] + B[index]; // Compute C element*/ int idHebra = blockIdx.x * blockDim.x + threadIdx.x; if (idHebra < N){ //int inicio = idHebra * N; for (int i = 0; i < N; i++){ int index = idHebra * N + i; C[index] = A[index] + B[index]; } } } int main() { int i; const int NN=N*N; /* pointers to host memory */ /* Allocate arrays A, B and C on host*/ float * A = (float*) malloc(NN*sizeof(float)); float * B = (float*) malloc(NN*sizeof(float)); float * C = (float*) malloc(NN*sizeof(float)); /* pointers to device memory */ float *A_d, *B_d, *C_d; /* Allocate arrays a_d, b_d and c_d on device*/ cudaMalloc ((void **) &A_d, sizeof(float)*NN); cudaMalloc ((void **) &B_d, sizeof(float)*NN); cudaMalloc ((void **) &C_d, sizeof(float)*NN); /* Initialize arrays a and b */ for (i=0; i<NN;i++) { A[i]= (float) 2; B[i]= (float) 2; } clock_t begin_time = clock(); /* Copy data from host memory to device memory */ cudaMemcpy(A_d, A, sizeof(float)*NN, cudaMemcpyHostToDevice); cudaMemcpy(B_d, B, sizeof(float)*NN, cudaMemcpyHostToDevice); /* Compute the execution configuration */ /*dim3 threadsPerBlock (16, 16); dim3 numBlocks( ceil ((float)(N)/threadsPerBlock.x), ceil ((float)(N)/threadsPerBlock.y) );*/ MatAdd <<<ceil((float)N/NBLOCK), NBLOCK>>> (A_d, B_d, C_d, N); /* Copy data from deveice memory to host memory */ cudaMemcpy(C, C_d, sizeof(float)*NN, cudaMemcpyDeviceToHost); double Tgpu = float(clock() - begin_time) / CLOCKS_PER_SEC; printf(" El tiempo consumido es de %f segundos", Tgpu); /* Print c */ /*for (i=0; i<NN;i++) printf(" c[%d]=%f\n",i,C[i]);*/ /* Free the memory */ free(A); free(B); free(C); cudaFree(A_d); 
cudaFree(B_d);cudaFree(C_d); }
785d4feda8cec41c5f48c546d230d6d4476b0f54.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <algorithm> #include <chrono> #include <random> #include <hip/hip_runtime.h> #include "reference.h" __global__ void zero_point ( const float* x_min, const float* x_max, int32_t qmin, int32_t qmax, int size, bool preserve_sparsity, float* scale, int32_t* zero_point) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { float min_val = x_min[i]; float max_val = x_max[i]; if (min_val < 0 && max_val > 0 && preserve_sparsity) { int symmetric_qmin = -((qmax - qmin) / 2 + 1); int symmetric_qmax = (qmax - qmin) / 2; double max_scale = fmax( fabs(min_val / symmetric_qmin), fabs(max_val / symmetric_qmax)); min_val = max_scale * symmetric_qmin; max_val = max_scale * symmetric_qmax; } // We extend the [min, max] interval to ensure that it contains 0. // Otherwise, we would not meet the requirement that 0 be an exactly // representable value. min_val = fminf(min_val, 0.f); max_val = fmaxf(max_val, 0.f); scale[i] = (static_cast<double>(max_val) - min_val) / (qmax - qmin); // Moving this check outside this function would result in extra Device to // Host copy of the min and max val which would result in a perf hit. if (scale[i] == 0.0f || isinf(1.0f / scale[i])) { scale[i] = 0.1; } double zero_point_from_min = qmin - min_val / static_cast<double>(scale[i]); double zero_point_from_max = qmax - max_val / static_cast<double>(scale[i]); double zero_point_from_min_error = abs(qmin) + abs(min_val / static_cast<double>(scale[i])); double zero_point_from_max_error = abs(qmax) + abs(max_val / static_cast<double>(scale[i])); double initial_zero_point = zero_point_from_min_error < zero_point_from_max_error ? zero_point_from_min : zero_point_from_max; // Note: preserve_sparsity here means symmetric quantization. // for symmetric quantization, we force zero_point // to be a middle value between qmin and qmax. 
// If either min or max is 0, then we just use 0 as zero_point. if (min_val < 0 && max_val > 0 && preserve_sparsity) { initial_zero_point = static_cast<double>(qmin + qmax) / 2; } // Now we need to nudge the zero point to be an integer // (our zero points are integer, and this is motivated by the // requirement to be able to represent the real value "0" exactly as a // quantized value, which is required in multiple places, for example in // Im2col with zero padding). int32_t nudged_zero_point = 0; if (initial_zero_point < qmin) { nudged_zero_point = qmin; } else if (initial_zero_point > qmax) { nudged_zero_point = qmax; } else { nudged_zero_point = nearbyint(initial_zero_point); } zero_point[i] = nudged_zero_point; } } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <number of min/max values> <repeat>\n", argv[0]); return 1; } const int size = atoi(argv[1]); const int repeat = atoi(argv[2]); int32_t qmin = -127; int32_t qmax = 127; bool preserve_sparsity = true; size_t size_bytes = sizeof(float) * size; float *scale = (float*) malloc (size_bytes); float *scale_ref = (float*) malloc (size_bytes); int32_t *zp = (int32_t*) malloc (size_bytes); int32_t *zp_ref = (int32_t*) malloc (size_bytes); float *min = (float*) malloc (size_bytes); float *max = (float*) malloc (size_bytes); std::default_random_engine g (123); std::uniform_real_distribution<float> distr (-1.f, 1.f); for (int i = 0; i < size; i++) { min[i] = distr(g); max[i] = distr(g); } reference (min, max, qmin, qmax, size, preserve_sparsity, scale_ref, zp_ref); int32_t *d_zp; hipMalloc((void**)&d_zp, size_bytes); float *d_scale; hipMalloc((void**)&d_scale, size_bytes); float *d_min; hipMalloc((void**)&d_min, size_bytes); hipMemcpy(d_min, min, size_bytes, hipMemcpyHostToDevice); float *d_max; hipMalloc((void**)&d_max, size_bytes); hipMemcpy(d_max, max, size_bytes, hipMemcpyHostToDevice); const int block_size = 256; dim3 num_blocks = (size + block_size - 1) / block_size; dim3 
num_threads(block_size); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( zero_point), dim3(num_blocks), dim3(num_threads), 0, 0, d_min, d_max, qmin, qmax, size, preserve_sparsity, d_scale, d_zp); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of zero-point kernel: %f (us)\n", (time * 1e-3f) / repeat); hipMemcpy(zp, d_zp, size_bytes, hipMemcpyDeviceToHost); hipMemcpy(scale, d_scale, size_bytes, hipMemcpyDeviceToHost); bool ok = true; for (int i = 0; i < size; i++) { if (zp[i] != zp_ref[i] || scale[i] - scale_ref[i] > 1e-3f) { ok = false; break; } } printf("%s\n", ok ? "PASS" : "FAIL"); hipFree(d_zp); hipFree(d_scale); hipFree(d_min); hipFree(d_max); free(zp); free(scale); free(zp_ref); free(scale_ref); free(min); free(max); return 0; }
785d4feda8cec41c5f48c546d230d6d4476b0f54.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <algorithm> #include <chrono> #include <random> #include <cuda.h> #include "reference.h" __global__ void zero_point ( const float* x_min, const float* x_max, int32_t qmin, int32_t qmax, int size, bool preserve_sparsity, float* scale, int32_t* zero_point) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { float min_val = x_min[i]; float max_val = x_max[i]; if (min_val < 0 && max_val > 0 && preserve_sparsity) { int symmetric_qmin = -((qmax - qmin) / 2 + 1); int symmetric_qmax = (qmax - qmin) / 2; double max_scale = fmax( fabs(min_val / symmetric_qmin), fabs(max_val / symmetric_qmax)); min_val = max_scale * symmetric_qmin; max_val = max_scale * symmetric_qmax; } // We extend the [min, max] interval to ensure that it contains 0. // Otherwise, we would not meet the requirement that 0 be an exactly // representable value. min_val = fminf(min_val, 0.f); max_val = fmaxf(max_val, 0.f); scale[i] = (static_cast<double>(max_val) - min_val) / (qmax - qmin); // Moving this check outside this function would result in extra Device to // Host copy of the min and max val which would result in a perf hit. if (scale[i] == 0.0f || isinf(1.0f / scale[i])) { scale[i] = 0.1; } double zero_point_from_min = qmin - min_val / static_cast<double>(scale[i]); double zero_point_from_max = qmax - max_val / static_cast<double>(scale[i]); double zero_point_from_min_error = abs(qmin) + abs(min_val / static_cast<double>(scale[i])); double zero_point_from_max_error = abs(qmax) + abs(max_val / static_cast<double>(scale[i])); double initial_zero_point = zero_point_from_min_error < zero_point_from_max_error ? zero_point_from_min : zero_point_from_max; // Note: preserve_sparsity here means symmetric quantization. // for symmetric quantization, we force zero_point // to be a middle value between qmin and qmax. // If either min or max is 0, then we just use 0 as zero_point. 
if (min_val < 0 && max_val > 0 && preserve_sparsity) { initial_zero_point = static_cast<double>(qmin + qmax) / 2; } // Now we need to nudge the zero point to be an integer // (our zero points are integer, and this is motivated by the // requirement to be able to represent the real value "0" exactly as a // quantized value, which is required in multiple places, for example in // Im2col with zero padding). int32_t nudged_zero_point = 0; if (initial_zero_point < qmin) { nudged_zero_point = qmin; } else if (initial_zero_point > qmax) { nudged_zero_point = qmax; } else { nudged_zero_point = nearbyint(initial_zero_point); } zero_point[i] = nudged_zero_point; } } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <number of min/max values> <repeat>\n", argv[0]); return 1; } const int size = atoi(argv[1]); const int repeat = atoi(argv[2]); int32_t qmin = -127; int32_t qmax = 127; bool preserve_sparsity = true; size_t size_bytes = sizeof(float) * size; float *scale = (float*) malloc (size_bytes); float *scale_ref = (float*) malloc (size_bytes); int32_t *zp = (int32_t*) malloc (size_bytes); int32_t *zp_ref = (int32_t*) malloc (size_bytes); float *min = (float*) malloc (size_bytes); float *max = (float*) malloc (size_bytes); std::default_random_engine g (123); std::uniform_real_distribution<float> distr (-1.f, 1.f); for (int i = 0; i < size; i++) { min[i] = distr(g); max[i] = distr(g); } reference (min, max, qmin, qmax, size, preserve_sparsity, scale_ref, zp_ref); int32_t *d_zp; cudaMalloc((void**)&d_zp, size_bytes); float *d_scale; cudaMalloc((void**)&d_scale, size_bytes); float *d_min; cudaMalloc((void**)&d_min, size_bytes); cudaMemcpy(d_min, min, size_bytes, cudaMemcpyHostToDevice); float *d_max; cudaMalloc((void**)&d_max, size_bytes); cudaMemcpy(d_max, max, size_bytes, cudaMemcpyHostToDevice); const int block_size = 256; dim3 num_blocks = (size + block_size - 1) / block_size; dim3 num_threads(block_size); cudaDeviceSynchronize(); auto start = 
std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { zero_point<<<num_blocks, num_threads>>>( d_min, d_max, qmin, qmax, size, preserve_sparsity, d_scale, d_zp); } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of zero-point kernel: %f (us)\n", (time * 1e-3f) / repeat); cudaMemcpy(zp, d_zp, size_bytes, cudaMemcpyDeviceToHost); cudaMemcpy(scale, d_scale, size_bytes, cudaMemcpyDeviceToHost); bool ok = true; for (int i = 0; i < size; i++) { if (zp[i] != zp_ref[i] || scale[i] - scale_ref[i] > 1e-3f) { ok = false; break; } } printf("%s\n", ok ? "PASS" : "FAIL"); cudaFree(d_zp); cudaFree(d_scale); cudaFree(d_min); cudaFree(d_max); free(zp); free(scale); free(zp_ref); free(scale_ref); free(min); free(max); return 0; }
6a06a975f4580bcaeb2e4a971ff7ef8d200c5e29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../CUDA/CUDA_func.h" #include "DenseLayer.h" using namespace std; namespace NN { namespace Layers { DenseLayer::DenseLayer(vector<int> dependencies, int layer_size) { this->dependencies = dependencies; output_size = layer_size; hipMallocManaged(&output, output_size * sizeof(float)); hipMallocManaged(&output_gradient, output_size * sizeof(float)); hipMemset(output, 0.0f, output_size * sizeof(float)); hipMemset(output_gradient, 0.0f, output_size * sizeof(float)); } void DenseLayer::compute() { float * prev_layer_output = prev_layer->get_output_iterator(); /*for(int i = 0; i < layer_size; i++) { output[i] = 0.0f; for(int j = 0; j < neuron_size; j++) { output[i] += parameters[i*(neuron_size + 1) + j] * prev_layer_output[j]; } output[i] += parameters[i*(neuron_size + 1) + neuron_size]; }*/ int block_size = (output_size + 255) / 256; hipLaunchKernelGGL(( NN::CUDA::compute_dense_layer), dim3(block_size), dim3(256), 0, 0, prev_layer_output, parameters, output, neuron_size, output_size); hipDeviceSynchronize(); } void DenseLayer::backpropagate() { float * prev_layer_output = prev_layer->get_output_iterator(); float * prev_layer_output_gradient = prev_layer->get_output_gradient_iterator(); /*for(int i = 0; i < neuron_size; i++) { prev_layer_output_gradient[i] = 0.0f; } for(int i = 0; i < layer_size; i++) { for(int j = 0; j < neuron_size; j++) { gradient[i*(neuron_size + 1) + j] += output_gradient[i] * prev_layer_output[j]; prev_layer_output_gradient[j] += output_gradient[i] * parameters[i*(neuron_size + 1) + j]; } gradient[i*(neuron_size + 1) + neuron_size] += output_gradient[i]; }*/ int block_size = (neuron_size + 255) / 256; hipLaunchKernelGGL(( NN::CUDA::backprop_dense_layer_input_gradient) , dim3(block_size), dim3(256) , 0, 0, prev_layer_output, prev_layer_output_gradient, parameters, gradient, output_gradient, neuron_size, output_size); hipDeviceSynchronize(); block_size = 
(output_size + 255) / 256; NN::CUDA::backprop_dense_layer_bias << <block_size, 256 >> > (gradient, output_gradient, neuron_size, output_size); hipDeviceSynchronize(); } int DenseLayer::get_parameters_size() { return (neuron_size + 1) * output_size; } void DenseLayer::update_dependencies(vector<NN::Layers::Layer *> layer_dependencies) { prev_layer = layer_dependencies[0]; neuron_size = prev_layer->get_output_size(); } void DenseLayer::save(NN::File& file) { int id = 3; file.save(id); save_dependencies(file); file.save(output_size); } void DenseLayer::load(NN::File& file) { load_dependencies(file); file.load(output_size); hipMallocManaged(&output, output_size * sizeof(float)); hipMallocManaged(&output_gradient, output_size * sizeof(float)); hipMemset(output, 0.0f, output_size * sizeof(float)); hipMemset(output_gradient, 0.0f, output_size * sizeof(float)); } DenseLayer::~DenseLayer() = default; } }
6a06a975f4580bcaeb2e4a971ff7ef8d200c5e29.cu
#include "../CUDA/CUDA_func.h" #include "DenseLayer.h" using namespace std; namespace NN { namespace Layers { DenseLayer::DenseLayer(vector<int> dependencies, int layer_size) { this->dependencies = dependencies; output_size = layer_size; cudaMallocManaged(&output, output_size * sizeof(float)); cudaMallocManaged(&output_gradient, output_size * sizeof(float)); cudaMemset(output, 0.0f, output_size * sizeof(float)); cudaMemset(output_gradient, 0.0f, output_size * sizeof(float)); } void DenseLayer::compute() { float * prev_layer_output = prev_layer->get_output_iterator(); /*for(int i = 0; i < layer_size; i++) { output[i] = 0.0f; for(int j = 0; j < neuron_size; j++) { output[i] += parameters[i*(neuron_size + 1) + j] * prev_layer_output[j]; } output[i] += parameters[i*(neuron_size + 1) + neuron_size]; }*/ int block_size = (output_size + 255) / 256; NN::CUDA::compute_dense_layer<<<block_size, 256>>>(prev_layer_output, parameters, output, neuron_size, output_size); cudaDeviceSynchronize(); } void DenseLayer::backpropagate() { float * prev_layer_output = prev_layer->get_output_iterator(); float * prev_layer_output_gradient = prev_layer->get_output_gradient_iterator(); /*for(int i = 0; i < neuron_size; i++) { prev_layer_output_gradient[i] = 0.0f; } for(int i = 0; i < layer_size; i++) { for(int j = 0; j < neuron_size; j++) { gradient[i*(neuron_size + 1) + j] += output_gradient[i] * prev_layer_output[j]; prev_layer_output_gradient[j] += output_gradient[i] * parameters[i*(neuron_size + 1) + j]; } gradient[i*(neuron_size + 1) + neuron_size] += output_gradient[i]; }*/ int block_size = (neuron_size + 255) / 256; NN::CUDA::backprop_dense_layer_input_gradient <<<block_size, 256 >>> (prev_layer_output, prev_layer_output_gradient, parameters, gradient, output_gradient, neuron_size, output_size); cudaDeviceSynchronize(); block_size = (output_size + 255) / 256; NN::CUDA::backprop_dense_layer_bias << <block_size, 256 >> > (gradient, output_gradient, neuron_size, output_size); 
cudaDeviceSynchronize(); } int DenseLayer::get_parameters_size() { return (neuron_size + 1) * output_size; } void DenseLayer::update_dependencies(vector<NN::Layers::Layer *> layer_dependencies) { prev_layer = layer_dependencies[0]; neuron_size = prev_layer->get_output_size(); } void DenseLayer::save(NN::File& file) { int id = 3; file.save(id); save_dependencies(file); file.save(output_size); } void DenseLayer::load(NN::File& file) { load_dependencies(file); file.load(output_size); cudaMallocManaged(&output, output_size * sizeof(float)); cudaMallocManaged(&output_gradient, output_size * sizeof(float)); cudaMemset(output, 0.0f, output_size * sizeof(float)); cudaMemset(output_gradient, 0.0f, output_size * sizeof(float)); } DenseLayer::~DenseLayer() = default; } }
d83093a9f6c062a7153b71b3c11c9ff3eec33bad.hip
// !!! This is a file automatically generated by hipify!!! #include <rocblas.h> #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/random.h> #include <thrust/sequence.h> #include <stdio.h> #include <iostream> #include "Utilities.cuh" #include "TimingGPU.cuh" #include "csv.hpp" using namespace std; int main( int argc, char* argv[] ) { int N = atoi(argv[2]); thrust::host_vector<long> h_vec_1(N); thrust::host_vector<long> h_vec_2(N); const string csv_file = std::string(argv[1]); vector<vector<string>> data; Csv objCsv(csv_file); if (!objCsv.getCsv(data)) { cout << "read ERROR" << endl; return 1; } for (int row = 0; row < data.size(); row++) { vector<string> rec = data[row]; std::string timestamp = rec[0]; h_vec_1.push_back(std::stof(rec[0].c_str())); h_vec_2[row] = std::stof(rec[1]); } thrust::host_vector<long> dout_2(N); thrust::host_vector<long> dout_3(N); thrust::host_vector<long> d_vec_2(N); thrust::copy(h_vec_2.begin(), h_vec_2.end(), d_vec_2.begin()); thrust::inclusive_scan(d_vec_2.begin(), d_vec_2.end(), dout_2.begin()); thrust::exclusive_scan(d_vec_2.begin(), d_vec_2.end(), dout_3.begin()); for(int i=0;i<N;i++) { // cout << d_vec_2[i] << "," << dout_2[i] << "," << dout_3[i] << endl; cout << d_vec_2[i] << "," << dout_3[i] << endl; } return 0; }
d83093a9f6c062a7153b71b3c11c9ff3eec33bad.cu
#include <cublas_v2.h> #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/random.h> #include <thrust/sequence.h> #include <stdio.h> #include <iostream> #include "Utilities.cuh" #include "TimingGPU.cuh" #include "csv.hpp" using namespace std; int main( int argc, char* argv[] ) { int N = atoi(argv[2]); thrust::host_vector<long> h_vec_1(N); thrust::host_vector<long> h_vec_2(N); const string csv_file = std::string(argv[1]); vector<vector<string>> data; Csv objCsv(csv_file); if (!objCsv.getCsv(data)) { cout << "read ERROR" << endl; return 1; } for (int row = 0; row < data.size(); row++) { vector<string> rec = data[row]; std::string timestamp = rec[0]; h_vec_1.push_back(std::stof(rec[0].c_str())); h_vec_2[row] = std::stof(rec[1]); } thrust::host_vector<long> dout_2(N); thrust::host_vector<long> dout_3(N); thrust::host_vector<long> d_vec_2(N); thrust::copy(h_vec_2.begin(), h_vec_2.end(), d_vec_2.begin()); thrust::inclusive_scan(d_vec_2.begin(), d_vec_2.end(), dout_2.begin()); thrust::exclusive_scan(d_vec_2.begin(), d_vec_2.end(), dout_3.begin()); for(int i=0;i<N;i++) { // cout << d_vec_2[i] << "," << dout_2[i] << "," << dout_3[i] << endl; cout << d_vec_2[i] << "," << dout_3[i] << endl; } return 0; }
005d5fc7b882ed70c042dedd7fe54c691d8fda62.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/layers/relu_layer.hpp" #include "HugeCTR/include/layers/element_wise_function.hpp" #include <algorithm> #include <functional> #include "HugeCTR/include/utils.hpp" #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { ReluLayer::ReluLayer(const std::shared_ptr<Tensor<float>>& in_tensor, const std::shared_ptr<Tensor<float>>& out_tensor, int device_id) : Layer(device_id) { assert(get_size_from_dims(in_tensor->get_dims()) == get_size_from_dims(out_tensor->get_dims())); in_tensors_.emplace_back(in_tensor); out_tensors_.emplace_back(out_tensor); } void ReluLayer::fprop(hipStream_t stream) { const auto& in_tensor = in_tensors_[0]; const auto& out_tensor = out_tensors_[0]; auto fop = [] __device__(float in) { return (in < 0) ? 0 : in; }; internal::ElementWiseFunctor functor; functor.forward_evaluate(*in_tensor, *out_tensor, get_device_id(), fop, stream); } void ReluLayer::bprop(hipStream_t stream) { const auto& in_tensor = in_tensors_[0]; const auto& out_tensor = out_tensors_[0]; auto bop = [] __device__(float d_out, float d_in) { return (d_in < 0) ? 0 : d_out; }; internal::ElementWiseFunctor functor; functor.backward_evaluate(*in_tensor, *out_tensor, get_device_id(), bop, stream); } } // namespace HugeCTR
005d5fc7b882ed70c042dedd7fe54c691d8fda62.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/layers/relu_layer.hpp" #include "HugeCTR/include/layers/element_wise_function.hpp" #include <algorithm> #include <functional> #include "HugeCTR/include/utils.hpp" #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { ReluLayer::ReluLayer(const std::shared_ptr<Tensor<float>>& in_tensor, const std::shared_ptr<Tensor<float>>& out_tensor, int device_id) : Layer(device_id) { assert(get_size_from_dims(in_tensor->get_dims()) == get_size_from_dims(out_tensor->get_dims())); in_tensors_.emplace_back(in_tensor); out_tensors_.emplace_back(out_tensor); } void ReluLayer::fprop(cudaStream_t stream) { const auto& in_tensor = in_tensors_[0]; const auto& out_tensor = out_tensors_[0]; auto fop = [] __device__(float in) { return (in < 0) ? 0 : in; }; internal::ElementWiseFunctor functor; functor.forward_evaluate(*in_tensor, *out_tensor, get_device_id(), fop, stream); } void ReluLayer::bprop(cudaStream_t stream) { const auto& in_tensor = in_tensors_[0]; const auto& out_tensor = out_tensors_[0]; auto bop = [] __device__(float d_out, float d_in) { return (d_in < 0) ? 0 : d_out; }; internal::ElementWiseFunctor functor; functor.backward_evaluate(*in_tensor, *out_tensor, get_device_id(), bop, stream); } } // namespace HugeCTR
b92e629ddb045205aa1c1f5433ba2713b272291d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <algorithm> using namespace std; #define N 4096 #define RADIUS 3 #define BLOCK_SIZE 16 __global__ void stencil_1d(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2*RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex]; if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = in[gindex - RADIUS]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; } // Synchronize (ensure all the data is available) __syncthreads(); // Apply the stencil int result = 0; for (int offset = -RADIUS; offset <= RADIUS; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; } void fill_ints(int *x, int n) { fill_n(x, n, 1); } int main(void) { int *in, *out; // host copies of a, b, c int *d_in, *d_out; // device copies of a, b, c // Alloc space for host copies and setup values int size = (N + 2*RADIUS) * sizeof(int); in = (int *)malloc(size); fill_ints(in, N + 2*RADIUS); out = (int *)malloc(size); fill_ints(out, N + 2*RADIUS); // Alloc space for device copies hipMalloc((void **)&d_in, size); hipMalloc((void **)&d_out, size); // Copy to device hipMemcpy(d_in, in, size, hipMemcpyHostToDevice); hipMemcpy(d_out, out, size, hipMemcpyHostToDevice); // Launch stencil_1d() kernel on GPU hipLaunchKernelGGL(( stencil_1d), dim3(N/BLOCK_SIZE),dim3(BLOCK_SIZE), 0, 0, d_in+RADIUS, d_out+RADIUS); // Copy result back to host hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); // Error Checking for (int i = 0; i < N + 2*RADIUS; i++) { if (i<RADIUS || i>=N+RADIUS){ if (out[i] != 1) printf("Mismatch at index %d, was: %d, should be: %d\n", i, out[i], 1); } else { if (out[i] != 1 + 2*RADIUS) printf("Mismatch at index %d, was: %d, should be: %d\n", i, out[i], 1 + 2*RADIUS); } } // Cleanup free(in); free(out); hipFree(d_in); hipFree(d_out); 
printf("Success!\n"); return 0; }
b92e629ddb045205aa1c1f5433ba2713b272291d.cu
#include <stdio.h> #include <algorithm> using namespace std; #define N 4096 #define RADIUS 3 #define BLOCK_SIZE 16 __global__ void stencil_1d(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2*RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex]; if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = in[gindex - RADIUS]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; } // Synchronize (ensure all the data is available) __syncthreads(); // Apply the stencil int result = 0; for (int offset = -RADIUS; offset <= RADIUS; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; } void fill_ints(int *x, int n) { fill_n(x, n, 1); } int main(void) { int *in, *out; // host copies of a, b, c int *d_in, *d_out; // device copies of a, b, c // Alloc space for host copies and setup values int size = (N + 2*RADIUS) * sizeof(int); in = (int *)malloc(size); fill_ints(in, N + 2*RADIUS); out = (int *)malloc(size); fill_ints(out, N + 2*RADIUS); // Alloc space for device copies cudaMalloc((void **)&d_in, size); cudaMalloc((void **)&d_out, size); // Copy to device cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); cudaMemcpy(d_out, out, size, cudaMemcpyHostToDevice); // Launch stencil_1d() kernel on GPU stencil_1d<<<N/BLOCK_SIZE,BLOCK_SIZE>>>(d_in+RADIUS, d_out+RADIUS); // Copy result back to host cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); // Error Checking for (int i = 0; i < N + 2*RADIUS; i++) { if (i<RADIUS || i>=N+RADIUS){ if (out[i] != 1) printf("Mismatch at index %d, was: %d, should be: %d\n", i, out[i], 1); } else { if (out[i] != 1 + 2*RADIUS) printf("Mismatch at index %d, was: %d, should be: %d\n", i, out[i], 1 + 2*RADIUS); } } // Cleanup free(in); free(out); cudaFree(d_in); cudaFree(d_out); printf("Success!\n"); return 0; }
f565c78a11609a516edca7cd50de41970db2cd12.hip
// !!! This is a file automatically generated by hipify!!! #include "fir_gpu.h" #include "cuda_timer.h" #include <iostream> #include <hip/hip_runtime.h> #define BLOCK_SIZE 64 // Baseline __global__ void fir_kernel1(const float *coeffs, const float *input, float *output, int length, int filterLength) { int id = blockIdx.x * blockDim.x + threadIdx.x; //if(id < length-filterLength){ // TODO float sum = 0.0f; for(int i = 0; i < filterLength; i++) { sum += coeffs[i]*input[i+id]; } output[id] = sum; } //} // Coefficients in shared memory // Here we suppose that filterLength and BLOCK_SIZE is always 64 __global__ void fir_kernel2(const float *coeffs, const float *input, float *output, int length, int filterLength) { //TODO int id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float c[64]; //for(int i=0; i<64;i++) //{c[i]=coeffs[i];} c[threadIdx.x] = coeffs[threadIdx.x]; __syncthreads(); // TODO float sum = 0.0f; for(int i = 0; i < filterLength; ++i) { sum += c[i]*input[i+id]; output[id] = sum; } } // Coefficients and inputs in shared memory // Here we suppose that filterLength and BLOCK_SIZE is always 64 __global__ void fir_kernel3(const float *coeffs, const float *input, float *output, int length, int filterLength) { // TODO int id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float c[64]; c[threadIdx.x] = coeffs[threadIdx.x]; __shared__ float k[BLOCK_SIZE+64]; k[threadIdx.x] = input[blockDim.x*blockIdx.x + threadIdx.x]; k[threadIdx.x+BLOCK_SIZE] = input[blockDim.x*blockIdx.x + threadIdx.x+BLOCK_SIZE]; __syncthreads(); float sum = 0.0f; for(int i = 0; i < filterLength; ++i) { sum += c[i]*k[threadIdx.x+i]; } output[id] = sum; } inline int divup(int a, int b) { if (a % b) return a / b + 1; else return a / b; } void fir_gpu(const float *coeffs, const float *input, float *output, int length, int filterLength) { const int output_size = length - filterLength; CudaSynchronizedTimer timer; int grid_size = divup( length, BLOCK_SIZE); // calculate the grid size const 
int block_size = BLOCK_SIZE; dim3 block(block_size, 1, 1); dim3 grid(grid_size, 1, 1); timer.start(); // TODO Launch kernel here //void fir_kernel1(const float *coeffs, const float *input, float *output, int length, int filterLength) // fir_kernel1<<<grid,block>>>(coeffs, input, output, output_size, filterLength); //fir_kernel2<<<grid,block>>>(coeffs, input, output, output_size, 64); hipLaunchKernelGGL(( fir_kernel3), dim3(grid),dim3(block), 0, 0, coeffs, input, output, output_size, 64); timer.stop(); hipDeviceSynchronize(); CudaCheckError(); float time_gpu = timer.getElapsed(); //std::cout << "Kernel Time: " << time_gpu << "ms\n"; }
f565c78a11609a516edca7cd50de41970db2cd12.cu
#include "fir_gpu.h" #include "cuda_timer.h" #include <iostream> #include <cuda_runtime.h> #define BLOCK_SIZE 64 // Baseline __global__ void fir_kernel1(const float *coeffs, const float *input, float *output, int length, int filterLength) { int id = blockIdx.x * blockDim.x + threadIdx.x; //if(id < length-filterLength){ // TODO float sum = 0.0f; for(int i = 0; i < filterLength; i++) { sum += coeffs[i]*input[i+id]; } output[id] = sum; } //} // Coefficients in shared memory // Here we suppose that filterLength and BLOCK_SIZE is always 64 __global__ void fir_kernel2(const float *coeffs, const float *input, float *output, int length, int filterLength) { //TODO int id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float c[64]; //for(int i=0; i<64;i++) //{c[i]=coeffs[i];} c[threadIdx.x] = coeffs[threadIdx.x]; __syncthreads(); // TODO float sum = 0.0f; for(int i = 0; i < filterLength; ++i) { sum += c[i]*input[i+id]; output[id] = sum; } } // Coefficients and inputs in shared memory // Here we suppose that filterLength and BLOCK_SIZE is always 64 __global__ void fir_kernel3(const float *coeffs, const float *input, float *output, int length, int filterLength) { // TODO int id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float c[64]; c[threadIdx.x] = coeffs[threadIdx.x]; __shared__ float k[BLOCK_SIZE+64]; k[threadIdx.x] = input[blockDim.x*blockIdx.x + threadIdx.x]; k[threadIdx.x+BLOCK_SIZE] = input[blockDim.x*blockIdx.x + threadIdx.x+BLOCK_SIZE]; __syncthreads(); float sum = 0.0f; for(int i = 0; i < filterLength; ++i) { sum += c[i]*k[threadIdx.x+i]; } output[id] = sum; } inline int divup(int a, int b) { if (a % b) return a / b + 1; else return a / b; } void fir_gpu(const float *coeffs, const float *input, float *output, int length, int filterLength) { const int output_size = length - filterLength; CudaSynchronizedTimer timer; int grid_size = divup( length, BLOCK_SIZE); // calculate the grid size const int block_size = BLOCK_SIZE; dim3 block(block_size, 1, 1); dim3 
grid(grid_size, 1, 1); timer.start(); // TODO Launch kernel here //void fir_kernel1(const float *coeffs, const float *input, float *output, int length, int filterLength) // fir_kernel1<<<grid,block>>>(coeffs, input, output, output_size, filterLength); //fir_kernel2<<<grid,block>>>(coeffs, input, output, output_size, 64); fir_kernel3<<<grid,block>>>(coeffs, input, output, output_size, 64); timer.stop(); cudaDeviceSynchronize(); CudaCheckError(); float time_gpu = timer.getElapsed(); //std::cout << "Kernel Time: " << time_gpu << "ms\n"; }
f6ce0ecb528388d650e91f3b16fa2867e0ae072b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************************** deldopoffs.c Takes the delay-correction polynomial for a delay-doppler set and figures out the COM delay and doppler corrections (in units of image rows and columns) for each frame. Modified 2015 June 3 by CM: Implement smearing for the "fit" and "write" actions Modified 2006 June 21 by CM: Changed delres to del_per_pixel and dopres to dop_per_pixel *****************************************************************************************/ extern "C" { #include "../shape/head.h" } __global__ void deldopoffs_krnl(struct dat_t *ddat, int s, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; int k, n; double del, dop, arg, x; if (f < nframes) { for (k=0; k<ddat->set[s].desc.deldop.nviews; k++) { x = 1.0; dop = 0.0; del = ddat->set[s].desc.deldop.delcor.a[0].val; arg = ddat->set[s].desc.deldop.frame[f].view[k].t - ddat->set[s].desc.deldop.delcor.t0; for (n=1; n<=ddat->set[s].desc.deldop.delcor.n; n++) { //printf("delcor.a[%i].val=%g\n", n, ddat->set[s].desc.deldop.delcor.a[n].val); dop += n*ddat->set[s].desc.deldop.delcor.a[n].val*x; del += ddat->set[s].desc.deldop.delcor.a[n].val*(x*=arg); // printf("dop at n=%i: %g\n", n, dop); // printf("del at n=%i: %g\n", n, del); } /* del has units of usec */ ddat->set[s].desc.deldop.frame[f].view[k].deloff = del/ddat->set[s].desc.deldop.del_per_pixel; /* dop has units of usec/day and there are 86400 sec/day */ ddat->set[s].desc.deldop.frame[f].view[k].dopoff = -dop*ddat->set[s].desc.deldop.Ftx / (ddat->set[s].desc.deldop.dop_per_pixel*86400.0); } } } __global__ void deldopoffs_MFS_krnl(struct dat_t *ddat, int s, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; int k, n; double del, dop, arg, x; if (f < nframes) { for (k=0; k<ddat->set[s].desc.deldop.frame[f].nviews; k++) 
{ x = 1.0; dop = 0.0; del = ddat->set[s].desc.deldop.frame[f].delcor.a[0].val; arg = ddat->set[s].desc.deldop.frame[f].view[k].t - ddat->set[s].desc.deldop.frame[f].delcor.t0; for (n=1; n<=ddat->set[s].desc.deldop.frame[f].delcor.n; n++) { dop += n*ddat->set[s].desc.deldop.frame[f].delcor.a[n].val*x; del += ddat->set[s].desc.deldop.frame[f].delcor.a[n].val*(x*=arg); } /* del has units of usec */ ddat->set[s].desc.deldop.frame[f].view[k].deloff = del/ddat->set[s].desc.deldop.frame[f].del_per_pixel; /* dop has units of usec/day and there are 86400 sec/day */ ddat->set[s].desc.deldop.frame[f].view[k].dopoff = -dop*ddat->set[s].desc.deldop.frame[f].Ftx / (ddat->set[s].desc.deldop.frame[f].dop_per_pixel*86400.0); } } } __host__ void deldopoffs_gpu(struct dat_t *ddat, int s, int nframes) { dim3 BLK,THD; /* Launch nframes-threaded kernel */ THD.x = nframes; hipLaunchKernelGGL(( deldopoffs_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, s, nframes); checkErrorAfterKernelLaunch("deldopoffs_cuda_krnl (deldopoffs_cuda)"); }
f6ce0ecb528388d650e91f3b16fa2867e0ae072b.cu
/***************************************************************************************** deldopoffs.c Takes the delay-correction polynomial for a delay-doppler set and figures out the COM delay and doppler corrections (in units of image rows and columns) for each frame. Modified 2015 June 3 by CM: Implement smearing for the "fit" and "write" actions Modified 2006 June 21 by CM: Changed delres to del_per_pixel and dopres to dop_per_pixel *****************************************************************************************/ extern "C" { #include "../shape/head.h" } __global__ void deldopoffs_krnl(struct dat_t *ddat, int s, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; int k, n; double del, dop, arg, x; if (f < nframes) { for (k=0; k<ddat->set[s].desc.deldop.nviews; k++) { x = 1.0; dop = 0.0; del = ddat->set[s].desc.deldop.delcor.a[0].val; arg = ddat->set[s].desc.deldop.frame[f].view[k].t - ddat->set[s].desc.deldop.delcor.t0; for (n=1; n<=ddat->set[s].desc.deldop.delcor.n; n++) { //printf("delcor.a[%i].val=%g\n", n, ddat->set[s].desc.deldop.delcor.a[n].val); dop += n*ddat->set[s].desc.deldop.delcor.a[n].val*x; del += ddat->set[s].desc.deldop.delcor.a[n].val*(x*=arg); // printf("dop at n=%i: %g\n", n, dop); // printf("del at n=%i: %g\n", n, del); } /* del has units of usec */ ddat->set[s].desc.deldop.frame[f].view[k].deloff = del/ddat->set[s].desc.deldop.del_per_pixel; /* dop has units of usec/day and there are 86400 sec/day */ ddat->set[s].desc.deldop.frame[f].view[k].dopoff = -dop*ddat->set[s].desc.deldop.Ftx / (ddat->set[s].desc.deldop.dop_per_pixel*86400.0); } } } __global__ void deldopoffs_MFS_krnl(struct dat_t *ddat, int s, int nframes) { /* nframes-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; int k, n; double del, dop, arg, x; if (f < nframes) { for (k=0; k<ddat->set[s].desc.deldop.frame[f].nviews; k++) { x = 1.0; dop = 0.0; del = ddat->set[s].desc.deldop.frame[f].delcor.a[0].val; arg = 
ddat->set[s].desc.deldop.frame[f].view[k].t - ddat->set[s].desc.deldop.frame[f].delcor.t0; for (n=1; n<=ddat->set[s].desc.deldop.frame[f].delcor.n; n++) { dop += n*ddat->set[s].desc.deldop.frame[f].delcor.a[n].val*x; del += ddat->set[s].desc.deldop.frame[f].delcor.a[n].val*(x*=arg); } /* del has units of usec */ ddat->set[s].desc.deldop.frame[f].view[k].deloff = del/ddat->set[s].desc.deldop.frame[f].del_per_pixel; /* dop has units of usec/day and there are 86400 sec/day */ ddat->set[s].desc.deldop.frame[f].view[k].dopoff = -dop*ddat->set[s].desc.deldop.frame[f].Ftx / (ddat->set[s].desc.deldop.frame[f].dop_per_pixel*86400.0); } } } __host__ void deldopoffs_gpu(struct dat_t *ddat, int s, int nframes) { dim3 BLK,THD; /* Launch nframes-threaded kernel */ THD.x = nframes; deldopoffs_krnl<<<BLK,THD>>>(ddat, s, nframes); checkErrorAfterKernelLaunch("deldopoffs_cuda_krnl (deldopoffs_cuda)"); }
d301266b31d02cf7a5a8086d5cf202237f580d39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "globals.h" #include "cuda_functions.h" #include "cuda_globals.h" #include "cuda_main.h" #include "cuda_math.h" void runSimulation(myprec *par1, myprec *par2, myprec *time) { myprec h_dt,h_dpdz; /* allocating temporary arrays and streams */ void (*RHSDeviceDir[3])(myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*); RHSDeviceDir[0] = RHSDeviceSharedFlxX; RHSDeviceDir[1] = RHSDeviceSharedFlxY_new; RHSDeviceDir[2] = RHSDeviceSharedFlxZ_new; hipStream_t s[3]; for (int i=0; i<3; i++) { checkCuda( hipStreamCreateWithFlags(&s[i], hipStreamNonBlocking) ); } for (int istep = 0; istep < nsteps; istep++) { hipLaunchKernelGGL(( calcState), dim3(grid0),dim3(block0), 0, 0, d_r,d_u,d_v,d_w,d_e,d_h,d_t,d_p,d_m,d_l); hipDeviceSynchronize(); if(istep%checkCFLcondition==0) { calcTimeStep(dtC,d_r,d_u,d_v,d_w,d_e,d_m); if(forcing) calcPressureGrad(dpdz,d_r,d_w); hipMemcpy(&h_dt , dtC , sizeof(myprec), hipMemcpyDeviceToHost); hipMemcpy(&h_dpdz, dpdz, sizeof(myprec), hipMemcpyDeviceToHost); printf("step number %d with %lf %lf\n",istep,h_dt,h_dpdz); } if(istep>0) hipLaunchKernelGGL(( deviceSumOne), dim3(1),dim3(1), 0, 0, &time[istep],&time[istep-1] ,dtC); if(istep==0)hipLaunchKernelGGL(( deviceSumOne), dim3(1),dim3(1), 0, 0, &time[istep],&time[nsteps-1],dtC); if(istep%checkBulk==0) calcBulk(&par1[istep],&par2[istep],d_r,d_w,d_e); hipLaunchKernelGGL(( deviceMul), dim3(grid0),dim3(block0),0,s[0], d_uO,d_r,d_u); hipLaunchKernelGGL(( deviceMul), dim3(grid0),dim3(block0),0,s[1], d_vO,d_r,d_v); hipLaunchKernelGGL(( deviceMul), dim3(grid0),dim3(block0),0,s[2], d_wO,d_r,d_w); hipLaunchKernelGGL(( deviceCpy), dim3(grid0),dim3(block0), 0, 0, d_rO,d_r); hipLaunchKernelGGL(( deviceCpy), dim3(grid0),dim3(block0), 0, 0, d_eO,d_e); /* rk step 1 */ hipDeviceSynchronize(); hipLaunchKernelGGL(( calcStressX), 
dim3(d_grid[0]),dim3(d_block[0]),0,s[0], d_u,d_v,d_w); hipLaunchKernelGGL(( calcStressY), dim3(d_grid[3]),dim3(d_block[3]),0,s[1], d_u,d_v,d_w); hipLaunchKernelGGL(( calcStressZ), dim3(d_grid[4]),dim3(d_block[4]),0,s[2], d_u,d_v,d_w); hipDeviceSynchronize(); hipLaunchKernelGGL(( calcDil), dim3(grid0),dim3(block0), 0, 0, d_dil); hipDeviceSynchronize(); #if useStreams for (int d = 0; d < 3; d++) RHSDeviceDirhipLaunchKernelGGL(([d)], dim3(d_grid([d)]),dim3(d_block([d)]),0,s([d)], d_rhsr1[d],d_rhsu1[d],d_rhsv1[d],d_rhsw1[d],d_rhse1[d],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #else RHSDeviceDirhipLaunchKernelGGL(([0)], dim3(d_grid([0)]),dim3(d_block([0)]), 0, 0, d_rhsr1[0],d_rhsu1[0],d_rhsv1[0],d_rhsw1[0],d_rhse1[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDirhipLaunchKernelGGL(([1)], dim3(d_grid([1)]),dim3(d_block([1)]), 0, 0, d_rhsr1[0],d_rhsu1[0],d_rhsv1[0],d_rhsw1[0],d_rhse1[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDirhipLaunchKernelGGL(([2)], dim3(d_grid([2)]),dim3(d_block([2)]), 0, 0, d_rhsr1[0],d_rhsu1[0],d_rhsv1[0],d_rhsw1[0],d_rhse1[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #endif hipDeviceSynchronize(); for (int d=0; d<fin; d++) { hipLaunchKernelGGL(( eulerSum), dim3(grid0),dim3(block0), 0, 0, d_r,d_rO,d_rhsr1[d],dtC,d); hipLaunchKernelGGL(( eulerSum), dim3(grid0),dim3(block0), 0, 0, d_e,d_eO,d_rhse1[d],dtC,d); } hipDeviceSynchronize(); for (int d=0; d<fin; d++) { hipLaunchKernelGGL(( eulerSumR), dim3(grid0),dim3(block0),0,s[0], d_u,d_uO,d_rhsu1[d],d_r,dtC,d); hipLaunchKernelGGL(( eulerSumR), dim3(grid0),dim3(block0),0,s[1], d_v,d_vO,d_rhsv1[d],d_r,dtC,d); hipLaunchKernelGGL(( eulerSumR), dim3(grid0),dim3(block0),0,s[2], d_w,d_wO,d_rhsw1[d],d_r,dtC,d); } hipDeviceSynchronize(); //rk step 2 hipLaunchKernelGGL(( calcState), dim3(grid0),dim3(block0), 0, 0, d_r,d_u,d_v,d_w,d_e,d_h,d_t,d_p,d_m,d_l); hipLaunchKernelGGL(( calcStressX), dim3(d_grid[0]),dim3(d_block[0]),0,s[0], d_u,d_v,d_w); hipLaunchKernelGGL(( 
calcStressY), dim3(d_grid[3]),dim3(d_block[3]),0,s[1], d_u,d_v,d_w); hipLaunchKernelGGL(( calcStressZ), dim3(d_grid[4]),dim3(d_block[4]),0,s[2], d_u,d_v,d_w); hipDeviceSynchronize(); hipLaunchKernelGGL(( calcDil), dim3(grid0),dim3(block0), 0, 0, d_dil); hipDeviceSynchronize(); #if useStreams for (int d = 0; d < 3; d++) RHSDeviceDirhipLaunchKernelGGL(([d)], dim3(d_grid([d)]),dim3(d_block([d)]),0,s([d)], d_rhsr2[d],d_rhsu2[d],d_rhsv2[d],d_rhsw2[d],d_rhse2[d],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #else RHSDeviceDirhipLaunchKernelGGL(([0)], dim3(d_grid([0)]),dim3(d_block([0)]), 0, 0, d_rhsr2[0],d_rhsu2[0],d_rhsv2[0],d_rhsw2[0],d_rhse2[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDirhipLaunchKernelGGL(([1)], dim3(d_grid([1)]),dim3(d_block([1)]), 0, 0, d_rhsr2[0],d_rhsu2[0],d_rhsv2[0],d_rhsw2[0],d_rhse2[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDirhipLaunchKernelGGL(([2)], dim3(d_grid([2)]),dim3(d_block([2)]), 0, 0, d_rhsr2[0],d_rhsu2[0],d_rhsv2[0],d_rhsw2[0],d_rhse2[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #endif hipDeviceSynchronize(); for (int d=0; d<fin; d++) { hipLaunchKernelGGL(( eulerSum3), dim3(grid0),dim3(block0), 0, 0, d_r,d_rO,d_rhsr1[d],d_rhsr2[d],dtC,d); hipLaunchKernelGGL(( eulerSum3), dim3(grid0),dim3(block0), 0, 0, d_e,d_eO,d_rhse1[d],d_rhse2[d],dtC,d); } hipDeviceSynchronize(); for (int d=0; d<fin; d++) { hipLaunchKernelGGL(( eulerSum3R), dim3(grid0),dim3(block0),0,s[0], d_u,d_uO,d_rhsu1[d],d_rhsu2[d],d_r,dtC,d); hipLaunchKernelGGL(( eulerSum3R), dim3(grid0),dim3(block0),0,s[1], d_v,d_vO,d_rhsv1[d],d_rhsv2[d],d_r,dtC,d); hipLaunchKernelGGL(( eulerSum3R), dim3(grid0),dim3(block0),0,s[2], d_w,d_wO,d_rhsw1[d],d_rhsw2[d],d_r,dtC,d); } hipDeviceSynchronize(); //rk step 3 hipLaunchKernelGGL(( calcState), dim3(grid0),dim3(block0), 0, 0, d_r,d_u,d_v,d_w,d_e,d_h,d_t,d_p,d_m,d_l); hipLaunchKernelGGL(( calcStressX), dim3(d_grid[0]),dim3(d_block[0]),0,s[0], d_u,d_v,d_w); hipLaunchKernelGGL(( calcStressY), 
dim3(d_grid[3]),dim3(d_block[3]),0,s[1], d_u,d_v,d_w); hipLaunchKernelGGL(( calcStressZ), dim3(d_grid[4]),dim3(d_block[4]),0,s[2], d_u,d_v,d_w); hipDeviceSynchronize(); hipLaunchKernelGGL(( calcDil), dim3(grid0),dim3(block0), 0, 0, d_dil); hipDeviceSynchronize(); #if useStreams for (int d = 0; d < 3; d++) RHSDeviceDirhipLaunchKernelGGL(([d)], dim3(d_grid([d)]),dim3(d_block([d)]),0,s([d)], d_rhsr3[d],d_rhsu3[d],d_rhsv3[d],d_rhsw3[d],d_rhse3[d],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #else RHSDeviceDirhipLaunchKernelGGL(([0)], dim3(d_grid([0)]),dim3(d_block([0)]), 0, 0, d_rhsr3[0],d_rhsu3[0],d_rhsv3[0],d_rhsw3[0],d_rhse3[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDirhipLaunchKernelGGL(([1)], dim3(d_grid([1)]),dim3(d_block([1)]), 0, 0, d_rhsr3[0],d_rhsu3[0],d_rhsv3[0],d_rhsw3[0],d_rhse3[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDirhipLaunchKernelGGL(([2)], dim3(d_grid([2)]),dim3(d_block([2)]), 0, 0, d_rhsr3[0],d_rhsu3[0],d_rhsv3[0],d_rhsw3[0],d_rhse3[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #endif hipDeviceSynchronize(); for (int d=0; d<fin; d++) { hipLaunchKernelGGL(( rk3final), dim3(grid0),dim3(block0), 0, 0, d_r,d_rO,d_rhsr1[d],d_rhsr2[d],d_rhsr3[d],dtC,d); hipLaunchKernelGGL(( rk3final), dim3(grid0),dim3(block0), 0, 0, d_e,d_eO,d_rhse1[d],d_rhse2[d],d_rhse3[d],dtC,d); } hipDeviceSynchronize(); for (int d=0; d<fin; d++) { hipLaunchKernelGGL(( rk3finalR), dim3(grid0),dim3(block0),0,s[0], d_u,d_uO,d_rhsu1[d],d_rhsu2[d],d_rhsu3[d],d_r,dtC,d); hipLaunchKernelGGL(( rk3finalR), dim3(grid0),dim3(block0),0,s[1], d_v,d_vO,d_rhsv1[d],d_rhsv2[d],d_rhsv3[d],d_r,dtC,d); hipLaunchKernelGGL(( rk3finalR), dim3(grid0),dim3(block0),0,s[2], d_w,d_wO,d_rhsw1[d],d_rhsw2[d],d_rhsw3[d],d_r,dtC,d); } hipDeviceSynchronize(); } for (int i=0; i<3; i++) { checkCuda( hipStreamDestroy(s[i]) ); } } __global__ void eulerSum(myprec *a, myprec *b, myprec *c, myprec *dt, int i) { Indices 
id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a[id.g] = (b[id.g] + c[id.g]*(*dt)/2.0); } else { a[id.g] += ( c[id.g]*(*dt)/2.0 ); } } __global__ void eulerSumR(myprec *a, myprec *b, myprec *c, myprec *r, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a[id.g] = (b[id.g] + c[id.g]*(*dt)/2.0)/r[id.g]; } else { a[id.g] += ( c[id.g]*(*dt)/2.0 )/r[id.g]; } } __global__ void eulerSum3(myprec *a, myprec *b, myprec *c1, myprec *c2, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a[id.g] = b[id.g] + (2*c2[id.g] - c1[id.g])*(*dt); } else { a[id.g] += ( 2*c2[id.g] - c1[id.g] )*(*dt); } } __global__ void eulerSum3R(myprec *a, myprec *b, myprec *c1, myprec *c2, myprec *r, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0 ) { a[id.g] = ( b[id.g] + (2*c2[id.g] - c1[id.g])*(*dt) )/r[id.g]; } else { a[id.g] += ( 2*c2[id.g] - c1[id.g] )*(*dt) / r[id.g]; } } __global__ void rk3final(myprec *a1, myprec *a2, myprec *b, myprec *c, myprec *d, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a1[id.g] = a2[id.g] + (*dt)*( b[id.g] + 4*c[id.g] + d[id.g])/6.; } else { a1[id.g] += (*dt)*( b[id.g] + 4*c[id.g] + d[id.g] )/6. ; } } __global__ void rk3finalR(myprec *a1, myprec *a2, myprec *b, myprec *c, myprec *d, myprec *r, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a1[id.g] = ( a2[id.g] + (*dt)*( b[id.g] + 4*c[id.g] + d[id.g] )/6. )/ r[id.g]; } else { a1[id.g] += ( (*dt)*( b[id.g] + 4*c[id.g] + d[id.g] )/6. 
)/ r[id.g]; } } __global__ void calcState(myprec *rho, myprec *uvel, myprec *vvel, myprec *wvel, myprec *ret, myprec *ht, myprec *tem, myprec *pre, myprec *mu, myprec *lam) { int threadsPerBlock = blockDim.x * blockDim.y; int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y; int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y; int gt = blockNumInGrid * threadsPerBlock + threadNumInBlock; myprec cvInv = (gamma - 1.0)/Rgas; myprec invrho = 1.0/rho[gt]; myprec en = ret[gt]*invrho - 0.5*(uvel[gt]*uvel[gt] + vvel[gt]*vvel[gt] + wvel[gt]*wvel[gt]); tem[gt] = cvInv*en; pre[gt] = rho[gt]*Rgas*tem[gt]; ht[gt] = (ret[gt] + pre[gt])*invrho; myprec suth = pow(tem[gt],viscexp); mu[gt] = suth/Re; lam[gt] = suth/Re/Pr/Ec; __syncthreads(); }
d301266b31d02cf7a5a8086d5cf202237f580d39.cu
#include "globals.h" #include "cuda_functions.h" #include "cuda_globals.h" #include "cuda_main.h" #include "cuda_math.h" void runSimulation(myprec *par1, myprec *par2, myprec *time) { myprec h_dt,h_dpdz; /* allocating temporary arrays and streams */ void (*RHSDeviceDir[3])(myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*, myprec*); RHSDeviceDir[0] = RHSDeviceSharedFlxX; RHSDeviceDir[1] = RHSDeviceSharedFlxY_new; RHSDeviceDir[2] = RHSDeviceSharedFlxZ_new; cudaStream_t s[3]; for (int i=0; i<3; i++) { checkCuda( cudaStreamCreateWithFlags(&s[i], cudaStreamNonBlocking) ); } for (int istep = 0; istep < nsteps; istep++) { calcState<<<grid0,block0>>>(d_r,d_u,d_v,d_w,d_e,d_h,d_t,d_p,d_m,d_l); cudaDeviceSynchronize(); if(istep%checkCFLcondition==0) { calcTimeStep(dtC,d_r,d_u,d_v,d_w,d_e,d_m); if(forcing) calcPressureGrad(dpdz,d_r,d_w); cudaMemcpy(&h_dt , dtC , sizeof(myprec), cudaMemcpyDeviceToHost); cudaMemcpy(&h_dpdz, dpdz, sizeof(myprec), cudaMemcpyDeviceToHost); printf("step number %d with %lf %lf\n",istep,h_dt,h_dpdz); } if(istep>0) deviceSumOne<<<1,1>>>(&time[istep],&time[istep-1] ,dtC); if(istep==0) deviceSumOne<<<1,1>>>(&time[istep],&time[nsteps-1],dtC); if(istep%checkBulk==0) calcBulk(&par1[istep],&par2[istep],d_r,d_w,d_e); deviceMul<<<grid0,block0,0,s[0]>>>(d_uO,d_r,d_u); deviceMul<<<grid0,block0,0,s[1]>>>(d_vO,d_r,d_v); deviceMul<<<grid0,block0,0,s[2]>>>(d_wO,d_r,d_w); deviceCpy<<<grid0,block0>>>(d_rO,d_r); deviceCpy<<<grid0,block0>>>(d_eO,d_e); /* rk step 1 */ cudaDeviceSynchronize(); calcStressX<<<d_grid[0],d_block[0],0,s[0]>>>(d_u,d_v,d_w); calcStressY<<<d_grid[3],d_block[3],0,s[1]>>>(d_u,d_v,d_w); calcStressZ<<<d_grid[4],d_block[4],0,s[2]>>>(d_u,d_v,d_w); cudaDeviceSynchronize(); calcDil<<<grid0,block0>>>(d_dil); cudaDeviceSynchronize(); #if useStreams for (int d = 0; d < 3; d++) 
RHSDeviceDir[d]<<<d_grid[d],d_block[d],0,s[d]>>>(d_rhsr1[d],d_rhsu1[d],d_rhsv1[d],d_rhsw1[d],d_rhse1[d],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #else RHSDeviceDir[0]<<<d_grid[0],d_block[0]>>>(d_rhsr1[0],d_rhsu1[0],d_rhsv1[0],d_rhsw1[0],d_rhse1[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDir[1]<<<d_grid[1],d_block[1]>>>(d_rhsr1[0],d_rhsu1[0],d_rhsv1[0],d_rhsw1[0],d_rhse1[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDir[2]<<<d_grid[2],d_block[2]>>>(d_rhsr1[0],d_rhsu1[0],d_rhsv1[0],d_rhsw1[0],d_rhse1[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #endif cudaDeviceSynchronize(); for (int d=0; d<fin; d++) { eulerSum<<<grid0,block0>>>(d_r,d_rO,d_rhsr1[d],dtC,d); eulerSum<<<grid0,block0>>>(d_e,d_eO,d_rhse1[d],dtC,d); } cudaDeviceSynchronize(); for (int d=0; d<fin; d++) { eulerSumR<<<grid0,block0,0,s[0]>>>(d_u,d_uO,d_rhsu1[d],d_r,dtC,d); eulerSumR<<<grid0,block0,0,s[1]>>>(d_v,d_vO,d_rhsv1[d],d_r,dtC,d); eulerSumR<<<grid0,block0,0,s[2]>>>(d_w,d_wO,d_rhsw1[d],d_r,dtC,d); } cudaDeviceSynchronize(); //rk step 2 calcState<<<grid0,block0>>>(d_r,d_u,d_v,d_w,d_e,d_h,d_t,d_p,d_m,d_l); calcStressX<<<d_grid[0],d_block[0],0,s[0]>>>(d_u,d_v,d_w); calcStressY<<<d_grid[3],d_block[3],0,s[1]>>>(d_u,d_v,d_w); calcStressZ<<<d_grid[4],d_block[4],0,s[2]>>>(d_u,d_v,d_w); cudaDeviceSynchronize(); calcDil<<<grid0,block0>>>(d_dil); cudaDeviceSynchronize(); #if useStreams for (int d = 0; d < 3; d++) RHSDeviceDir[d]<<<d_grid[d],d_block[d],0,s[d]>>>(d_rhsr2[d],d_rhsu2[d],d_rhsv2[d],d_rhsw2[d],d_rhse2[d],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #else RHSDeviceDir[0]<<<d_grid[0],d_block[0]>>>(d_rhsr2[0],d_rhsu2[0],d_rhsv2[0],d_rhsw2[0],d_rhse2[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDir[1]<<<d_grid[1],d_block[1]>>>(d_rhsr2[0],d_rhsu2[0],d_rhsv2[0],d_rhsw2[0],d_rhse2[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); 
RHSDeviceDir[2]<<<d_grid[2],d_block[2]>>>(d_rhsr2[0],d_rhsu2[0],d_rhsv2[0],d_rhsw2[0],d_rhse2[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #endif cudaDeviceSynchronize(); for (int d=0; d<fin; d++) { eulerSum3<<<grid0,block0>>>(d_r,d_rO,d_rhsr1[d],d_rhsr2[d],dtC,d); eulerSum3<<<grid0,block0>>>(d_e,d_eO,d_rhse1[d],d_rhse2[d],dtC,d); } cudaDeviceSynchronize(); for (int d=0; d<fin; d++) { eulerSum3R<<<grid0,block0,0,s[0]>>>(d_u,d_uO,d_rhsu1[d],d_rhsu2[d],d_r,dtC,d); eulerSum3R<<<grid0,block0,0,s[1]>>>(d_v,d_vO,d_rhsv1[d],d_rhsv2[d],d_r,dtC,d); eulerSum3R<<<grid0,block0,0,s[2]>>>(d_w,d_wO,d_rhsw1[d],d_rhsw2[d],d_r,dtC,d); } cudaDeviceSynchronize(); //rk step 3 calcState<<<grid0,block0>>>(d_r,d_u,d_v,d_w,d_e,d_h,d_t,d_p,d_m,d_l); calcStressX<<<d_grid[0],d_block[0],0,s[0]>>>(d_u,d_v,d_w); calcStressY<<<d_grid[3],d_block[3],0,s[1]>>>(d_u,d_v,d_w); calcStressZ<<<d_grid[4],d_block[4],0,s[2]>>>(d_u,d_v,d_w); cudaDeviceSynchronize(); calcDil<<<grid0,block0>>>(d_dil); cudaDeviceSynchronize(); #if useStreams for (int d = 0; d < 3; d++) RHSDeviceDir[d]<<<d_grid[d],d_block[d],0,s[d]>>>(d_rhsr3[d],d_rhsu3[d],d_rhsv3[d],d_rhsw3[d],d_rhse3[d],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #else RHSDeviceDir[0]<<<d_grid[0],d_block[0]>>>(d_rhsr3[0],d_rhsu3[0],d_rhsv3[0],d_rhsw3[0],d_rhse3[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDir[1]<<<d_grid[1],d_block[1]>>>(d_rhsr3[0],d_rhsu3[0],d_rhsv3[0],d_rhsw3[0],d_rhse3[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); RHSDeviceDir[2]<<<d_grid[2],d_block[2]>>>(d_rhsr3[0],d_rhsu3[0],d_rhsv3[0],d_rhsw3[0],d_rhse3[0],d_r,d_u,d_v,d_w,d_h,d_t,d_p,d_m,d_l,d_dil,dpdz); #endif cudaDeviceSynchronize(); for (int d=0; d<fin; d++) { rk3final<<<grid0,block0>>>(d_r,d_rO,d_rhsr1[d],d_rhsr2[d],d_rhsr3[d],dtC,d); rk3final<<<grid0,block0>>>(d_e,d_eO,d_rhse1[d],d_rhse2[d],d_rhse3[d],dtC,d); } cudaDeviceSynchronize(); for (int d=0; d<fin; d++) { rk3finalR<<<grid0,block0,0,s[0]>>>(d_u,d_uO,d_rhsu1[d],d_rhsu2[d],d_rhsu3[d],d_r,dtC,d); 
rk3finalR<<<grid0,block0,0,s[1]>>>(d_v,d_vO,d_rhsv1[d],d_rhsv2[d],d_rhsv3[d],d_r,dtC,d); rk3finalR<<<grid0,block0,0,s[2]>>>(d_w,d_wO,d_rhsw1[d],d_rhsw2[d],d_rhsw3[d],d_r,dtC,d); } cudaDeviceSynchronize(); } for (int i=0; i<3; i++) { checkCuda( cudaStreamDestroy(s[i]) ); } } __global__ void eulerSum(myprec *a, myprec *b, myprec *c, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a[id.g] = (b[id.g] + c[id.g]*(*dt)/2.0); } else { a[id.g] += ( c[id.g]*(*dt)/2.0 ); } } __global__ void eulerSumR(myprec *a, myprec *b, myprec *c, myprec *r, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a[id.g] = (b[id.g] + c[id.g]*(*dt)/2.0)/r[id.g]; } else { a[id.g] += ( c[id.g]*(*dt)/2.0 )/r[id.g]; } } __global__ void eulerSum3(myprec *a, myprec *b, myprec *c1, myprec *c2, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a[id.g] = b[id.g] + (2*c2[id.g] - c1[id.g])*(*dt); } else { a[id.g] += ( 2*c2[id.g] - c1[id.g] )*(*dt); } } __global__ void eulerSum3R(myprec *a, myprec *b, myprec *c1, myprec *c2, myprec *r, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0 ) { a[id.g] = ( b[id.g] + (2*c2[id.g] - c1[id.g])*(*dt) )/r[id.g]; } else { a[id.g] += ( 2*c2[id.g] - c1[id.g] )*(*dt) / r[id.g]; } } __global__ void rk3final(myprec *a1, myprec *a2, myprec *b, myprec *c, myprec *d, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a1[id.g] = a2[id.g] + (*dt)*( b[id.g] + 4*c[id.g] + d[id.g])/6.; } else { a1[id.g] += (*dt)*( b[id.g] + 4*c[id.g] + d[id.g] )/6. 
; } } __global__ void rk3finalR(myprec *a1, myprec *a2, myprec *b, myprec *c, myprec *d, myprec *r, myprec *dt, int i) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); if(i==0) { a1[id.g] = ( a2[id.g] + (*dt)*( b[id.g] + 4*c[id.g] + d[id.g] )/6. )/ r[id.g]; } else { a1[id.g] += ( (*dt)*( b[id.g] + 4*c[id.g] + d[id.g] )/6. )/ r[id.g]; } } __global__ void calcState(myprec *rho, myprec *uvel, myprec *vvel, myprec *wvel, myprec *ret, myprec *ht, myprec *tem, myprec *pre, myprec *mu, myprec *lam) { int threadsPerBlock = blockDim.x * blockDim.y; int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y; int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y; int gt = blockNumInGrid * threadsPerBlock + threadNumInBlock; myprec cvInv = (gamma - 1.0)/Rgas; myprec invrho = 1.0/rho[gt]; myprec en = ret[gt]*invrho - 0.5*(uvel[gt]*uvel[gt] + vvel[gt]*vvel[gt] + wvel[gt]*wvel[gt]); tem[gt] = cvInv*en; pre[gt] = rho[gt]*Rgas*tem[gt]; ht[gt] = (ret[gt] + pre[gt])*invrho; myprec suth = pow(tem[gt],viscexp); mu[gt] = suth/Re; lam[gt] = suth/Re/Pr/Ec; __syncthreads(); }
f6a3fc1820b5e11b04de775d28917be93f7a033b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "lower_right_opt.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); int *input_itemsets = NULL; hipMalloc(&input_itemsets, XSIZE*YSIZE); int *reference = NULL; hipMalloc(&reference, XSIZE*YSIZE); int max_rows = 1; int max_cols = 1; int i = 1; int penalty = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( lower_right_opt), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,input_itemsets,reference,max_rows,max_cols,i,penalty); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( lower_right_opt), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,input_itemsets,reference,max_rows,max_cols,i,penalty); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( lower_right_opt), dim3(gridBlock),dim3(threadBlock), 0, 0, 
dst,input_itemsets,reference,max_rows,max_cols,i,penalty); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f6a3fc1820b5e11b04de775d28917be93f7a033b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "lower_right_opt.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); int *input_itemsets = NULL; cudaMalloc(&input_itemsets, XSIZE*YSIZE); int *reference = NULL; cudaMalloc(&reference, XSIZE*YSIZE); int max_rows = 1; int max_cols = 1; int i = 1; int penalty = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); lower_right_opt<<<gridBlock,threadBlock>>>(dst,input_itemsets,reference,max_rows,max_cols,i,penalty); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { lower_right_opt<<<gridBlock,threadBlock>>>(dst,input_itemsets,reference,max_rows,max_cols,i,penalty); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { lower_right_opt<<<gridBlock,threadBlock>>>(dst,input_itemsets,reference,max_rows,max_cols,i,penalty); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c7886103fb2cb0d2b90faa43521eb05204488e64.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "histo_kernel.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *buffer = NULL; hipMalloc(&buffer, XSIZE*YSIZE); long size = XSIZE*YSIZE; int *histo = NULL; hipMalloc(&histo, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( histo_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,size,histo); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( histo_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,size,histo); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( histo_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,size,histo); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c7886103fb2cb0d2b90faa43521eb05204488e64.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "histo_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *buffer = NULL; cudaMalloc(&buffer, XSIZE*YSIZE); long size = XSIZE*YSIZE; int *histo = NULL; cudaMalloc(&histo, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); histo_kernel<<<gridBlock,threadBlock>>>(buffer,size,histo); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { histo_kernel<<<gridBlock,threadBlock>>>(buffer,size,histo); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { histo_kernel<<<gridBlock,threadBlock>>>(buffer,size,histo); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
df23f4496bf189fb504f8a6948631827edef7344.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> __global__ void square(float *d_out, float *d_in) { int i = threadIdx.x; float f = d_in[i]; d_out[i] = f*f; } int main(int argc, char *argv[]) { const int ARRAY_COUNT = 64; float h_in[ARRAY_COUNT], h_out[ARRAY_COUNT]; for(int i=0 ; i<ARRAY_COUNT ; ++i) { h_in[i] = i; } const int ARRAY_SIZE = ARRAY_COUNT * sizeof *h_in; float* d_in = NULL; float* d_out = NULL; hipMalloc((void**) &d_in, ARRAY_SIZE); hipMalloc((void**) &d_out, ARRAY_SIZE); hipMemcpy(d_in, h_in, ARRAY_SIZE, hipMemcpyHostToDevice); hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_COUNT), 0, 0, d_out, d_in); hipMemcpy(h_out, d_out, ARRAY_SIZE, hipMemcpyDeviceToHost); for(int i=0 ; i<ARRAY_COUNT ; ++i) { printf("%f%s", h_out[i], (i+1)%4 ? "\t" : "\n"); } hipFree(d_in); hipFree(d_out); return EXIT_SUCCESS; }
df23f4496bf189fb504f8a6948631827edef7344.cu
#include <stdlib.h> #include <stdio.h> __global__ void square(float *d_out, float *d_in) { int i = threadIdx.x; float f = d_in[i]; d_out[i] = f*f; } int main(int argc, char *argv[]) { const int ARRAY_COUNT = 64; float h_in[ARRAY_COUNT], h_out[ARRAY_COUNT]; for(int i=0 ; i<ARRAY_COUNT ; ++i) { h_in[i] = i; } const int ARRAY_SIZE = ARRAY_COUNT * sizeof *h_in; float* d_in = NULL; float* d_out = NULL; cudaMalloc((void**) &d_in, ARRAY_SIZE); cudaMalloc((void**) &d_out, ARRAY_SIZE); cudaMemcpy(d_in, h_in, ARRAY_SIZE, cudaMemcpyHostToDevice); square<<<1, ARRAY_COUNT>>>(d_out, d_in); cudaMemcpy(h_out, d_out, ARRAY_SIZE, cudaMemcpyDeviceToHost); for(int i=0 ; i<ARRAY_COUNT ; ++i) { printf("%f%s", h_out[i], (i+1)%4 ? "\t" : "\n"); } cudaFree(d_in); cudaFree(d_out); return EXIT_SUCCESS; }
e66332164118b1908ae3c12ce6b83893a4204c72.hip
// !!! This is a file automatically generated by hipify!!! #include "wb.h" //@@ The purpose of this code is to become familiar with the submission //@@ process. Do not worry if you do not understand all the details of //@@ the code. int main(int argc, char ** argv) { int deviceCount; wbArg_read(argc, argv); hipGetDeviceCount(&deviceCount); wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer for (int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { wbLog(TRACE, "No CUDA GPU has been detected"); return -1; } else if (deviceCount == 1) { //@@ WbLog is a provided logging API (similar to Log4J). //@@ The logging function wbLog takes a level which is either //@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a //@@ message to be printed. wbLog(TRACE, "There is 1 device supporting CUDA"); } else { wbLog(TRACE, "There are ", deviceCount, " devices supporting CUDA"); } } wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name); wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".", deviceProp.minor); wbLog(TRACE, " Maximum global memory size: ", deviceProp.totalGlobalMem); wbLog(TRACE, " Maximum constant memory size: ", deviceProp.totalConstMem); wbLog(TRACE, " Maximum shared memory size per block: ", deviceProp.sharedMemPerBlock); wbLog(TRACE, " Maximum block dimensions: ", deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1], " x ", deviceProp.maxThreadsDim[2]); wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0], " x ", deviceProp.maxGridSize[1], " x ", deviceProp.maxGridSize[2]); wbLog(TRACE, " Warp size: ", deviceProp.warpSize); } wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer return 0; }
e66332164118b1908ae3c12ce6b83893a4204c72.cu
#include "wb.h" //@@ The purpose of this code is to become familiar with the submission //@@ process. Do not worry if you do not understand all the details of //@@ the code. int main(int argc, char ** argv) { int deviceCount; wbArg_read(argc, argv); cudaGetDeviceCount(&deviceCount); wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer for (int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { wbLog(TRACE, "No CUDA GPU has been detected"); return -1; } else if (deviceCount == 1) { //@@ WbLog is a provided logging API (similar to Log4J). //@@ The logging function wbLog takes a level which is either //@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a //@@ message to be printed. wbLog(TRACE, "There is 1 device supporting CUDA"); } else { wbLog(TRACE, "There are ", deviceCount, " devices supporting CUDA"); } } wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name); wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".", deviceProp.minor); wbLog(TRACE, " Maximum global memory size: ", deviceProp.totalGlobalMem); wbLog(TRACE, " Maximum constant memory size: ", deviceProp.totalConstMem); wbLog(TRACE, " Maximum shared memory size per block: ", deviceProp.sharedMemPerBlock); wbLog(TRACE, " Maximum block dimensions: ", deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1], " x ", deviceProp.maxThreadsDim[2]); wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0], " x ", deviceProp.maxGridSize[1], " x ", deviceProp.maxGridSize[2]); wbLog(TRACE, " Warp size: ", deviceProp.warpSize); } wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer return 0; }
8683cc90da629970b71b067f66435d6f02c69e2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "FlockKernels.cuh" #include "FlockActions.cuh" #include "FlockParams.cuh" #include "FlockUtil.cuh" __global__ void computeAvgNeighbourPos( bool *_collision, float3 *_target, const float3 *_pos, const uint *_cellOcc, const uint *_scatterAddress) { uint gridCellIdx = cellFromGrid(blockIdx); uint numNeighbour = 0; float3 sumPos = make_float3(0.0f,0.0f,0.0f); /// if (threadIdx.x < _cellOcc[gridCellIdx]) { uint thisBoidIdx = _scatterAddress[gridCellIdx] + threadIdx.x; float3 thisPoint = _pos[thisBoidIdx]; // set collision flag to false here _collision[thisBoidIdx] = false; int threadInBlockIdx; uint otherCellIdx; uint otherBoidIdx; /// index through self and neighbours around a cell for (int i = ((blockIdx.x == 0) ? 0 : -1); i <= ((blockIdx.x == (gridDim.x - 1)) ? 0 : 1); ++i) { for (int j = ((blockIdx.y == 0) ? 0 : -1); j <= ((blockIdx.y == (gridDim.y - 1)) ? 0 : 1); ++j) { // Calculate the index of the other grid cell otherCellIdx = cellFromGrid(make_uint3(blockIdx.x + i, blockIdx.y + j, 0)); //printf("gridCellIdx=%d, otherGridCellIdx=%d\n",gridCellIdx,otherGridCellIdx); // Now iterate over all particles in this neighbouring cell for (threadInBlockIdx = 0; threadInBlockIdx < _cellOcc[otherCellIdx]; ++threadInBlockIdx) { // Determine the index of the neighbouring point in that cell otherBoidIdx = _scatterAddress[otherCellIdx] + threadInBlockIdx; float d2 = dist2(thisPoint, _pos[otherBoidIdx]); if ((otherBoidIdx != thisBoidIdx) && (d2 <= paramData.m_invRes2)) { /// sum position to prepare for average position _collision[thisBoidIdx] = true; sumPos = sumPos + _pos[otherBoidIdx]; ++numNeighbour; } } } } if(numNeighbour > 0) { /// set average position _target[thisBoidIdx] = sumPos / numNeighbour; } } } __global__ void genericBehaviour( float3 *_v, float3 *_col, float3 *_target, float3 *_pos, bool *_collision, const uint *_cellOcc, const uint *_scatterAddress, float *_angle, 
const float * _vMax) { uint gridCellIdx = cellFromGrid(blockIdx); /// if (threadIdx.x < _cellOcc[gridCellIdx]) { uint thisBoidIdx = _scatterAddress[gridCellIdx] + threadIdx.x; float3 thisPos = _pos[thisBoidIdx]; float3 f; float thisAng = _angle[thisBoidIdx]; float thisVMax = _vMax[thisBoidIdx]; if(_collision[thisBoidIdx]) { f = boidFleePattern(thisPos, _v[thisBoidIdx], _target[thisBoidIdx], thisVMax); _col[thisBoidIdx] = make_float3(255.0f,0.0f,0.0f); } else { _target[thisBoidIdx] = boidWanderPattern( thisAng, _v[thisBoidIdx], thisPos); f = boidSeekPattern( thisPos, _v[thisBoidIdx], _target[thisBoidIdx], thisVMax); _col[thisBoidIdx] = make_float3(0.0f,255.0f,0.0f); } resolveForce(_pos[thisBoidIdx],_v[thisBoidIdx],f,thisVMax); //_pos[thisBoidIdx] = thisPos; //_v[thisBoidIdx] = thisV; } } __device__ void resolveForce( float3 &_pos, float3 &_v, const float3 &_f, const float &_vMax) { float3 accel = make_float3(_f.x * paramData.m_invMass , _f.y * paramData.m_invMass , 0.0f); _v = make_float3(_v.x + accel.x,_v.y + accel.y,0.0f); if(length(_v) > 0.0f) { _v = clamp(_v, make_float3(-_vMax,-_vMax,0.0f), make_float3(_vMax,_vMax,0.0f)); _v = normalize(_v); } _pos = _pos + _v * paramData.m_dt; }
8683cc90da629970b71b067f66435d6f02c69e2d.cu
#include "FlockKernels.cuh" #include "FlockActions.cuh" #include "FlockParams.cuh" #include "FlockUtil.cuh" __global__ void computeAvgNeighbourPos( bool *_collision, float3 *_target, const float3 *_pos, const uint *_cellOcc, const uint *_scatterAddress) { uint gridCellIdx = cellFromGrid(blockIdx); uint numNeighbour = 0; float3 sumPos = make_float3(0.0f,0.0f,0.0f); /// if (threadIdx.x < _cellOcc[gridCellIdx]) { uint thisBoidIdx = _scatterAddress[gridCellIdx] + threadIdx.x; float3 thisPoint = _pos[thisBoidIdx]; // set collision flag to false here _collision[thisBoidIdx] = false; int threadInBlockIdx; uint otherCellIdx; uint otherBoidIdx; /// index through self and neighbours around a cell for (int i = ((blockIdx.x == 0) ? 0 : -1); i <= ((blockIdx.x == (gridDim.x - 1)) ? 0 : 1); ++i) { for (int j = ((blockIdx.y == 0) ? 0 : -1); j <= ((blockIdx.y == (gridDim.y - 1)) ? 0 : 1); ++j) { // Calculate the index of the other grid cell otherCellIdx = cellFromGrid(make_uint3(blockIdx.x + i, blockIdx.y + j, 0)); //printf("gridCellIdx=%d, otherGridCellIdx=%d\n",gridCellIdx,otherGridCellIdx); // Now iterate over all particles in this neighbouring cell for (threadInBlockIdx = 0; threadInBlockIdx < _cellOcc[otherCellIdx]; ++threadInBlockIdx) { // Determine the index of the neighbouring point in that cell otherBoidIdx = _scatterAddress[otherCellIdx] + threadInBlockIdx; float d2 = dist2(thisPoint, _pos[otherBoidIdx]); if ((otherBoidIdx != thisBoidIdx) && (d2 <= paramData.m_invRes2)) { /// sum position to prepare for average position _collision[thisBoidIdx] = true; sumPos = sumPos + _pos[otherBoidIdx]; ++numNeighbour; } } } } if(numNeighbour > 0) { /// set average position _target[thisBoidIdx] = sumPos / numNeighbour; } } } __global__ void genericBehaviour( float3 *_v, float3 *_col, float3 *_target, float3 *_pos, bool *_collision, const uint *_cellOcc, const uint *_scatterAddress, float *_angle, const float * _vMax) { uint gridCellIdx = cellFromGrid(blockIdx); /// if (threadIdx.x < 
_cellOcc[gridCellIdx]) { uint thisBoidIdx = _scatterAddress[gridCellIdx] + threadIdx.x; float3 thisPos = _pos[thisBoidIdx]; float3 f; float thisAng = _angle[thisBoidIdx]; float thisVMax = _vMax[thisBoidIdx]; if(_collision[thisBoidIdx]) { f = boidFleePattern(thisPos, _v[thisBoidIdx], _target[thisBoidIdx], thisVMax); _col[thisBoidIdx] = make_float3(255.0f,0.0f,0.0f); } else { _target[thisBoidIdx] = boidWanderPattern( thisAng, _v[thisBoidIdx], thisPos); f = boidSeekPattern( thisPos, _v[thisBoidIdx], _target[thisBoidIdx], thisVMax); _col[thisBoidIdx] = make_float3(0.0f,255.0f,0.0f); } resolveForce(_pos[thisBoidIdx],_v[thisBoidIdx],f,thisVMax); //_pos[thisBoidIdx] = thisPos; //_v[thisBoidIdx] = thisV; } } __device__ void resolveForce( float3 &_pos, float3 &_v, const float3 &_f, const float &_vMax) { float3 accel = make_float3(_f.x * paramData.m_invMass , _f.y * paramData.m_invMass , 0.0f); _v = make_float3(_v.x + accel.x,_v.y + accel.y,0.0f); if(length(_v) > 0.0f) { _v = clamp(_v, make_float3(-_vMax,-_vMax,0.0f), make_float3(_vMax,_vMax,0.0f)); _v = normalize(_v); } _pos = _pos + _v * paramData.m_dt; }
77a6eb9c38db315c46b478b145a482e6b9680686.hip
// !!! This is a file automatically generated by hipify!!! // ------------------------------------------------------------------ // Faster R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Shaoqing Ren // ------------------------------------------------------------------ #include <stdbool.h> #include <stdio.h> #include <vector> #include <iostream> /* Includes, cuda */ #include <hip/hip_runtime.h> #include <rocblas.h> /* Includes, cuda helper functions */ #include "nms_cuda_kernel.h" //#include <ATen/ATen.h> //THCState *state = at::globalContext().thc_state; #define CUDA_WARN(XXX) \ do { if (XXX != hipSuccess) std::cout << "CUDA Error: " << \ hipGetErrorString(XXX) << ", at line " << __LINE__ \ << std::endl; hipDeviceSynchronize(); } while (0) #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(int n_boxes, float nms_overlap_thresh, float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ 
float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void nms_cuda_compute(int* keep_out, int *num_out, float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh) { float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); // printf("i am at line %d\n", boxes_num); // printf("i am at line %d\n", boxes_dim); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); 
CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); // we need to create a memory for keep_out on cpu // otherwise, the following code cannot run int* keep_out_cpu = new int[boxes_num]; int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { // orignal: keep_out[num_to_keep++] = i; keep_out_cpu[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } // copy keep_out_cpu to keep_out on gpu CUDA_WARN(hipMemcpy(keep_out, keep_out_cpu, boxes_num * sizeof(int),hipMemcpyHostToDevice)); // *num_out = num_to_keep; // original: *num_out = num_to_keep; // copy num_to_keep to num_out on gpu CUDA_WARN(hipMemcpy(num_out, &num_to_keep, 1 * sizeof(int),hipMemcpyHostToDevice)); // release cuda memory CUDA_CHECK(hipFree(boxes_dev)); CUDA_CHECK(hipFree(mask_dev)); // release cpu memory delete []keep_out_cpu; }
77a6eb9c38db315c46b478b145a482e6b9680686.cu
// ------------------------------------------------------------------ // Faster R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Shaoqing Ren // ------------------------------------------------------------------ #include <stdbool.h> #include <stdio.h> #include <vector> #include <iostream> /* Includes, cuda */ #include <cuda_runtime.h> #include <cublas_v2.h> /* Includes, cuda helper functions */ #include "nms_cuda_kernel.h" //#include <ATen/ATen.h> //THCState *state = at::globalContext().thc_state; #define CUDA_WARN(XXX) \ do { if (XXX != cudaSuccess) std::cout << "CUDA Error: " << \ cudaGetErrorString(XXX) << ", at line " << __LINE__ \ << std::endl; cudaDeviceSynchronize(); } while (0) #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(int n_boxes, float nms_overlap_thresh, float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x 
< col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void nms_cuda_compute(int* keep_out, int *num_out, float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh) { float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); // printf("i am at line %d\n", boxes_num); // printf("i am at line %d\n", boxes_dim); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, 
cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); // we need to create a memory for keep_out on cpu // otherwise, the following code cannot run int* keep_out_cpu = new int[boxes_num]; int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { // orignal: keep_out[num_to_keep++] = i; keep_out_cpu[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } // copy keep_out_cpu to keep_out on gpu CUDA_WARN(cudaMemcpy(keep_out, keep_out_cpu, boxes_num * sizeof(int),cudaMemcpyHostToDevice)); // *num_out = num_to_keep; // original: *num_out = num_to_keep; // copy num_to_keep to num_out on gpu CUDA_WARN(cudaMemcpy(num_out, &num_to_keep, 1 * sizeof(int),cudaMemcpyHostToDevice)); // release cuda memory CUDA_CHECK(cudaFree(boxes_dev)); CUDA_CHECK(cudaFree(mask_dev)); // release cpu memory delete []keep_out_cpu; }
7074cdca1a4b705ace4acf8980ffce558ad009d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cu_leaky_relu(const float* src, float* dst, int n){ const float leaky_relu_alpha = 100.0; int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ float p = 0.0; float n = 0.0; if(src[tid] > 0.0) p = src[tid]; if(src[tid] < 0.0) n = src[tid]; n = fdividef(n, leaky_relu_alpha); dst[tid] = __fadd_rd(p, n); tid += stride; } }
7074cdca1a4b705ace4acf8980ffce558ad009d8.cu
#include "includes.h" __global__ void cu_leaky_relu(const float* src, float* dst, int n){ const float leaky_relu_alpha = 100.0; int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ float p = 0.0; float n = 0.0; if(src[tid] > 0.0) p = src[tid]; if(src[tid] < 0.0) n = src[tid]; n = fdividef(n, leaky_relu_alpha); dst[tid] = __fadd_rd(p, n); tid += stride; } }
a5014594121428aa765296bb4c62961d53ea1143.hip
// NOTE(review): machine-generated (hipify'd, auto-unrolled) convolution-style
// kernel; the code below is left byte-identical -- only this header comment
// was added.  From the visible macros: A is a (2048+16)-wide padded input,
// B a 16-wide coefficient array, C the 2048-wide output; thread blocks are
// 256x1 and each thread carries 8 accumulators (sum_0..sum_7, merger_y = 8),
// writing 8 output rows C((idy*8)+r, idx).  Each stage loads 272 floats of
// one A row into shared_0 and up to 8 rows of B into shared_1[16][9] (the
// ninth column is never indexed -- presumably bank-conflict padding; TODO
// confirm), with __syncthreads() fencing every load/consume phase.  The main
// j-loop covers filter rows 0..h-8; the long unrolled tail appears to peel
// the boundary passes (row offsets h-1..h-7 and 0-1..0-7) so shrinking sets
// of filter rows are applied near the edges -- confirm against the code
// generator rather than editing this file by hand.
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define COALESCED_NUM 16 #define blockDimX 256 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 8 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define A(y,x) A[(y)*WIDTH_A+(x)] #define B(y,x) B[(y)*WIDTH_B+(x)] #define C(y,x) C[(y)*WIDTH_C+(x)] #define WIDTH_C 2048 #define WIDTH_B 16 #define WIDTH_A (2048+16) __global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h) { __shared__ float shared_1[16][9]; __shared__ float shared_0[272]; int j; float sum_0 = 0; float sum_1 = 0; float sum_2 = 0; float sum_3 = 0; float sum_4 = 0; float sum_5 = 0; float sum_6 = 0; float sum_7 = 0; int it_2; for (j=0; j<(h-7); j=(j+1)) { int it_2; if ((tidx<16)) { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*j))+h), (idx+(( - 1)*0))); } shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*j))+h), ((idx+(( - 1)*0))+16)); __syncthreads(); if ((tidx<16)) { shared_1[(tidx+0)][0]=B((j+0), (0+tidx)); shared_1[(tidx+0)][1]=B((j+1), (0+tidx)); shared_1[(tidx+0)][2]=B((j+2), (0+tidx)); shared_1[(tidx+0)][3]=B((j+3), (0+tidx)); shared_1[(tidx+0)][4]=B((j+4), (0+tidx)); shared_1[(tidx+0)][5]=B((j+5), (0+tidx)); shared_1[(tidx+0)][6]=B((j+6), (0+tidx)); shared_1[(tidx+0)][7]=B((j+7), (0+tidx)); } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; float b_3; float b_4; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_0+=(a*b_0); sum_1+=(a*b_1); 
sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } __syncthreads(); __syncthreads(); } if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-1)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-1)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; sum_0+=(a*b_0); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-2)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-2)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; sum_0+=(a*b_0); sum_1+=(a*b_1); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-3)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-3)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-4)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-4)))+h), ((idx+(( - 1)*0))+16)); } 
__syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-4), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][3]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; float b_3; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-5)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-5)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-5), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-4), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][3]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][4]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; float b_3; float b_4; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-6)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-6)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-6), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-5), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-4), (0+tidx)); } { shared_1[(tidx+0)][3]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][4]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][5]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float 
b_0; float b_1; float b_2; float b_3; float b_4; float b_5; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-7)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-7)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-7), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-6), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-5), (0+tidx)); } { shared_1[(tidx+0)][3]=B((h-4), (0+tidx)); } { shared_1[(tidx+0)][4]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][5]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][6]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; float b_3; float b_4; float b_5; float b_6; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); } C(((idy*8)+0), idx)=sum_0; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-1)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-1)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][1]=B(0, (0+tidx)); } { shared_1[(tidx+0)][2]=B(1, (0+tidx)); } { shared_1[(tidx+0)][3]=B(2, (0+tidx)); } { shared_1[(tidx+0)][4]=B(3, (0+tidx)); } { shared_1[(tidx+0)][5]=B(4, (0+tidx)); } { shared_1[(tidx+0)][6]=B(5, (0+tidx)); } { shared_1[(tidx+0)][7]=B(6, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; 
it_2=(it_2+1)) { float a; float b_1; float b_2; float b_3; float b_4; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+1), idx)=sum_1; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-2)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-2)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][2]=B(0, (0+tidx)); } { shared_1[(tidx+0)][3]=B(1, (0+tidx)); } { shared_1[(tidx+0)][4]=B(2, (0+tidx)); } { shared_1[(tidx+0)][5]=B(3, (0+tidx)); } { shared_1[(tidx+0)][6]=B(4, (0+tidx)); } { shared_1[(tidx+0)][7]=B(5, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_2; float b_3; float b_4; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+2), idx)=sum_2; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-3)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-3)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][3]=B(0, (0+tidx)); } { shared_1[(tidx+0)][4]=B(1, (0+tidx)); } { shared_1[(tidx+0)][5]=B(2, (0+tidx)); } { shared_1[(tidx+0)][6]=B(3, (0+tidx)); } { shared_1[(tidx+0)][7]=B(4, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_3; float b_4; float b_5; float b_6; float b_7; 
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+3), idx)=sum_3; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-4)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-4)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][4]=B(0, (0+tidx)); } { shared_1[(tidx+0)][5]=B(1, (0+tidx)); } { shared_1[(tidx+0)][6]=B(2, (0+tidx)); } { shared_1[(tidx+0)][7]=B(3, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_4; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+4), idx)=sum_4; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-5)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-5)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][5]=B(0, (0+tidx)); } { shared_1[(tidx+0)][6]=B(1, (0+tidx)); } { shared_1[(tidx+0)][7]=B(2, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+5), idx)=sum_5; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-6)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-6)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][6]=B(0, (0+tidx)); } { 
shared_1[(tidx+0)][7]=B(1, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+6), idx)=sum_6; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-7)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-7)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][7]=B(0, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_7=shared_1[it_2][7]; sum_7+=(a*b_7); } C(((idy*8)+7), idx)=sum_7; __syncthreads(); __syncthreads(); { } { } { } { } { } { } { } { } }
a5014594121428aa765296bb4c62961d53ea1143.cu
// NOTE(review): machine-generated (auto-unrolled) convolution-style kernel;
// the code below is left byte-identical -- only this header comment was
// added.  From the visible macros: A is a (2048+16)-wide padded input, B a
// 16-wide coefficient array, C the 2048-wide output; thread blocks are 256x1
// and each thread carries 8 accumulators (sum_0..sum_7, merger_y = 8),
// writing 8 output rows C((idy*8)+r, idx).  Each stage loads 272 floats of
// one A row into shared_0 and up to 8 rows of B into shared_1[16][9] (the
// ninth column is never indexed -- presumably bank-conflict padding; TODO
// confirm), with __syncthreads() fencing every load/consume phase.  The main
// j-loop covers filter rows 0..h-8; the long unrolled tail appears to peel
// the boundary passes (row offsets h-1..h-7 and 0-1..0-7) so shrinking sets
// of filter rows are applied near the edges -- confirm against the code
// generator rather than editing this file by hand.
#define COALESCED_NUM 16 #define blockDimX 256 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 8 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define A(y,x) A[(y)*WIDTH_A+(x)] #define B(y,x) B[(y)*WIDTH_B+(x)] #define C(y,x) C[(y)*WIDTH_C+(x)] #define WIDTH_C 2048 #define WIDTH_B 16 #define WIDTH_A (2048+16) __global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h) { __shared__ float shared_1[16][9]; __shared__ float shared_0[272]; int j; float sum_0 = 0; float sum_1 = 0; float sum_2 = 0; float sum_3 = 0; float sum_4 = 0; float sum_5 = 0; float sum_6 = 0; float sum_7 = 0; int it_2; for (j=0; j<(h-7); j=(j+1)) { int it_2; if ((tidx<16)) { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*j))+h), (idx+(( - 1)*0))); } shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*j))+h), ((idx+(( - 1)*0))+16)); __syncthreads(); if ((tidx<16)) { shared_1[(tidx+0)][0]=B((j+0), (0+tidx)); shared_1[(tidx+0)][1]=B((j+1), (0+tidx)); shared_1[(tidx+0)][2]=B((j+2), (0+tidx)); shared_1[(tidx+0)][3]=B((j+3), (0+tidx)); shared_1[(tidx+0)][4]=B((j+4), (0+tidx)); shared_1[(tidx+0)][5]=B((j+5), (0+tidx)); shared_1[(tidx+0)][6]=B((j+6), (0+tidx)); shared_1[(tidx+0)][7]=B((j+7), (0+tidx)); } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; float b_3; float b_4; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } 
__syncthreads(); __syncthreads(); } if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-1)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-1)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; sum_0+=(a*b_0); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-2)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-2)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; sum_0+=(a*b_0); sum_1+=(a*b_1); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-3)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-3)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-4)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-4)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-4), (0+tidx)); } { 
shared_1[(tidx+0)][1]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][3]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; float b_3; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-5)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-5)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-5), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-4), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][3]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][4]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; float b_3; float b_4; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-6)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-6)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-6), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-5), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-4), (0+tidx)); } { shared_1[(tidx+0)][3]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][4]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][5]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; float b_3; float b_4; float b_5; a=shared_0[((tidx+(( - 
1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); } __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(h-7)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(h-7)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][0]=B((h-7), (0+tidx)); } { shared_1[(tidx+0)][1]=B((h-6), (0+tidx)); } { shared_1[(tidx+0)][2]=B((h-5), (0+tidx)); } { shared_1[(tidx+0)][3]=B((h-4), (0+tidx)); } { shared_1[(tidx+0)][4]=B((h-3), (0+tidx)); } { shared_1[(tidx+0)][5]=B((h-2), (0+tidx)); } { shared_1[(tidx+0)][6]=B((h-1), (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_0; float b_1; float b_2; float b_3; float b_4; float b_5; float b_6; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_0=shared_1[it_2][0]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; sum_0+=(a*b_0); sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); } C(((idy*8)+0), idx)=sum_0; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-1)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-1)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][1]=B(0, (0+tidx)); } { shared_1[(tidx+0)][2]=B(1, (0+tidx)); } { shared_1[(tidx+0)][3]=B(2, (0+tidx)); } { shared_1[(tidx+0)][4]=B(3, (0+tidx)); } { shared_1[(tidx+0)][5]=B(4, (0+tidx)); } { shared_1[(tidx+0)][6]=B(5, (0+tidx)); } { shared_1[(tidx+0)][7]=B(6, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_1; float b_2; float b_3; float b_4; float b_5; 
float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_1=shared_1[it_2][1]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_1+=(a*b_1); sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+1), idx)=sum_1; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-2)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-2)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][2]=B(0, (0+tidx)); } { shared_1[(tidx+0)][3]=B(1, (0+tidx)); } { shared_1[(tidx+0)][4]=B(2, (0+tidx)); } { shared_1[(tidx+0)][5]=B(3, (0+tidx)); } { shared_1[(tidx+0)][6]=B(4, (0+tidx)); } { shared_1[(tidx+0)][7]=B(5, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_2; float b_3; float b_4; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_2=shared_1[it_2][2]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_2+=(a*b_2); sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+2), idx)=sum_2; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-3)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-3)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][3]=B(0, (0+tidx)); } { shared_1[(tidx+0)][4]=B(1, (0+tidx)); } { shared_1[(tidx+0)][5]=B(2, (0+tidx)); } { shared_1[(tidx+0)][6]=B(3, (0+tidx)); } { shared_1[(tidx+0)][7]=B(4, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_3; float b_4; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_3=shared_1[it_2][3]; b_4=shared_1[it_2][4]; 
b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_3+=(a*b_3); sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+3), idx)=sum_3; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-4)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-4)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][4]=B(0, (0+tidx)); } { shared_1[(tidx+0)][5]=B(1, (0+tidx)); } { shared_1[(tidx+0)][6]=B(2, (0+tidx)); } { shared_1[(tidx+0)][7]=B(3, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_4; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_4=shared_1[it_2][4]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_4+=(a*b_4); sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+4), idx)=sum_4; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-5)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-5)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][5]=B(0, (0+tidx)); } { shared_1[(tidx+0)][6]=B(1, (0+tidx)); } { shared_1[(tidx+0)][7]=B(2, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_5; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_5=shared_1[it_2][5]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_5+=(a*b_5); sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+5), idx)=sum_5; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-6)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-6)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][6]=B(0, (0+tidx)); } { shared_1[(tidx+0)][7]=B(1, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; 
it_2=(it_2+1)) { float a; float b_6; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_6=shared_1[it_2][6]; b_7=shared_1[it_2][7]; sum_6+=(a*b_6); sum_7+=(a*b_7); } C(((idy*8)+6), idx)=sum_6; __syncthreads(); __syncthreads(); if ((tidx<16)) { { shared_0[(tidx+0)]=A((((idy*8)+(( - 1)*(0-7)))+h), (idx+(( - 1)*0))); } } { shared_0[(tidx+16)]=A((((idy*8)+(( - 1)*(0-7)))+h), ((idx+(( - 1)*0))+16)); } __syncthreads(); if ((tidx<16)) { { shared_1[(tidx+0)][7]=B(0, (0+tidx)); } } __syncthreads(); #pragma unroll for (it_2=0; it_2<16; it_2=(it_2+1)) { float a; float b_7; a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)]; b_7=shared_1[it_2][7]; sum_7+=(a*b_7); } C(((idy*8)+7), idx)=sum_7; __syncthreads(); __syncthreads(); { } { } { } { } { } { } { } { } }
46c834de0371c53d2b57315e2dff562264210b89.hip
// !!! This is a file automatically generated by hipify!!!
// Code written by Tanmay Agrawal for simulation of two dimensional heat
// conduction problem with second order finite difference scheme.
// Left, right and bottom walls are at a temperature of 20 units while the
// top wall has a sinusoidal temperature distribution.
// Simulated with HIP.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

#define grid 1024     // nodes per side of the square domain
#define THREADX 64    // threads per block, x
#define THREADY 1     // threads per block, y

// Fill T with the boundary/initial condition and Texact with the analytic
// steady-state solution.  Both arrays hold grid*grid floats, indexed
// k = j*grid + i (row j, column i).
void init(float *T, float *Texact)
{
    int i, j, k;
    double pi = acos(-1);

    for (i = 0; i < grid; i++) {
        for (j = 0; j < grid; j++) {
            k = j * grid + i;
            if (i == 0 && j >= 0 && j < grid) {
                T[k] = 20.0;                                // left wall
            } else if (i == (grid - 1) && j >= 0 && j < grid) {
                T[k] = 20.0;                                // right wall
            } else if (j == 0 && i >= 0 && i < grid) {
                T[k] = 20.0;                                // bottom wall
            } else if (j == (grid - 1) && i > 0 && i < (grid - 1)) {
                T[k] = 20.0 + 80.0 * sin((pi * i) / grid);  // sinusoidal top wall
            } else {
                T[k] = 0.0;                                 // interior initial guess
            }
            // Analytic steady-state solution for these boundary conditions.
            Texact[k] = 20.0 + (80.0 * sin((pi * i) / grid) * sinh((pi * j) / grid)) / (sinh(pi));
        }
    }
}

// One Jacobi sweep: interior nodes take the 4-point average of T into Tnew;
// boundary nodes are copied through unchanged.
__global__ void calculation1(float *T, float *Tnew)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = j * grid + i;

    if (i > 0 && i < (grid - 1) && j > 0 && j < (grid - 1)) {
        Tnew[k] = 0.25f * (T[k - 1] + T[k + 1] + T[k - grid] + T[k + grid]);
    } else {
        Tnew[k] = T[k];
    }
}

// Copy the updated field back into T so the next sweep reads it.
__global__ void calculation2(float *T, float *Tnew)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = j * grid + i;
    T[k] = Tnew[k];
}

int main(void)
{
    float *T_h, *Tnew_h, *Texact;
    float *T_d, *Tnew_d;
    int i, j, k, timeStep, maxStep;
    size_t NG = grid * grid * sizeof(float);
    dim3 dimGrid((grid / THREADX), (grid / THREADY));
    dim3 dimBlock(THREADX, THREADY);

    // LOCATE MEMORY--HOST
    T_h = (float *)malloc(NG);
    Tnew_h = (float *)malloc(NG);
    Texact = (float *)malloc(NG);
    if (T_h == NULL || Tnew_h == NULL || Texact == NULL) {
        printf("\nERROR allocating host memory\n");
        return 1;
    }

    // LOCATE MEMORY--DEVICE
    if (hipMalloc((void **)&T_d, NG) != hipSuccess ||
        hipMalloc((void **)&Tnew_d, NG) != hipSuccess) {
        printf("\nERROR allocating device memory\n");
        return 1;
    }

    // INITIALIZATION
    init(T_h, Texact);

    // File writing after initialisation
    FILE *fout1 = fopen("Initialisation.dat", "w+t");
    if (fout1 == NULL) {
        // BUG FIX: the original called fclose(NULL) here (undefined
        // behaviour); just report the failure and skip the file.
        printf("\nERROR when opening file\n");
    } else {
        fprintf(fout1, "VARIABLES=\"X\",\"Y\",\"T\"\n");
        fprintf(fout1, "ZONE F=POINT\n");
        fprintf(fout1, "I=%d, J=%d\n", grid, grid);
        for (j = 0; j < grid; j++) {
            for (i = 0; i < grid; i++) {
                k = j * grid + i;
                float dx, dy, xpos, ypos;
                dx = 1.0 / grid;
                dy = dx;
                xpos = i * dx;
                ypos = j * dy;
                fprintf(fout1, "%5.8f\t%5.8f\t%5.8f\n", xpos, ypos, T_h[k]);
            }
        }
        fclose(fout1);
    }

    // COPY MEMORY FROM HOST TO DEVICE
    hipMemcpy(T_d, T_h, NG, hipMemcpyHostToDevice);
    hipMemcpy(Tnew_d, T_h, NG, hipMemcpyHostToDevice);

    printf("ENTER MAXIMUM SIMULATION STEPS\n");
    if (scanf("%d", &maxStep) != 1) {
        // BUG FIX: maxStep was used uninitialised when scanf failed.
        maxStep = 0;
    }

    // START ITERATION: Jacobi sweep followed by a copy-back kernel each step.
    for (timeStep = 1; timeStep <= maxStep; timeStep++) {
        hipLaunchKernelGGL(calculation1, dim3(dimGrid), dim3(dimBlock), 0, 0, T_d, Tnew_d);
        hipLaunchKernelGGL(calculation2, dim3(dimGrid), dim3(dimBlock), 0, 0, T_d, Tnew_d);
    }

    // COPY MEMORY FROM DEVICE TO HOST (this blocking copy also waits for the
    // queued kernels to finish).
    hipMemcpy(Tnew_h, T_d, NG, hipMemcpyDeviceToHost);

    // OUTPUT DATA: full temperature field.
    FILE *fout2 = fopen("TemperatureDistribution.dat", "w+t");
    if (fout2 == NULL) {
        printf("\nERROR when opening file\n");
    } else {
        fprintf(fout2, "VARIABLES=\"X\",\"Y\",\"T\"\n");
        fprintf(fout2, "ZONE F=POINT\n");
        fprintf(fout2, "I=%d, J=%d\n", grid, grid);
        for (j = 0; j < grid; j++) {
            for (i = 0; i < grid; i++) {
                k = j * grid + i;
                float dx, dy, xpos, ypos;
                dx = 1.0 / grid;
                dy = dx;
                xpos = i * dx;
                ypos = j * dy;
                fprintf(fout2, "%5.8f\t%5.8f\t%5.8f\n", xpos, ypos, Tnew_h[k]);
            }
        }
        fclose(fout2);
    }

    // CENTRAL TEMP--T: computed centreline (average of the two middle rows).
    // BUG FIX: the original wrote to fout3/fout4 without checking fopen and
    // never closed them.
    FILE *fout3 = fopen("CentreLineTemperature.dat", "w+t");
    if (fout3 == NULL) {
        printf("\nERROR when opening file\n");
    } else {
        fprintf(fout3, "VARIABLES=\"X / L\",\"T\"\n");
        fprintf(fout3, "ZONE F=POINT\n");
        fprintf(fout3, "I=%d\n", grid);
        for (i = 0; i < grid; i++) {
            int k1 = (grid / 2) * grid + i;
            int k2 = (grid / 2 - 1) * grid + i;
            float dx = 1.0 / grid;
            float xpos = (float)i * dx;
            fprintf(fout3, "%5.8f\t%5.8f\n", xpos, (Tnew_h[k1] + Tnew_h[k2]) / (2.0));
        }
        fclose(fout3);
    }

    // EXACT SOLUTION CENTERLINE
    FILE *fout4 = fopen("CentreLineExact.dat", "w+t");
    if (fout4 == NULL) {
        printf("\nERROR when opening file\n");
    } else {
        fprintf(fout4, "VARIABLES=\"X / L\",\"Texact\"\n");
        fprintf(fout4, "ZONE F=POINT\n");
        fprintf(fout4, "I=%d\n", grid);
        for (i = 0; i < grid; i++) {
            int k1 = (grid / 2) * grid + i;
            int k2 = (grid / 2 - 1) * grid + i;
            float dx = 1.0 / grid;
            float xpos = (float)i * dx;
            fprintf(fout4, "%5.8f\t%5.8f\n", xpos, (Texact[k1] + Texact[k2]) / (2.0));
        }
        fclose(fout4);
    }

    // FREE MEMORY -- BUG FIX: the original leaked Tnew_d and every host buffer.
    hipFree(T_d);
    hipFree(Tnew_d);
    free(T_h);
    free(Tnew_h);
    free(Texact);
    return 0;
}
46c834de0371c53d2b57315e2dff562264210b89.cu
// Code written by Tanmay Agrawal for simulation of two dimensional heat conduction problem with second order finite difference scheme. // Left, right and bottom walls are at a temperature of 20 units while the top wall has a sinusoidal temperature distribution. // Simulated with CUDA-C #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #define grid 1024 #define THREADX 64 #define THREADY 1 void init( float *T, float *Texact ) { int i, j, k; double pi; pi=acos(-1); for (i=0; i<grid; i++) { for (j=0; j<grid; j++) { k=j*grid+i; if (i==0 && j>=0 && j<grid) { T[k]=20.0; } else if (i==(grid-1) && j>=0 && j<grid) { T[k]=20.0; } else if (j==0 && i>=0 && i<grid) { T[k]=20.0; } else if (j==(grid-1) && i>0 && i<(grid-1)) { T[k]=20.0+80.0*sin((pi*i)/grid); } else { T[k]=0.0; } Texact[k]=20.0 + (80.0*sin((pi*i)/grid)*sinh((pi*j)/grid))/(sinh(pi)); } } } __global__ void calculation1 (float *T, float *Tnew) { int i, j, k; i = blockDim.x * blockIdx.x + threadIdx.x; j = blockDim.y * blockIdx.y + threadIdx.y; k=j*grid+i; if (i>0 && i<(grid-1) && j>0 && j<(grid-1) ) { Tnew[k] = 0.25f*(T[k-1] + T[k+1] + T[k-grid] + T[k+grid]); } else { Tnew[k] = T[k]; } } __global__ void calculation2 (float *T, float *Tnew) { int i, j, k; i = blockDim.x * blockIdx.x + threadIdx.x; j = blockDim.y * blockIdx.y + threadIdx.y; k=j*grid+i; T[k]=Tnew[k]; } int main (void) { float *T_h, *Tnew_h, *Texact; float *T_d, *Tnew_d; int i, j, k, timeStep, maxStep; size_t NG = grid*grid*sizeof(float); dim3 dimGrid( (grid/THREADX), (grid/THREADY)); dim3 dimBlock( THREADX, THREADY ); // LOCATE MEMORY--HOST T_h = (float*)malloc(NG); Tnew_h = (float*)malloc(NG); Texact = (float*)malloc(NG); // LOCATE MEMORY--DEVICE cudaMalloc((void**)&T_d, NG); cudaMalloc((void**)&Tnew_d, NG); // INITIALIZATION init( T_h, Texact ); // File writing after initialisation FILE *fout1; fout1 = fopen("Initialisation.dat","w+t"); if ( fout1 == NULL ) { printf("\nERROR when opening file\n"); fclose( 
fout1 ); } else { fprintf( fout1, "VARIABLES=\"X\",\"Y\",\"T\"\n"); fprintf( fout1, "ZONE F=POINT\n"); fprintf( fout1, "I=%d, J=%d\n", grid, grid ); for ( j = 0 ; j < grid ; j++ ) { for ( i = 0 ; i < grid ; i++ ) { k = j*grid + i; float dx, dy, xpos, ypos; dx=1.0/grid; dy=dx; xpos = i*dx; ypos = j*dy; fprintf( fout1, "%5.8f\t%5.8f\t%5.8f\n", xpos, ypos, T_h[k] ); } } } fclose( fout1 ); // COPY MEMORY FROM HOST TO DEVICE cudaMemcpy( T_d, T_h, NG, cudaMemcpyHostToDevice ); cudaMemcpy( Tnew_d, T_h, NG, cudaMemcpyHostToDevice ); printf("ENTER MAXIMUM SIMULATION STEPS\n"); scanf("%d", &maxStep); // START ITERATION for ( timeStep = 1 ; timeStep <= maxStep ; timeStep++ ) { //Kernel launch calculation1<<<dimGrid, dimBlock>>>( T_d, Tnew_d ); calculation2<<<dimGrid, dimBlock>>>( T_d, Tnew_d ); //printf("Inside the loop\n"); } // COPY MEMORY FROM DEVICE TO HOST cudaMemcpy( Tnew_h, T_d, NG, cudaMemcpyDeviceToHost ); // OUTPUT DATA FILE *fout2, *fout3, *fout4; fout2 = fopen("TemperatureDistribution.dat","w+t"); fout3 = fopen("CentreLineTemperature.dat","w+t"); fout4 = fopen("CentreLineExact.dat","w+t"); if ( fout2 == NULL ) { printf("\nERROR when opening file\n"); fclose( fout2 ); } else { fprintf( fout2, "VARIABLES=\"X\",\"Y\",\"T\"\n"); fprintf( fout2, "ZONE F=POINT\n"); fprintf( fout2, "I=%d, J=%d\n", grid, grid ); for ( j = 0 ; j < grid ; j++ ) { for ( i = 0 ; i < grid ; i++ ) { k = j*grid + i; float dx, dy, xpos, ypos; dx=1.0/grid; dy=dx; xpos = i*dx; ypos = j*dy; fprintf( fout2, "%5.8f\t%5.8f\t%5.8f\n", xpos, ypos, Tnew_h[k] ); } } } fclose( fout2 ); // CENTRAL TEMP--T fprintf(fout3, "VARIABLES=\"X / L\",\"T\"\n"); fprintf(fout3, "ZONE F=POINT\n"); fprintf(fout3, "I=%d\n", grid ); for ( i = 0 ; i < grid ; i++ ) { int k1 = (grid/2)*grid + i; int k2 = (grid/2-1)*grid + i; float dx, xpos; dx=1.0/grid; xpos = (float) i*dx; fprintf( fout3, "%5.8f\t%5.8f\n", xpos, (Tnew_h[k1] + Tnew_h[k2])/(2.0) ); } // EXACT SOLUTION CENTERLINE fprintf(fout4, "VARIABLES=\"X / 
L\",\"Texact\"\n"); fprintf(fout4, "ZONE F=POINT\n"); fprintf(fout4, "I=%d\n", grid ); for ( i = 0 ; i < grid ; i++ ) { int k1 = (grid/2)*grid + i; int k2 = (grid/2-1)*grid + i; float dx, xpos; dx=1.0/grid; xpos = (float) i*dx; fprintf( fout4, "%5.8f\t%5.8f\n", xpos, (Texact[k1] + Texact[k2])/(2.0) ); } // FREE MEMORY cudaFree( T_d ); }
d0fba3f3c9745c5f4491a579d86ea3fee1c1d896.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <helper_cuda.h> #include "cudaUtils.h" #include "matlabData.h" #include "evolutionUtils.h" #include "reactProb.h" #include "evolutionAux.cu" ReactionProbabilities::ReactionProbabilities(const OmegaWavepacket *wavepacket_) : wavepacket(wavepacket_), fai_on_surface_dev(0), d_fai_on_surface_dev(0), psi_real_dev(0), d_psi_real_dev(0), psi_imag_dev(0), d_psi_imag_dev(0) { reaction_probabilities.resize(MatlabData::crp_parameters()->n_energies, 0); setup_data_on_device(); } ReactionProbabilities::~ReactionProbabilities() { std::cout << " Destroy reaction probabilities data for Omega: " << wavepacket->omega << std::endl; reaction_probabilities.resize(0); wavepacket = 0; psi_real_dev = 0; d_psi_real_dev = 0; psi_imag_dev = 0; d_psi_imag_dev = 0; _CUDA_FREE_(fai_on_surface_dev); _CUDA_FREE_(d_fai_on_surface_dev); } void ReactionProbabilities::setup_data_on_device() { std::cout << " Setup reaction probabilities data on device for Omega: " << wavepacket->omega << std::endl; const int &n1 = MatlabData::r1()->n; const int &n_theta = MatlabData::theta()->n; const int &n_energies = MatlabData::crp_parameters()->n_energies; if(!fai_on_surface_dev) { checkCudaErrors(hipMalloc(&fai_on_surface_dev, n1*n_theta*n_energies*sizeof(Complex))); checkCudaErrors(hipMemset(fai_on_surface_dev, 0, n1*n_theta*n_energies*sizeof(Complex))); } if(!d_fai_on_surface_dev) { checkCudaErrors(hipMalloc(&d_fai_on_surface_dev, n1*n_theta*n_energies*sizeof(Complex))); checkCudaErrors(hipMemset(d_fai_on_surface_dev, 0, n1*n_theta*n_energies*sizeof(Complex))); } } void ReactionProbabilities::calculate_psi_gradients_on_dividing_surface() { const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_theta = MatlabData::theta()->n; const double &dr2 = MatlabData::r2()->dr; const int &n_dividing_surface = MatlabData::crp_parameters()->n_dividing_surface; const int 
&n_gradient_points = MatlabData::crp_parameters()->n_gradient_points; insist(n_dividing_surface < n2); psi_real_dev = wavepacket->work_dev; d_psi_real_dev = psi_real_dev + n1*n_theta; psi_imag_dev = d_psi_real_dev + n1*n_theta; d_psi_imag_dev = psi_imag_dev + n1*n_theta; Num1ststGradient::gradients_2_3d(n1, n2, n_theta, n_dividing_surface, n_gradient_points, dr2, wavepacket->weighted_psi_real_dev, psi_real_dev, d_psi_real_dev); Num1ststGradient::gradients_2_3d(n1, n2, n_theta, n_dividing_surface, n_gradient_points, dr2, wavepacket->weighted_psi_imag_dev, psi_imag_dev, d_psi_imag_dev); } void ReactionProbabilities::psi_time_to_fai_energy_on_dividing_surface() const { const int &n1 = MatlabData::r1()->n; const int &n_theta = MatlabData::theta()->n; const int &n_energies = MatlabData::crp_parameters()->n_energies; const double &dt = MatlabData::time()->time_step; const int &n_steps = MatlabData::time()->steps; const double t = n_steps*dt; const int n_threads = _NTHREADS_; const int n_blocks = cudaUtils::number_of_blocks(n_threads, n1*n_theta*n_energies); hipLaunchKernelGGL(( _psi_time_to_fai_energy_on_dividing_surface_), dim3(n_blocks), dim3(n_threads), n_energies*sizeof(Complex), 0, n1*n_theta, n_energies, t, dt, psi_real_dev, psi_imag_dev, d_psi_real_dev, d_psi_imag_dev, fai_on_surface_dev, d_fai_on_surface_dev); } void ReactionProbabilities::calculate_reaction_probabilities() { const int &n1 = MatlabData::r1()->n; const int &n_theta = MatlabData::theta()->n; const double &dr1 = MatlabData::r1()->dr; const double &mu2 = MatlabData::r2()->mass; const int &n_energies = MatlabData::crp_parameters()->n_energies; const double *eta_sq = MatlabData::crp_parameters()->eta_sq; const double dr1_mu2 = dr1/mu2; insist(reaction_probabilities.size() == n_energies); for(int iE = 0; iE < n_energies; iE++) { const Complex *fai = fai_on_surface_dev + iE*n1*n_theta; const Complex *dfai = d_fai_on_surface_dev + iE*n1*n_theta; Complex s(0.0, 0.0); 
insist(hipblasZdotc(wavepacket->cublas_handle, n1*n_theta, (hipDoubleComplex *) dfai, 1, (hipDoubleComplex *) fai, 1, (hipDoubleComplex *) &s) == HIPBLAS_STATUS_SUCCESS); reaction_probabilities[iE] = s.imag()/eta_sq[iE]*dr1_mu2; } } void ReactionProbabilities::calculate_reaction_probabilities(const int calculate) { calculate_psi_gradients_on_dividing_surface(); psi_time_to_fai_energy_on_dividing_surface(); if(calculate) calculate_reaction_probabilities(); }
d0fba3f3c9745c5f4491a579d86ea3fee1c1d896.cu
#include <iostream> #include <helper_cuda.h> #include "cudaUtils.h" #include "matlabData.h" #include "evolutionUtils.h" #include "reactProb.h" #include "evolutionAux.cu" ReactionProbabilities::ReactionProbabilities(const OmegaWavepacket *wavepacket_) : wavepacket(wavepacket_), fai_on_surface_dev(0), d_fai_on_surface_dev(0), psi_real_dev(0), d_psi_real_dev(0), psi_imag_dev(0), d_psi_imag_dev(0) { reaction_probabilities.resize(MatlabData::crp_parameters()->n_energies, 0); setup_data_on_device(); } ReactionProbabilities::~ReactionProbabilities() { std::cout << " Destroy reaction probabilities data for Omega: " << wavepacket->omega << std::endl; reaction_probabilities.resize(0); wavepacket = 0; psi_real_dev = 0; d_psi_real_dev = 0; psi_imag_dev = 0; d_psi_imag_dev = 0; _CUDA_FREE_(fai_on_surface_dev); _CUDA_FREE_(d_fai_on_surface_dev); } void ReactionProbabilities::setup_data_on_device() { std::cout << " Setup reaction probabilities data on device for Omega: " << wavepacket->omega << std::endl; const int &n1 = MatlabData::r1()->n; const int &n_theta = MatlabData::theta()->n; const int &n_energies = MatlabData::crp_parameters()->n_energies; if(!fai_on_surface_dev) { checkCudaErrors(cudaMalloc(&fai_on_surface_dev, n1*n_theta*n_energies*sizeof(Complex))); checkCudaErrors(cudaMemset(fai_on_surface_dev, 0, n1*n_theta*n_energies*sizeof(Complex))); } if(!d_fai_on_surface_dev) { checkCudaErrors(cudaMalloc(&d_fai_on_surface_dev, n1*n_theta*n_energies*sizeof(Complex))); checkCudaErrors(cudaMemset(d_fai_on_surface_dev, 0, n1*n_theta*n_energies*sizeof(Complex))); } } void ReactionProbabilities::calculate_psi_gradients_on_dividing_surface() { const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_theta = MatlabData::theta()->n; const double &dr2 = MatlabData::r2()->dr; const int &n_dividing_surface = MatlabData::crp_parameters()->n_dividing_surface; const int &n_gradient_points = MatlabData::crp_parameters()->n_gradient_points; 
insist(n_dividing_surface < n2); psi_real_dev = wavepacket->work_dev; d_psi_real_dev = psi_real_dev + n1*n_theta; psi_imag_dev = d_psi_real_dev + n1*n_theta; d_psi_imag_dev = psi_imag_dev + n1*n_theta; Num1ststGradient::gradients_2_3d(n1, n2, n_theta, n_dividing_surface, n_gradient_points, dr2, wavepacket->weighted_psi_real_dev, psi_real_dev, d_psi_real_dev); Num1ststGradient::gradients_2_3d(n1, n2, n_theta, n_dividing_surface, n_gradient_points, dr2, wavepacket->weighted_psi_imag_dev, psi_imag_dev, d_psi_imag_dev); } void ReactionProbabilities::psi_time_to_fai_energy_on_dividing_surface() const { const int &n1 = MatlabData::r1()->n; const int &n_theta = MatlabData::theta()->n; const int &n_energies = MatlabData::crp_parameters()->n_energies; const double &dt = MatlabData::time()->time_step; const int &n_steps = MatlabData::time()->steps; const double t = n_steps*dt; const int n_threads = _NTHREADS_; const int n_blocks = cudaUtils::number_of_blocks(n_threads, n1*n_theta*n_energies); _psi_time_to_fai_energy_on_dividing_surface_<<<n_blocks, n_threads, n_energies*sizeof(Complex)>>> (n1*n_theta, n_energies, t, dt, psi_real_dev, psi_imag_dev, d_psi_real_dev, d_psi_imag_dev, fai_on_surface_dev, d_fai_on_surface_dev); } void ReactionProbabilities::calculate_reaction_probabilities() { const int &n1 = MatlabData::r1()->n; const int &n_theta = MatlabData::theta()->n; const double &dr1 = MatlabData::r1()->dr; const double &mu2 = MatlabData::r2()->mass; const int &n_energies = MatlabData::crp_parameters()->n_energies; const double *eta_sq = MatlabData::crp_parameters()->eta_sq; const double dr1_mu2 = dr1/mu2; insist(reaction_probabilities.size() == n_energies); for(int iE = 0; iE < n_energies; iE++) { const Complex *fai = fai_on_surface_dev + iE*n1*n_theta; const Complex *dfai = d_fai_on_surface_dev + iE*n1*n_theta; Complex s(0.0, 0.0); insist(cublasZdotc(wavepacket->cublas_handle, n1*n_theta, (cuDoubleComplex *) dfai, 1, (cuDoubleComplex *) fai, 1, (cuDoubleComplex *) &s) == 
CUBLAS_STATUS_SUCCESS); reaction_probabilities[iE] = s.imag()/eta_sq[iE]*dr1_mu2; } } void ReactionProbabilities::calculate_reaction_probabilities(const int calculate) { calculate_psi_gradients_on_dividing_surface(); psi_time_to_fai_energy_on_dividing_surface(); if(calculate) calculate_reaction_probabilities(); }
6428f77213a0829569763014850960be2df0e289.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. // All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "../../utils.h" #include "convolution_hip.cuh" #define CUB_NS_PREFIX namespace kaolin { #define CUB_NS_POSTFIX } #include <hipcub/hipcub.hpp> namespace kaolin { using namespace cub; #define THREADS_PER_BLOCK 64 namespace minkowski { template <typename Dtype, typename Itype> void ConvolutionForwardKernelGPU(const Dtype *d_in_feat, int in_nchannel, Dtype *d_out_feat, int out_nchannel, const Dtype *d_kernel, const pInOutMaps<Itype> &in_map, const pInOutMaps<Itype> &out_map, int out_nrows, hipblasHandle_t cuhandle, hipStream_t stream); template <typename Dtype, typename Itype> void ConvolutionBackwardKernelGPU(const Dtype *d_in_feat, Dtype *d_grad_in_feat, int in_nchannel, const Dtype *d_grad_out_feat, int out_nchannel, const Dtype *d_kernel, Dtype *d_grad_kernel, const pInOutMaps<Itype> &in_map, const pInOutMaps<Itype> &out_map, int out_nrows, hipblasHandle_t cuhandle, hipStream_t stream); } //end namespace minkowski uint GetPyramid(uint* Pyramid, int batch, int k, int level, int olevel) { return Pyramid[(2 * batch + k) * (olevel + 2) + level]; } uint64_t GetStorageBytesX(void* d_temp_storage, uint* d_Info, uint* d_PrefixSum, uint max_total_points) { uint64_t temp_storage_bytes = 0; CubDebugExit(DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, 
d_Info, d_PrefixSum, max_total_points)); return temp_storage_bytes; } __device__ int Identify( const point_data k, const uint Level, uint* PrefixSum, uchar* Oroot, uint offset) { int maxval = (0x1 << Level) - 1; // seems you could do this better using Morton codes if (k.x < 0 || k.y < 0 || k.z < 0 || k.x > maxval || k.y > maxval || k.z > maxval) return -1; int ord = 0; for (uint l = 0; l < Level; l++) { uint depth = Level - l - 1; uint mask = (0x1 << depth); uint child_idx = ((mask&k.x) << 2 | (mask&k.y) << 1 | (mask&k.z)) >> depth; uchar bits = Oroot[ord]; // if bit set, keep going if (bits&(0x1 << child_idx)) { // count set bits up to child - inclusive sum uint cnt = __popc(bits&((0x2 << child_idx) - 1)); ord = PrefixSum[ord] + cnt; if (depth == 0) return ord - offset; } else return -1; } return ord; // only if called with Level=0 } __global__ void GenerateKernelMap( const uint num, const point_data* Pdata, int* Inmap, int* Outmap, uint* Info, const uint K, const point_data* Kvec, const int scale, uchar* Oroot, uint* PrefixSum, uint level, uint offset) { int o_idx = blockDim.x * blockIdx.x + threadIdx.x; if (o_idx < num) { point_data V = mul_point_data(scale, Pdata[o_idx]); Outmap[o_idx] = o_idx; for (int k = 0; k < K; k++) { int i_idx = Identify(add_point_data(V, Kvec[k]), level, PrefixSum, Oroot, offset); Inmap[k*num + o_idx] = i_idx; Info[k*num + o_idx] = i_idx == -1 ? 
0 : 1; } } } __global__ void GenerateKernelMapTrans( const uint num, const point_data* Pdata, int* Inmap, int* Outmap, uint* Info, const uint K, const point_data* Kvec, const int scale, uchar* Oroot, uint* PrefixSum, uint level, uint offset) { int o_idx = blockDim.x * blockIdx.x + threadIdx.x; if (o_idx < num) { point_data V = Pdata[o_idx]; Outmap[o_idx] = o_idx; for (int k = 0; k < K; k++) { point_data U = sub_point_data(V, Kvec[k]); if (U.x%scale == 0 && U.y%scale == 0 && U.z%scale == 0) { int i_idx = Identify(div_point_data(U, scale), level, PrefixSum, Oroot, offset); Inmap[k*num + o_idx] = i_idx; Info[k*num + o_idx] = i_idx == -1 ? 0 : 1; } else { Info[k*num + o_idx] = 0; } } } } __global__ void CompactifyMaps( const uint Psize, const uint num, const int *Inmap, const int *Outmap, int *InmapX, int *OutmapX, const uint *Info, const uint *PrefixSum) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; if (tidx < Psize) { if (Info[tidx] != 0) { uint IdxOut = PrefixSum[tidx] - 1; InmapX[IdxOut] = Inmap[tidx]; OutmapX[IdxOut] = Outmap[tidx % num]; } } } void ProcessKernelMaps( uint K, uint Cnt, pInOutMaps<int32_t> &in_map, pInOutMaps<int32_t> &out_map, uint* Info, uint* PSum, void* d_temp_storageA, size_t temp_storage_bytesA, int* Inmap, int* Outmap, int* InmapX, int* OutmapX) { DeviceScan::InclusiveSum(d_temp_storageA, temp_storage_bytesA, Info, PSum, K*Cnt); CUDA_CHECK(hipGetLastError()); hipLaunchKernelGGL(( CompactifyMaps), dim3((K*Cnt + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, K*Cnt, Cnt, Inmap, Outmap, InmapX, OutmapX, Info, PSum); CUDA_CHECK(hipGetLastError()); in_map.clear(); out_map.clear(); int currSum, prevSum = 0; int size = 0; int* Ix = InmapX; int* Ox = OutmapX; for (int k = 0; k < K; k++) { hipMemcpy(&currSum, PSum + (k + 1)*Cnt - 1, sizeof(int), hipMemcpyDeviceToHost); size = currSum - prevSum; in_map.push_back(pVector<int>(Ix, size)); out_map.push_back(pVector<int>(Ox, size)); prevSum = currSum; Ix += size; Ox += 
size; } CUDA_CHECK(hipGetLastError()); } void Conv3d_forward_cuda( point_data* d_Proot, uchar* dO, uint* dP, float* Input, int N, float* Output, int M, float* Params, point_data* Kvec, uint Ksize, int Jump, int Qlevel, int Olevel, int BatchSize, uint* Pyramid, uint* d_Info, uint* d_PSum, void* d_temp_storageA, int64_t temp_storage_bytesA, int* d_Inmap, int* d_Outmap, int* d_InmapX, int* d_OutmapX) { pInOutMaps<int32_t> d_inmap; pInOutMaps<int32_t> d_outmap; float* X = Input; float* Y = Output; int Plevel = Qlevel - Jump; uint scale_factor = 0x1 << (Qlevel - Plevel); for (uint batch = 0; batch < BatchSize; batch++) { uint Psize = GetPyramid(Pyramid, batch, 0, Plevel, Olevel); uint Qsize = GetPyramid(Pyramid, batch, 0, Qlevel, Olevel); uint offset = GetPyramid(Pyramid, batch, 1, Qlevel, Olevel); hipLaunchKernelGGL(( GenerateKernelMap), dim3((Psize + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, Psize, d_Proot + GetPyramid(Pyramid, batch, 1, Plevel, Olevel), d_Inmap, d_Outmap, d_Info, Ksize, Kvec, scale_factor, dO, dP, Qlevel, offset); CUDA_CHECK(hipGetLastError()); ProcessKernelMaps( Ksize, Psize, d_inmap, d_outmap, d_Info, d_PSum, d_temp_storageA, temp_storage_bytesA, d_Inmap, d_Outmap, d_InmapX, d_OutmapX); CUDA_CHECK(hipGetLastError()); hipblasHandle_t handle = NULL; //TODO: get from Pytorch (and stream) minkowski::ConvolutionForwardKernelGPU<float, int32_t>( X, N,// input Y, M, Params, d_inmap, d_outmap, Psize, handle, 0); CUDA_CHECK(hipGetLastError()); X += N * Qsize; Y += M * Psize; d_Proot += GetPyramid(Pyramid, batch, 1, Olevel + 1, Olevel); dO += GetPyramid(Pyramid, batch, 1, Olevel, Olevel); dP += GetPyramid(Pyramid, batch, 1, Olevel, Olevel) + 1; } CUDA_CHECK(hipGetLastError()); } void Conv3d_backward_cuda( point_data* d_Proot, uchar* dO, uint* dP, float* Input, int N, float* Grad_Inputs, float* Grad_Outputs, int M, float* Params, float* Grad_Params, point_data* Kvec, uint Ksize, int Jump, int Plevel, int Olevel, int 
BatchSize, uint* Pyramid, uint* d_Info, uint* d_PSum, void* d_temp_storageA, int64_t temp_storage_bytesA, int* d_Inmap, int* d_Outmap, int* d_InmapX, int* d_OutmapX) { pInOutMaps<int32_t> d_inmap; pInOutMaps<int32_t> d_outmap; float* X = Input; int Qlevel = Plevel + Jump; TORCH_CHECK(Qlevel <= Olevel, "Level + jump must be lower or equal than the depth of the octree."); uint scale_factor = 0x1 << (Qlevel - Plevel); for (uint batch = 0; batch < BatchSize; batch++) { uint Qsize = GetPyramid(Pyramid, batch, 0, Qlevel, Olevel); uint Psize = GetPyramid(Pyramid, batch, 0, Plevel, Olevel); uint offset = GetPyramid(Pyramid, batch, 1, Plevel, Olevel); hipLaunchKernelGGL(( GenerateKernelMapTrans), dim3((Qsize + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, Qsize, d_Proot + GetPyramid(Pyramid, batch, 1, Qlevel, Olevel), d_Inmap, d_Outmap, d_Info, Ksize, Kvec, scale_factor, dO, dP, Plevel, offset); CUDA_CHECK(hipGetLastError()); ProcessKernelMaps( Ksize, Qsize, d_inmap, d_outmap, d_Info, d_PSum, d_temp_storageA, temp_storage_bytesA, d_Inmap, d_Outmap, d_InmapX, d_OutmapX); hipblasHandle_t handle = NULL; //TODO: get from Pytorch (and stream) minkowski::ConvolutionBackwardKernelGPU<float, int32_t>( X, Grad_Inputs, N, Grad_Outputs, M, Params, Grad_Params, d_outmap, d_inmap, Psize, // note the swapping of i/o maps handle, 0); CUDA_CHECK(hipGetLastError()); X += N * Qsize; Grad_Inputs += N * Qsize; Grad_Outputs += M * Psize; d_Proot += GetPyramid(Pyramid, batch, 1, Olevel + 1, Olevel); dO += GetPyramid(Pyramid, batch, 1, Olevel, Olevel); dP += GetPyramid(Pyramid, batch, 1, Olevel, Olevel) + 1; } } void ConvTranspose3d_forward_cuda( point_data* d_Proot, uchar* dO, uint* dP, float* Input, int N, float* Output, int M, float* Params, point_data* Kvec, uint Ksize, int Jump, int Qlevel, int Olevel, int BatchSize, uint* Pyramid, uint* d_Info, uint* d_PSum, void* d_temp_storageA, int64_t temp_storage_bytesA, int* d_Inmap, int* d_Outmap, int* d_InmapX, int* 
d_OutmapX) { pInOutMaps<int32_t> d_inmap; pInOutMaps<int32_t> d_outmap; float* X = Input; float* Y = Output; int Plevel = Qlevel + Jump; TORCH_CHECK(Plevel <= Olevel, "Level + jump must be lower or equal than the depth of the octree."); uint scale_factor = 0x1 << (Plevel - Qlevel); for (uint batch = 0; batch < BatchSize; batch++) { uint Qsize = GetPyramid(Pyramid, batch, 0, Qlevel, Olevel); uint Psize = GetPyramid(Pyramid, batch, 0, Plevel, Olevel); uint offset = GetPyramid(Pyramid, batch, 1, Qlevel, Olevel); hipLaunchKernelGGL(( GenerateKernelMapTrans), dim3((Psize + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, Psize, d_Proot + GetPyramid(Pyramid, batch, 1, Plevel, Olevel), d_Inmap, d_Outmap, d_Info, Ksize, Kvec, scale_factor, dO, dP, Qlevel, offset); CUDA_CHECK(hipGetLastError()); ProcessKernelMaps( Ksize, Psize, d_inmap, d_outmap, d_Info, d_PSum, d_temp_storageA, temp_storage_bytesA, d_Inmap, d_Outmap, d_InmapX, d_OutmapX); hipblasHandle_t handle = NULL; //TODO: get from Pytorch (and stream) minkowski::ConvolutionForwardKernelGPU<float, int32_t>( X, N,// input Y, M, Params, d_inmap, d_outmap, Psize, handle, 0); CUDA_CHECK(hipGetLastError()); d_Proot += GetPyramid(Pyramid, batch, 1, Olevel + 1, Olevel); X += N * Qsize; Y += M * Psize; dO += GetPyramid(Pyramid, batch, 1, Olevel, Olevel); dP += GetPyramid(Pyramid, batch, 1, Olevel, Olevel) + 1; } CUDA_CHECK(hipGetLastError()); } void ConvTranspose3d_backward_cuda( point_data* d_Proot, uchar* dO, uint* dP, float* Input, int N, float* Grad_Inputs, float* Grad_Outputs, int M, float* Params, float* Grad_Params, point_data* Kvec, uint Ksize, int Jump, int Plevel, int Olevel, int BatchSize, uint* Pyramid, uint* d_Info, uint* d_PSum, void* d_temp_storageA, int64_t temp_storage_bytesA, int* d_Inmap, int* d_Outmap, int* d_InmapX, int* d_OutmapX) { pInOutMaps<int32_t> d_inmap; pInOutMaps<int32_t> d_outmap; float* X = Input; int Qlevel = Plevel - Jump; TORCH_CHECK(Qlevel >= 0, "level - jump 
must be positive"); uint scale_factor = 0x1 << (Plevel - Qlevel); for (uint batch = 0; batch < BatchSize; batch++) { uint Qsize = GetPyramid(Pyramid, batch, 0, Qlevel, Olevel); uint Psize = GetPyramid(Pyramid, batch, 0, Plevel, Olevel); uint offset = GetPyramid(Pyramid, batch, 1, Plevel, Olevel); hipLaunchKernelGGL(( GenerateKernelMap), dim3((Qsize + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, Qsize, d_Proot + GetPyramid(Pyramid, batch, 1, Qlevel, Olevel), d_Inmap, d_Outmap, d_Info, Ksize, Kvec, scale_factor, dO, dP, Plevel, offset); CUDA_CHECK(hipGetLastError()); ProcessKernelMaps( Ksize, Qsize, d_inmap, d_outmap, d_Info, d_PSum, d_temp_storageA, temp_storage_bytesA, d_Inmap, d_Outmap, d_InmapX, d_OutmapX); hipblasHandle_t handle = NULL; //TODO: get from Pytorch (and stream) minkowski::ConvolutionBackwardKernelGPU<float, int32_t>( X, Grad_Inputs, N, Grad_Outputs, M, Params, Grad_Params, d_outmap, d_inmap, Psize, handle, 0); CUDA_CHECK(hipGetLastError()); d_Proot += GetPyramid(Pyramid, batch, 1, Olevel + 1, Olevel); X += N * Qsize; Grad_Inputs += N * Qsize; Grad_Outputs += M * Psize; dO += GetPyramid(Pyramid, batch, 1, Olevel, Olevel); dP += GetPyramid(Pyramid, batch, 1, Olevel, Olevel) + 1; } } } // namespace kaolin
6428f77213a0829569763014850960be2df0e289.cu
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. // All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "../../utils.h" #include "convolution.cuh" #define CUB_NS_PREFIX namespace kaolin { #define CUB_NS_POSTFIX } #include <cub/device/device_scan.cuh> namespace kaolin { using namespace cub; #define THREADS_PER_BLOCK 64 namespace minkowski { template <typename Dtype, typename Itype> void ConvolutionForwardKernelGPU(const Dtype *d_in_feat, int in_nchannel, Dtype *d_out_feat, int out_nchannel, const Dtype *d_kernel, const pInOutMaps<Itype> &in_map, const pInOutMaps<Itype> &out_map, int out_nrows, cublasHandle_t cuhandle, cudaStream_t stream); template <typename Dtype, typename Itype> void ConvolutionBackwardKernelGPU(const Dtype *d_in_feat, Dtype *d_grad_in_feat, int in_nchannel, const Dtype *d_grad_out_feat, int out_nchannel, const Dtype *d_kernel, Dtype *d_grad_kernel, const pInOutMaps<Itype> &in_map, const pInOutMaps<Itype> &out_map, int out_nrows, cublasHandle_t cuhandle, cudaStream_t stream); } //end namespace minkowski uint GetPyramid(uint* Pyramid, int batch, int k, int level, int olevel) { return Pyramid[(2 * batch + k) * (olevel + 2) + level]; } uint64_t GetStorageBytesX(void* d_temp_storage, uint* d_Info, uint* d_PrefixSum, uint max_total_points) { uint64_t temp_storage_bytes = 0; CubDebugExit(DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, d_Info, d_PrefixSum, max_total_points)); return temp_storage_bytes; } __device__ int 
Identify( const point_data k, const uint Level, uint* PrefixSum, uchar* Oroot, uint offset) { int maxval = (0x1 << Level) - 1; // seems you could do this better using Morton codes if (k.x < 0 || k.y < 0 || k.z < 0 || k.x > maxval || k.y > maxval || k.z > maxval) return -1; int ord = 0; for (uint l = 0; l < Level; l++) { uint depth = Level - l - 1; uint mask = (0x1 << depth); uint child_idx = ((mask&k.x) << 2 | (mask&k.y) << 1 | (mask&k.z)) >> depth; uchar bits = Oroot[ord]; // if bit set, keep going if (bits&(0x1 << child_idx)) { // count set bits up to child - inclusive sum uint cnt = __popc(bits&((0x2 << child_idx) - 1)); ord = PrefixSum[ord] + cnt; if (depth == 0) return ord - offset; } else return -1; } return ord; // only if called with Level=0 } __global__ void GenerateKernelMap( const uint num, const point_data* Pdata, int* Inmap, int* Outmap, uint* Info, const uint K, const point_data* Kvec, const int scale, uchar* Oroot, uint* PrefixSum, uint level, uint offset) { int o_idx = blockDim.x * blockIdx.x + threadIdx.x; if (o_idx < num) { point_data V = mul_point_data(scale, Pdata[o_idx]); Outmap[o_idx] = o_idx; for (int k = 0; k < K; k++) { int i_idx = Identify(add_point_data(V, Kvec[k]), level, PrefixSum, Oroot, offset); Inmap[k*num + o_idx] = i_idx; Info[k*num + o_idx] = i_idx == -1 ? 0 : 1; } } } __global__ void GenerateKernelMapTrans( const uint num, const point_data* Pdata, int* Inmap, int* Outmap, uint* Info, const uint K, const point_data* Kvec, const int scale, uchar* Oroot, uint* PrefixSum, uint level, uint offset) { int o_idx = blockDim.x * blockIdx.x + threadIdx.x; if (o_idx < num) { point_data V = Pdata[o_idx]; Outmap[o_idx] = o_idx; for (int k = 0; k < K; k++) { point_data U = sub_point_data(V, Kvec[k]); if (U.x%scale == 0 && U.y%scale == 0 && U.z%scale == 0) { int i_idx = Identify(div_point_data(U, scale), level, PrefixSum, Oroot, offset); Inmap[k*num + o_idx] = i_idx; Info[k*num + o_idx] = i_idx == -1 ? 
0 : 1; } else { Info[k*num + o_idx] = 0; } } } } __global__ void CompactifyMaps( const uint Psize, const uint num, const int *Inmap, const int *Outmap, int *InmapX, int *OutmapX, const uint *Info, const uint *PrefixSum) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; if (tidx < Psize) { if (Info[tidx] != 0) { uint IdxOut = PrefixSum[tidx] - 1; InmapX[IdxOut] = Inmap[tidx]; OutmapX[IdxOut] = Outmap[tidx % num]; } } } void ProcessKernelMaps( uint K, uint Cnt, pInOutMaps<int32_t> &in_map, pInOutMaps<int32_t> &out_map, uint* Info, uint* PSum, void* d_temp_storageA, size_t temp_storage_bytesA, int* Inmap, int* Outmap, int* InmapX, int* OutmapX) { DeviceScan::InclusiveSum(d_temp_storageA, temp_storage_bytesA, Info, PSum, K*Cnt); CUDA_CHECK(cudaGetLastError()); CompactifyMaps<<<(K*Cnt + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>( K*Cnt, Cnt, Inmap, Outmap, InmapX, OutmapX, Info, PSum); CUDA_CHECK(cudaGetLastError()); in_map.clear(); out_map.clear(); int currSum, prevSum = 0; int size = 0; int* Ix = InmapX; int* Ox = OutmapX; for (int k = 0; k < K; k++) { cudaMemcpy(&currSum, PSum + (k + 1)*Cnt - 1, sizeof(int), cudaMemcpyDeviceToHost); size = currSum - prevSum; in_map.push_back(pVector<int>(Ix, size)); out_map.push_back(pVector<int>(Ox, size)); prevSum = currSum; Ix += size; Ox += size; } CUDA_CHECK(cudaGetLastError()); } void Conv3d_forward_cuda( point_data* d_Proot, uchar* dO, uint* dP, float* Input, int N, float* Output, int M, float* Params, point_data* Kvec, uint Ksize, int Jump, int Qlevel, int Olevel, int BatchSize, uint* Pyramid, uint* d_Info, uint* d_PSum, void* d_temp_storageA, int64_t temp_storage_bytesA, int* d_Inmap, int* d_Outmap, int* d_InmapX, int* d_OutmapX) { pInOutMaps<int32_t> d_inmap; pInOutMaps<int32_t> d_outmap; float* X = Input; float* Y = Output; int Plevel = Qlevel - Jump; uint scale_factor = 0x1 << (Qlevel - Plevel); for (uint batch = 0; batch < BatchSize; batch++) { uint Psize = GetPyramid(Pyramid, batch, 0, Plevel, 
Olevel); uint Qsize = GetPyramid(Pyramid, batch, 0, Qlevel, Olevel); uint offset = GetPyramid(Pyramid, batch, 1, Qlevel, Olevel); GenerateKernelMap<<<(Psize + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>( Psize, d_Proot + GetPyramid(Pyramid, batch, 1, Plevel, Olevel), d_Inmap, d_Outmap, d_Info, Ksize, Kvec, scale_factor, dO, dP, Qlevel, offset); CUDA_CHECK(cudaGetLastError()); ProcessKernelMaps( Ksize, Psize, d_inmap, d_outmap, d_Info, d_PSum, d_temp_storageA, temp_storage_bytesA, d_Inmap, d_Outmap, d_InmapX, d_OutmapX); CUDA_CHECK(cudaGetLastError()); cublasHandle_t handle = NULL; //TODO: get from Pytorch (and stream) minkowski::ConvolutionForwardKernelGPU<float, int32_t>( X, N,// input Y, M, Params, d_inmap, d_outmap, Psize, handle, 0); CUDA_CHECK(cudaGetLastError()); X += N * Qsize; Y += M * Psize; d_Proot += GetPyramid(Pyramid, batch, 1, Olevel + 1, Olevel); dO += GetPyramid(Pyramid, batch, 1, Olevel, Olevel); dP += GetPyramid(Pyramid, batch, 1, Olevel, Olevel) + 1; } CUDA_CHECK(cudaGetLastError()); } void Conv3d_backward_cuda( point_data* d_Proot, uchar* dO, uint* dP, float* Input, int N, float* Grad_Inputs, float* Grad_Outputs, int M, float* Params, float* Grad_Params, point_data* Kvec, uint Ksize, int Jump, int Plevel, int Olevel, int BatchSize, uint* Pyramid, uint* d_Info, uint* d_PSum, void* d_temp_storageA, int64_t temp_storage_bytesA, int* d_Inmap, int* d_Outmap, int* d_InmapX, int* d_OutmapX) { pInOutMaps<int32_t> d_inmap; pInOutMaps<int32_t> d_outmap; float* X = Input; int Qlevel = Plevel + Jump; TORCH_CHECK(Qlevel <= Olevel, "Level + jump must be lower or equal than the depth of the octree."); uint scale_factor = 0x1 << (Qlevel - Plevel); for (uint batch = 0; batch < BatchSize; batch++) { uint Qsize = GetPyramid(Pyramid, batch, 0, Qlevel, Olevel); uint Psize = GetPyramid(Pyramid, batch, 0, Plevel, Olevel); uint offset = GetPyramid(Pyramid, batch, 1, Plevel, Olevel); GenerateKernelMapTrans<<<(Qsize + (THREADS_PER_BLOCK - 1)) / 
THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>( Qsize, d_Proot + GetPyramid(Pyramid, batch, 1, Qlevel, Olevel), d_Inmap, d_Outmap, d_Info, Ksize, Kvec, scale_factor, dO, dP, Plevel, offset); CUDA_CHECK(cudaGetLastError()); ProcessKernelMaps( Ksize, Qsize, d_inmap, d_outmap, d_Info, d_PSum, d_temp_storageA, temp_storage_bytesA, d_Inmap, d_Outmap, d_InmapX, d_OutmapX); cublasHandle_t handle = NULL; //TODO: get from Pytorch (and stream) minkowski::ConvolutionBackwardKernelGPU<float, int32_t>( X, Grad_Inputs, N, Grad_Outputs, M, Params, Grad_Params, d_outmap, d_inmap, Psize, // note the swapping of i/o maps handle, 0); CUDA_CHECK(cudaGetLastError()); X += N * Qsize; Grad_Inputs += N * Qsize; Grad_Outputs += M * Psize; d_Proot += GetPyramid(Pyramid, batch, 1, Olevel + 1, Olevel); dO += GetPyramid(Pyramid, batch, 1, Olevel, Olevel); dP += GetPyramid(Pyramid, batch, 1, Olevel, Olevel) + 1; } } void ConvTranspose3d_forward_cuda( point_data* d_Proot, uchar* dO, uint* dP, float* Input, int N, float* Output, int M, float* Params, point_data* Kvec, uint Ksize, int Jump, int Qlevel, int Olevel, int BatchSize, uint* Pyramid, uint* d_Info, uint* d_PSum, void* d_temp_storageA, int64_t temp_storage_bytesA, int* d_Inmap, int* d_Outmap, int* d_InmapX, int* d_OutmapX) { pInOutMaps<int32_t> d_inmap; pInOutMaps<int32_t> d_outmap; float* X = Input; float* Y = Output; int Plevel = Qlevel + Jump; TORCH_CHECK(Plevel <= Olevel, "Level + jump must be lower or equal than the depth of the octree."); uint scale_factor = 0x1 << (Plevel - Qlevel); for (uint batch = 0; batch < BatchSize; batch++) { uint Qsize = GetPyramid(Pyramid, batch, 0, Qlevel, Olevel); uint Psize = GetPyramid(Pyramid, batch, 0, Plevel, Olevel); uint offset = GetPyramid(Pyramid, batch, 1, Qlevel, Olevel); GenerateKernelMapTrans<<<(Psize + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>( Psize, d_Proot + GetPyramid(Pyramid, batch, 1, Plevel, Olevel), d_Inmap, d_Outmap, d_Info, Ksize, Kvec, scale_factor, dO, dP, 
Qlevel, offset); CUDA_CHECK(cudaGetLastError()); ProcessKernelMaps( Ksize, Psize, d_inmap, d_outmap, d_Info, d_PSum, d_temp_storageA, temp_storage_bytesA, d_Inmap, d_Outmap, d_InmapX, d_OutmapX); cublasHandle_t handle = NULL; //TODO: get from Pytorch (and stream) minkowski::ConvolutionForwardKernelGPU<float, int32_t>( X, N,// input Y, M, Params, d_inmap, d_outmap, Psize, handle, 0); CUDA_CHECK(cudaGetLastError()); d_Proot += GetPyramid(Pyramid, batch, 1, Olevel + 1, Olevel); X += N * Qsize; Y += M * Psize; dO += GetPyramid(Pyramid, batch, 1, Olevel, Olevel); dP += GetPyramid(Pyramid, batch, 1, Olevel, Olevel) + 1; } CUDA_CHECK(cudaGetLastError()); } void ConvTranspose3d_backward_cuda( point_data* d_Proot, uchar* dO, uint* dP, float* Input, int N, float* Grad_Inputs, float* Grad_Outputs, int M, float* Params, float* Grad_Params, point_data* Kvec, uint Ksize, int Jump, int Plevel, int Olevel, int BatchSize, uint* Pyramid, uint* d_Info, uint* d_PSum, void* d_temp_storageA, int64_t temp_storage_bytesA, int* d_Inmap, int* d_Outmap, int* d_InmapX, int* d_OutmapX) { pInOutMaps<int32_t> d_inmap; pInOutMaps<int32_t> d_outmap; float* X = Input; int Qlevel = Plevel - Jump; TORCH_CHECK(Qlevel >= 0, "level - jump must be positive"); uint scale_factor = 0x1 << (Plevel - Qlevel); for (uint batch = 0; batch < BatchSize; batch++) { uint Qsize = GetPyramid(Pyramid, batch, 0, Qlevel, Olevel); uint Psize = GetPyramid(Pyramid, batch, 0, Plevel, Olevel); uint offset = GetPyramid(Pyramid, batch, 1, Plevel, Olevel); GenerateKernelMap<<<(Qsize + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>( Qsize, d_Proot + GetPyramid(Pyramid, batch, 1, Qlevel, Olevel), d_Inmap, d_Outmap, d_Info, Ksize, Kvec, scale_factor, dO, dP, Plevel, offset); CUDA_CHECK(cudaGetLastError()); ProcessKernelMaps( Ksize, Qsize, d_inmap, d_outmap, d_Info, d_PSum, d_temp_storageA, temp_storage_bytesA, d_Inmap, d_Outmap, d_InmapX, d_OutmapX); cublasHandle_t handle = NULL; //TODO: get from Pytorch (and 
stream) minkowski::ConvolutionBackwardKernelGPU<float, int32_t>( X, Grad_Inputs, N, Grad_Outputs, M, Params, Grad_Params, d_outmap, d_inmap, Psize, handle, 0); CUDA_CHECK(cudaGetLastError()); d_Proot += GetPyramid(Pyramid, batch, 1, Olevel + 1, Olevel); X += N * Qsize; Grad_Inputs += N * Qsize; Grad_Outputs += M * Psize; dO += GetPyramid(Pyramid, batch, 1, Olevel, Olevel); dP += GetPyramid(Pyramid, batch, 1, Olevel, Olevel) + 1; } } } // namespace kaolin
a6297ce7e57ed872664ccead9d667e22b59a393f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> const int SIZE = 5; void addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } void init_vector(int *v) { for (int i = 0; i < N; ++i) v[i] = i; } int main() { const int a[SIZE]; const int b[SIZE]; int c[SIZE]; init_vector(a); init_vector(b); addWithCuda(c, a, b, SIZE); printf("{"); for (int i = 0; i < SIZE; ++i) printf("%d, ", a[i]); printf("} + "); printf("{"); for (int i = 0; i < SIZE; ++i) printf("%d, ", b[i]); printf("} =\n"); printf("{"); for (int i = 0; i < SIZE; ++i) printf("%d, ", c[i]); printf("}\n"); hipDeviceReset(); return 0; } void addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipSetDevice(0); hipMalloc((void**)&dev_c, size * sizeof(int)); hipMalloc((void**)&dev_a, size * sizeof(int)); hipMalloc((void**)&dev_b, size * sizeof(int)); hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return; }
a6297ce7e57ed872664ccead9d667e22b59a393f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> const int SIZE = 5; void addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } void init_vector(int *v) { for (int i = 0; i < N; ++i) v[i] = i; } int main() { const int a[SIZE]; const int b[SIZE]; int c[SIZE]; init_vector(a); init_vector(b); addWithCuda(c, a, b, SIZE); printf("{"); for (int i = 0; i < SIZE; ++i) printf("%d, ", a[i]); printf("} + "); printf("{"); for (int i = 0; i < SIZE; ++i) printf("%d, ", b[i]); printf("} =\n"); printf("{"); for (int i = 0; i < SIZE; ++i) printf("%d, ", c[i]); printf("}\n"); cudaDeviceReset(); return 0; } void addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaSetDevice(0); cudaMalloc((void**)&dev_c, size * sizeof(int)); cudaMalloc((void**)&dev_a, size * sizeof(int)); cudaMalloc((void**)&dev_b, size * sizeof(int)); cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); addKernel<<<1, size>>>(dev_c, dev_a, dev_b); cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return; }
cf67308def9425de22e6c05108fd77476de40788.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "network_updater_cuda.h" #include "neural_network_cuda_exception.h" #include "layer_testing_schema_factory.h" #include "cuda_linear_buffer_device.h" #include "cuda_linear_buffer_host.h" #include "util_cuda.h" #include "cuda_event.h" #include "layer_updater_schema_factory.h" #include "supervised_data_reader_async_helper.h" #include "error_function_updater_cuda_factory.h" #include "../nn_types.h" #include <hip/hip_runtime.h> #include <boost/format.hpp> #include <stack> #include <numeric> #include "../debug_util.h" #include <boost/filesystem.hpp> namespace nnforge { namespace cuda { __forceinline__ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } __global__ void convert_compacted_to_raw_upd_kernel( const uchar4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { uchar4 inp = input[elem_id]; float4 val; val.x = inp.x * (1.0F / 255.0F); val.y = inp.y * (1.0F / 255.0F); val.z = inp.z * (1.0F / 
255.0F); val.w = inp.w * (1.0F / 255.0F); output[elem_id] = val; } } extern __shared__ float arr_sh[]; __global__ void apply_gradient_kernel( float * __restrict data, float * __restrict gradient, double * __restrict update_accum, float learning_rate, float normalizer, float weight_decay, int elem_count, unsigned int update_accum_mask) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; float upd_acc = 0.0F; if (elem_id < elem_count) { float current_weight = __load_nc(data + elem_id); float gr = __load_nc(gradient + elem_id); float upd = learning_rate * (gr * normalizer - current_weight * weight_decay); float new_weight = current_weight + upd; data[elem_id] = new_weight; gradient[elem_id] = 0.0F; upd_acc = fabs(upd); } int thread_id = threadIdx.x; int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { upd_acc += __shfl_down(upd_acc, tx); } if (blockDim.x > 32) { if (lane_id == 0) arr_sh[thread_id >> 5] = upd_acc; __syncthreads(); } if (thread_id == 0) { for(int i = 1; i < (blockDim.x >> 5); ++i) upd_acc += arr_sh[i]; double upd_acc_d = (double)upd_acc; int accum_bucket_id = (elem_id >> 5) & update_accum_mask; atomicAdd(update_accum + accum_bucket_id, upd_acc_d); } } __global__ void apply_gradient_with_vanilla_momentum_kernel( float * __restrict data, float * __restrict gradient, float * __restrict previous_upd, double * __restrict update_accum, float learning_rate, float normalizer, float weight_decay, float momentum, int elem_count, unsigned int update_accum_mask) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; float upd_acc = 0.0F; if (elem_id < elem_count) { float current_weight = __load_nc(data + elem_id); float gr = __load_nc(gradient + elem_id); float prev_upd = __load_nc(previous_upd + elem_id); float upd = prev_upd * momentum + learning_rate * (gr * normalizer - current_weight * weight_decay); float new_weight = current_weight + upd; data[elem_id] = new_weight; 
gradient[elem_id] = 0.0F; previous_upd[elem_id] = upd; upd_acc = fabs(upd); } int thread_id = threadIdx.x; int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { upd_acc += __shfl_down(upd_acc, tx); } if (blockDim.x > 32) { if (lane_id == 0) arr_sh[thread_id >> 5] = upd_acc; __syncthreads(); } if (thread_id == 0) { for(int i = 1; i < (blockDim.x >> 5); ++i) upd_acc += arr_sh[i]; double upd_acc_d = (double)upd_acc; int accum_bucket_id = (elem_id >> 5) & update_accum_mask; atomicAdd(update_accum + accum_bucket_id, upd_acc_d); } } __global__ void apply_gradient_with_nesterov_momentum_kernel( float * __restrict data, float * __restrict gradient, float * __restrict previous_upd, double * __restrict update_accum, float learning_rate, float normalizer, float weight_decay, float momentum, float momentum_plus1, int elem_count, unsigned int update_accum_mask) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; float upd_acc = 0.0F; if (elem_id < elem_count) { float current_weight = __load_nc(data + elem_id); float gr = __load_nc(gradient + elem_id); float prev_upd = __load_nc(previous_upd + elem_id); float new_upd = prev_upd * momentum + learning_rate * (gr * normalizer - current_weight * weight_decay); float upd = momentum_plus1 * new_upd - momentum * prev_upd; float new_weight = current_weight + upd; data[elem_id] = new_weight; gradient[elem_id] = 0.0F; previous_upd[elem_id] = new_upd; upd_acc = fabs(upd); } int thread_id = threadIdx.x; int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { upd_acc += __shfl_down(upd_acc, tx); } if (blockDim.x > 32) { if (lane_id == 0) arr_sh[thread_id >> 5] = upd_acc; __syncthreads(); } if (thread_id == 0) { for(int i = 1; i < (blockDim.x >> 5); ++i) upd_acc += arr_sh[i]; double upd_acc_d = (double)upd_acc; int accum_bucket_id = (elem_id >> 5) & update_accum_mask; atomicAdd(update_accum + accum_bucket_id, upd_acc_d); } } const unsigned int 
network_updater_cuda::max_entry_count_in_single_batch = 4096; const unsigned int network_updater_cuda::elem_count_update_accum_per_part = 64; network_updater_cuda::network_updater_cuda( network_schema_smart_ptr schema, const_error_function_smart_ptr ef, cuda_running_configuration_const_smart_ptr cuda_config) : network_updater(schema, ef) , cuda_config(cuda_config) { cuda_config->set_device(); const const_layer_list& layer_list = *schema; testing_layer_count = 0; start_layer_nonempty_weights_iterator = layer_list.begin(); for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it) { start_layer_nonempty_weights_iterator = it; if (!(*it)->is_empty_data()) break; testing_layer_count++; } ef_updater = single_error_function_updater_cuda_factory::get_const_instance().get_error_function_updater_cuda(ef->get_uuid()); error_function_fused_with_activation = (ef_updater->get_fusable_activation_uuid() == layer_list.back()->get_uuid()); for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it) testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config)); for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it) { if ((it != layer_list.end() - 1) || (!error_function_fused_with_activation)) updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config)); } setup_network_cuda(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it) testing_schema_data.push_back((*it)->get_schema_buffers()); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it) updater_schema_data.push_back((*it)->get_schema_buffers()); } network_updater_cuda::~network_updater_cuda() { } void 
network_updater_cuda::setup_network_cuda() { command_stream = cuda_stream_smart_ptr(new cuda_stream()); data_stream = cuda_stream_smart_ptr(new cuda_stream()); } std::pair<testing_result_smart_ptr, training_stat_smart_ptr> network_updater_cuda::actual_update( supervised_data_reader& reader, const std::vector<std::vector<float> >& learning_rates, network_data_smart_ptr data, network_data_smart_ptr momentum_data, unsigned int batch_size, float weight_decay, training_momentum momentum, bool deterministic_only) { cuda_config->set_device(); testing_result_smart_ptr testing_res(new testing_result(ef)); reader.reset(); layer_configuration_specific input_configuration = reader.get_input_configuration(); layer_configuration_specific output_configuration = reader.get_output_configuration(); unsigned int input_neuron_count = input_configuration.get_neuron_count(); unsigned int output_neuron_count = output_configuration.get_neuron_count(); unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map(); unsigned int output_neuron_count_per_feature_map = output_configuration.get_neuron_count_per_feature_map(); neuron_data_type::input_type type_code = reader.get_input_type(); size_t input_neuron_elem_size = reader.get_input_neuron_elem_size(); if (error_function_fused_with_activation && (output_neuron_count_per_feature_map != 1)) throw neural_network_exception("Error function is fused with activation but output_neuron_count_per_feature_map is not equal 1: not implemented"); unsigned int part_count = 0; for(layer_data_list::const_iterator it = data->data_list.begin(); it != data->data_list.end(); ++it) part_count += static_cast<unsigned int>((*it)->size()); unsigned int elem_count_update_accum = part_count * elem_count_update_accum_per_part; std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = get_data(data); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data_custom = set_get_data_custom(data); 
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > gradient = get_zero_gradient(net_data); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > previous_upd; if (momentum.type != training_momentum::no_momentum) previous_upd = get_data(momentum_data); unsigned int updater_max_count; { buffer_cuda_size_configuration buffers_config; buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // initial error buffers_config.add_constant_buffer(sizeof(double)); // error buffer buffers_config.add_constant_buffer(sizeof(double) * elem_count_update_accum); // update_accum for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data_custom.begin(); it != net_data_custom.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = gradient.begin(); it != gradient.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = previous_upd.begin(); it != previous_upd.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) 
(*it)->update_buffer_configuration(buffers_config); updater_max_count = cuda_config->get_max_entry_count(buffers_config, 0.9F); } unsigned int updater_entry_count; std::vector<unsigned int> entry_read_count_list; unsigned int max_entry_read_count; if (updater_max_count > batch_size) updater_entry_count = batch_size; else { unsigned int it_count = (batch_size + updater_max_count - 1) / updater_max_count; updater_entry_count = (batch_size + it_count - 1) / it_count; max_entry_read_count = updater_entry_count; unsigned int sum = 0; while (sum < batch_size) { unsigned int new_item = ::min(batch_size - sum, updater_entry_count); sum += new_item; entry_read_count_list.push_back(new_item); } } { buffer_cuda_size_configuration buffers_config; update_buffers_configuration(buffers_config, updater_entry_count); buffers_config.add_per_entry_linear_addressing_through_texture(layer_config_list[testing_layer_count].get_neuron_count()); // This is for the first updater to safely read input data through the texture buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error buffers_config.add_constant_buffer(sizeof(double)); // error buffer buffers_config.add_constant_buffer(sizeof(double) * elem_count_update_accum); // update_accum for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) 
buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data_custom.begin(); it != net_data_custom.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = gradient.begin(); it != gradient.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = previous_upd.begin(); it != previous_upd.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); unsigned int max_entry_count = ::min(::min(cuda_config->get_max_entry_count(buffers_config), reader.get_entry_count()), max_entry_count_in_single_batch); if (entry_read_count_list.empty() || (max_entry_count >= batch_size)) { unsigned int it_count = ::min(::max(max_entry_count / batch_size, 1U), 8U); max_entry_read_count = it_count * batch_size; entry_read_count_list.clear(); entry_read_count_list.push_back(max_entry_read_count); } } cuda_linear_buffer_device_smart_ptr input_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * input_neuron_elem_size)), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * input_neuron_elem_size)), }; cuda_linear_buffer_device_smart_ptr output_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_read_count * sizeof(float))), cuda_linear_buffer_device_smart_ptr(new 
cuda_linear_buffer_device(output_neuron_count * max_entry_read_count * sizeof(float))), }; cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr error_buf(new cuda_linear_buffer_device(sizeof(double))); cuda_linear_buffer_device_smart_ptr update_accum_buf(new cuda_linear_buffer_device(elem_count_update_accum * sizeof(double))); cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf; std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack; for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_read_count); testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers)); output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers); } std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it) { layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count); updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers)); output_buffer = all_buffers.output_neurons_buffer; } std::vector<cuda_linear_buffer_device_smart_ptr> output_errors_buffers; cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf; for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator 
it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it) { output_errors_buffers.push_back(output_errors); layer_updater_cuda::buffer_set& all_buffers = it->second; if (all_buffers.input_errors_buffer != 0) output_errors = all_buffers.input_errors_buffer; } cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_read_count * input_neuron_elem_size)); unsigned char * input = *input_host_buf; cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_read_count * sizeof(float))); float * output = *output_host_buf; // set error to zero cuda_util::set_with_value( *cuda_config, (double *)(*error_buf), 0.0, 1, *command_stream); // set update accumulators to zero cuda_util::set_with_value( *cuda_config, (double *)(*update_accum_buf), 0.0, elem_count_update_accum, *command_stream); unsigned int current_data_slot = 0; unsigned int current_command_slot = 1; unsigned int entries_available_for_copy_in_count = reader.get_entry_count(); unsigned int entries_available_for_processing_count = 0; cuda_event data_processed_event; cuda_event input_copied_event; if (cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } unsigned int entries_processed_count = 0; unsigned int entry_read_count_index = 0; unsigned int entry_gradient_calculated_count = 0; unsigned int gradient_applied_count = 0; while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0)) { supervised_data_reader_async_helper async_reader; if (entries_available_for_copy_in_count > 0) { unsigned int entries_to_read_count = std::min<unsigned int>(entry_read_count_list[entry_read_count_index], entries_available_for_copy_in_count); async_reader.fun = supervised_data_reader_functor( entries_to_read_count, &reader, input, output, 
*(input_buf[current_data_slot]), *(output_buf[current_data_slot]), cuda_config, *data_stream); async_reader.start(); entry_read_count_index++; if (entry_read_count_index >= entry_read_count_list.size()) entry_read_count_index = 0; } if (entries_available_for_processing_count > 0) { // Convert input if (type_code == neuron_data_type::type_byte) { int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( convert_compacted_to_raw_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream, *input_buf[current_command_slot], *input_converted_buf, elem_count); } else if (type_code == neuron_data_type::type_float) { cuda_safe_call(hipMemcpyAsync( *input_converted_buf, *input_buf[current_command_slot], input_neuron_count * entries_available_for_processing_count * sizeof(float), hipMemcpyDeviceToDevice, *command_stream)); } else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type %1%") % type_code).str()); // Run ann { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin(); unsigned int layer_id = 0; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin(); for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it) { (*it)->enqueue_test( *command_stream, *schema_data_it, std::vector<const_cuda_linear_buffer_device_smart_ptr>(), std::vector<const_cuda_linear_buffer_device_smart_ptr>(), 
input_and_additional_buffers_pack_it->first, input_and_additional_buffers_pack_it->second, entries_available_for_processing_count); } } unsigned int base_input_entry_id = 0; while(base_input_entry_id < entries_available_for_processing_count) { std::stack<unsigned int> offset_list; unsigned int current_updater_entry_count = ::min(::min(entries_available_for_processing_count - base_input_entry_id, updater_entry_count), batch_size - entry_gradient_calculated_count); // Forward updater { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_custom_it = net_data_custom.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin(); unsigned int layer_id = testing_layer_count; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++net_data_custom_it, ++layer_id, ++layer_config_it) { /* { cuda_linear_buffer_device_smart_ptr buf = input_and_all_buffers_pack_it->first; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(hipMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_neurons_%1%.txt") % layer_id).str()).string().c_str()); } */ 
(*it)->enqueue_test( (it == updater_list.begin()) ? base_input_entry_id : 0, *command_stream, *schema_data_it, *net_data_it, *net_data_custom_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count, deterministic_only); /* { cuda_linear_buffer_device_smart_ptr buf = input_and_all_buffers_pack_it->second.output_neurons_buffer; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(hipMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("output_neurons_%1%.txt") % layer_id).str()).string().c_str()); } */ } } // Compute errors { if (error_function_fused_with_activation) ef_updater->enqueue_update_error_and_gradient_fused_with_activation( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, base_input_entry_id, output_neuron_count, current_updater_entry_count); else ef_updater->enqueue_update_error_and_gradient( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, base_input_entry_id, output_neuron_count, current_updater_entry_count); } // Backward updater { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin(); std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin(); 
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_custom_it = net_data_custom.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator gradient_it = gradient.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin(); unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1; layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + (1 + (error_function_fused_with_activation ? 1 : 0)); for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++gradient_it, ++output_errors_it, ++net_data_it, ++net_data_custom_it, --reverse_layer_id, ++layer_config_it) { (*it)->enqueue_update_weights( (it == (updater_list.rend() - 1)) ? base_input_entry_id : 0, *command_stream, *gradient_it, *net_data_custom_it, *schema_data_it, *output_errors_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count, deterministic_only); /* for(int part_id = 0; part_id < gradient_it->size(); ++part_id) { cuda_linear_buffer_device_smart_ptr buf = gradient_it->at(part_id); std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(hipMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_errors_%1%_%2%.txt") % reverse_layer_id % part_id).str()).string().c_str()); } */ if (it != (updater_list.rend() - 
1)) { (*it)->enqueue_backprop( *command_stream, *schema_data_it, *net_data_it, *net_data_custom_it, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->first, *output_errors_it, input_and_all_buffers_pack_it->second.input_errors_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count, deterministic_only); /* { cuda_linear_buffer_device_smart_ptr buf = (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(hipMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_errors_%1%.txt") % reverse_layer_id).str()).string().c_str()); } */ } } } base_input_entry_id += current_updater_entry_count; entry_gradient_calculated_count += current_updater_entry_count; if (entry_gradient_calculated_count >= batch_size) { float gradient_normalizer = 1.0F / static_cast<float>(::max(batch_size, entry_gradient_calculated_count)); enqueue_apply_gradient( *command_stream, net_data, gradient, previous_upd, learning_rates, update_accum_buf, gradient_normalizer, weight_decay, momentum); entry_gradient_calculated_count = 0; ++gradient_applied_count; } if (cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } } // while(base_input_entry_id < entries_available_for_processing_count) entries_processed_count += entries_available_for_processing_count; if (cuda_config->is_flush_required()) { 
cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } } // if (entries_available_for_processing_count > 0) unsigned int entries_read_count = 0; if (entries_available_for_copy_in_count > 0) entries_read_count = async_reader.wait(); cuda_safe_call(hipStreamSynchronize(*data_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); entries_available_for_processing_count = entries_read_count; entries_available_for_copy_in_count -= entries_read_count; current_data_slot = 1 - current_data_slot; current_command_slot = 1 - current_command_slot; } if (entry_gradient_calculated_count > 0) { float gradient_normalizer = 1.0F / static_cast<float>(::max(batch_size, entry_gradient_calculated_count)); enqueue_apply_gradient( *command_stream, net_data, gradient, previous_upd, learning_rates, update_accum_buf, gradient_normalizer, weight_decay, momentum); entry_gradient_calculated_count = 0; ++gradient_applied_count; } read_data(net_data, data, *command_stream); if (momentum.type != training_momentum::no_momentum) read_data(previous_upd, momentum_data, *command_stream); double error; cuda_safe_call(hipMemcpyAsync(&error, *error_buf, sizeof(double), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); testing_res->init(error, entries_processed_count); training_stat_smart_ptr training_res = read_update_accum( update_accum_buf, data, gradient_applied_count, *command_stream); return std::make_pair(testing_res, training_res); } training_stat_smart_ptr network_updater_cuda::read_update_accum( const_cuda_linear_buffer_device_smart_ptr update_accum, network_data_smart_ptr data, unsigned int gradient_applied_count, hipStream_t stream_id) const { training_stat_smart_ptr training_res(new training_stat()); float mult = 1.0F / static_cast<float>(gradient_applied_count); std::vector<double> pack(update_accum->get_size() / sizeof(double)); 
cuda_safe_call(hipMemcpyAsync(&(*pack.begin()), *update_accum, update_accum->get_size(), hipMemcpyDeviceToHost, stream_id)); cuda_safe_call(hipStreamSynchronize(stream_id)); std::vector<double>::const_iterator current_accum_it = pack.begin(); for(layer_data_list::const_iterator it = data->data_list.begin(); it != data->data_list.end(); ++it) { std::vector<float> layer_stat; for(std::vector<std::vector<float> >::const_iterator it2 = (*it)->begin(); it2 != (*it)->end(); ++it2) { size_t elem_count = it2->size(); double sum = std::accumulate( current_accum_it, current_accum_it + elem_count_update_accum_per_part, 0.0); float val = static_cast<float>(sum) * mult / static_cast<float>(elem_count); layer_stat.push_back(val); current_accum_it += elem_count_update_accum_per_part; } training_res->absolute_updates.push_back(layer_stat); } return training_res; } void network_updater_cuda::layer_config_list_modified() { cuda_config->set_device(); layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin(); tester_list.clear(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf) { tester_list.push_back( (*it)->create_tester( *it_conf, *(it_conf + 1))); } updater_list.clear(); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf) { updater_list.push_back( (*it)->create_updater( *it_conf, *(it_conf + 1), (it_conf > layer_config_list.begin() + testing_layer_count))); } } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_data(network_data_const_smart_ptr data) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_data(data->data_list[i + testing_layer_count]); res.push_back(device_data); } return res; } 
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::set_get_data_custom(network_data_const_smart_ptr data) { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->set_get_data_custom(data->data_custom_list[i + testing_layer_count]); res.push_back(device_data); } return res; } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_zero_gradient(const std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = data.begin(); it != data.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data; for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) { size_t buf_size = (*it2)->get_size(); cuda_linear_buffer_device_smart_ptr buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(buf_size)); cuda_util::set_with_value( *cuda_config, *buf, 0.0F, static_cast<int>(buf_size / sizeof(float)), 0); device_data.push_back(buf); } res.push_back(device_data); } cuda_safe_call(hipStreamSynchronize(0)); return res; } void network_updater_cuda::read_data( std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list, network_data_smart_ptr res, hipStream_t stream_id) const { unsigned int layer_id = 0; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id) updater_list[layer_id]->get_data_from_device(*src_it, res->data_list[layer_id + testing_layer_count]); } void network_updater_cuda::update_buffers_configuration( buffer_cuda_size_configuration& buffer_configuration, unsigned int updater_entry_count) const { 
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != testing_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, 1); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, updater_entry_count); } void network_updater_cuda::enqueue_apply_gradient( hipStream_t stream_id, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& gradient, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& prev_upd, const std::vector<std::vector<float> >& learning_rates, cuda_linear_buffer_device_smart_ptr update_accum, float gradient_normalizer, float weight_decay, training_momentum momentum) { const const_layer_list& layer_list = *schema; const_layer_list::const_iterator layer_it = layer_list.begin() + testing_layer_count; std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator gradient_it = gradient.begin(); std::vector<std::vector<float> >::const_iterator learning_rate_it = learning_rates.begin() + testing_layer_count; unsigned int total_part_id = 0; if (momentum.type != training_momentum::no_momentum) { 
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator prev_upd_it = prev_upd.begin(); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator data_it = data.begin(); data_it != data.end(); ++data_it, ++gradient_it, ++prev_upd_it, ++learning_rate_it, ++layer_it) { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator gradient_it2 = gradient_it->begin(); std::vector<cuda_linear_buffer_device_smart_ptr>::iterator prev_upd_it2 = prev_upd_it->begin(); std::vector<float>::const_iterator learning_rate_it2 = learning_rate_it->begin(); std::set<unsigned int> weight_decay_part_id_set = (*layer_it)->get_weight_decay_part_id_set(); unsigned int part_id = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator data_it2 = data_it->begin(); data_it2 != data_it->end(); ++data_it2, ++gradient_it2, ++prev_upd_it2, ++learning_rate_it2, ++part_id) { float learning_rate = *learning_rate_it2; float actual_weight_decay = (weight_decay_part_id_set.find(part_id) == weight_decay_part_id_set.end()) ? 
0.0F : weight_decay; int elem_count = static_cast<int>((*data_it2)->get_size() / sizeof(float)); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count, 1, 1, 32); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = threadblock_size * sizeof(float); if (momentum.type == training_momentum::vanilla_momentum) { hipLaunchKernelGGL(( apply_gradient_with_vanilla_momentum_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id, **data_it2, **gradient_it2, **prev_upd_it2, ((double *)(*update_accum)) + total_part_id * elem_count_update_accum_per_part, learning_rate, gradient_normalizer, actual_weight_decay, momentum.momentum_val, elem_count, elem_count_update_accum_per_part - 1); } else if (momentum.type == training_momentum::nesterov_momentum) { hipLaunchKernelGGL(( apply_gradient_with_nesterov_momentum_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id, **data_it2, **gradient_it2, **prev_upd_it2, ((double *)(*update_accum)) + total_part_id * elem_count_update_accum_per_part, learning_rate, gradient_normalizer, actual_weight_decay, momentum.momentum_val, momentum.momentum_val + 1.0F, elem_count, elem_count_update_accum_per_part - 1); } ++total_part_id; } } } else { for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator data_it = data.begin(); data_it != data.end(); ++data_it, ++gradient_it, ++learning_rate_it, ++layer_it) { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator gradient_it2 = gradient_it->begin(); std::vector<float>::const_iterator learning_rate_it2 = learning_rate_it->begin(); std::set<unsigned int> weight_decay_part_id_set = (*layer_it)->get_weight_decay_part_id_set(); unsigned int part_id = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator data_it2 = data_it->begin(); data_it2 != data_it->end(); ++data_it2, ++gradient_it2, 
++learning_rate_it2, ++part_id) { float learning_rate = *learning_rate_it2; float actual_weight_decay = (weight_decay_part_id_set.find(part_id) == weight_decay_part_id_set.end()) ? 0.0F : weight_decay; int elem_count = static_cast<int>((*data_it2)->get_size() / sizeof(float)); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count, 1, 1, 32); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = threadblock_size * sizeof(float); hipLaunchKernelGGL(( apply_gradient_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id, **data_it2, **gradient_it2, ((double *)(*update_accum)) + total_part_id * elem_count_update_accum_per_part, learning_rate, gradient_normalizer, actual_weight_decay, elem_count, elem_count_update_accum_per_part - 1); ++total_part_id; } } } } } }
cf67308def9425de22e6c05108fd77476de40788.cu
/* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "network_updater_cuda.h" #include "neural_network_cuda_exception.h" #include "layer_testing_schema_factory.h" #include "cuda_linear_buffer_device.h" #include "cuda_linear_buffer_host.h" #include "util_cuda.h" #include "cuda_event.h" #include "layer_updater_schema_factory.h" #include "supervised_data_reader_async_helper.h" #include "error_function_updater_cuda_factory.h" #include "../nn_types.h" #include <cuda_runtime.h> #include <boost/format.hpp> #include <stack> #include <numeric> #include "../debug_util.h" #include <boost/filesystem.hpp> namespace nnforge { namespace cuda { __forceinline__ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } __global__ void convert_compacted_to_raw_upd_kernel( const uchar4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { uchar4 inp = input[elem_id]; float4 val; val.x = inp.x * (1.0F / 255.0F); val.y = inp.y * (1.0F / 255.0F); val.z = inp.z * (1.0F / 255.0F); val.w = inp.w * (1.0F / 255.0F); output[elem_id] = 
val; } } extern __shared__ float arr_sh[]; __global__ void apply_gradient_kernel( float * __restrict data, float * __restrict gradient, double * __restrict update_accum, float learning_rate, float normalizer, float weight_decay, int elem_count, unsigned int update_accum_mask) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; float upd_acc = 0.0F; if (elem_id < elem_count) { float current_weight = __load_nc(data + elem_id); float gr = __load_nc(gradient + elem_id); float upd = learning_rate * (gr * normalizer - current_weight * weight_decay); float new_weight = current_weight + upd; data[elem_id] = new_weight; gradient[elem_id] = 0.0F; upd_acc = fabs(upd); } int thread_id = threadIdx.x; int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { upd_acc += __shfl_down(upd_acc, tx); } if (blockDim.x > 32) { if (lane_id == 0) arr_sh[thread_id >> 5] = upd_acc; __syncthreads(); } if (thread_id == 0) { for(int i = 1; i < (blockDim.x >> 5); ++i) upd_acc += arr_sh[i]; double upd_acc_d = (double)upd_acc; int accum_bucket_id = (elem_id >> 5) & update_accum_mask; atomicAdd(update_accum + accum_bucket_id, upd_acc_d); } } __global__ void apply_gradient_with_vanilla_momentum_kernel( float * __restrict data, float * __restrict gradient, float * __restrict previous_upd, double * __restrict update_accum, float learning_rate, float normalizer, float weight_decay, float momentum, int elem_count, unsigned int update_accum_mask) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; float upd_acc = 0.0F; if (elem_id < elem_count) { float current_weight = __load_nc(data + elem_id); float gr = __load_nc(gradient + elem_id); float prev_upd = __load_nc(previous_upd + elem_id); float upd = prev_upd * momentum + learning_rate * (gr * normalizer - current_weight * weight_decay); float new_weight = current_weight + upd; data[elem_id] = new_weight; gradient[elem_id] = 0.0F; previous_upd[elem_id] = upd; upd_acc = 
fabs(upd); } int thread_id = threadIdx.x; int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { upd_acc += __shfl_down(upd_acc, tx); } if (blockDim.x > 32) { if (lane_id == 0) arr_sh[thread_id >> 5] = upd_acc; __syncthreads(); } if (thread_id == 0) { for(int i = 1; i < (blockDim.x >> 5); ++i) upd_acc += arr_sh[i]; double upd_acc_d = (double)upd_acc; int accum_bucket_id = (elem_id >> 5) & update_accum_mask; atomicAdd(update_accum + accum_bucket_id, upd_acc_d); } } __global__ void apply_gradient_with_nesterov_momentum_kernel( float * __restrict data, float * __restrict gradient, float * __restrict previous_upd, double * __restrict update_accum, float learning_rate, float normalizer, float weight_decay, float momentum, float momentum_plus1, int elem_count, unsigned int update_accum_mask) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; float upd_acc = 0.0F; if (elem_id < elem_count) { float current_weight = __load_nc(data + elem_id); float gr = __load_nc(gradient + elem_id); float prev_upd = __load_nc(previous_upd + elem_id); float new_upd = prev_upd * momentum + learning_rate * (gr * normalizer - current_weight * weight_decay); float upd = momentum_plus1 * new_upd - momentum * prev_upd; float new_weight = current_weight + upd; data[elem_id] = new_weight; gradient[elem_id] = 0.0F; previous_upd[elem_id] = new_upd; upd_acc = fabs(upd); } int thread_id = threadIdx.x; int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { upd_acc += __shfl_down(upd_acc, tx); } if (blockDim.x > 32) { if (lane_id == 0) arr_sh[thread_id >> 5] = upd_acc; __syncthreads(); } if (thread_id == 0) { for(int i = 1; i < (blockDim.x >> 5); ++i) upd_acc += arr_sh[i]; double upd_acc_d = (double)upd_acc; int accum_bucket_id = (elem_id >> 5) & update_accum_mask; atomicAdd(update_accum + accum_bucket_id, upd_acc_d); } } const unsigned int network_updater_cuda::max_entry_count_in_single_batch = 4096; const unsigned int 
network_updater_cuda::elem_count_update_accum_per_part = 64; network_updater_cuda::network_updater_cuda( network_schema_smart_ptr schema, const_error_function_smart_ptr ef, cuda_running_configuration_const_smart_ptr cuda_config) : network_updater(schema, ef) , cuda_config(cuda_config) { cuda_config->set_device(); const const_layer_list& layer_list = *schema; testing_layer_count = 0; start_layer_nonempty_weights_iterator = layer_list.begin(); for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it) { start_layer_nonempty_weights_iterator = it; if (!(*it)->is_empty_data()) break; testing_layer_count++; } ef_updater = single_error_function_updater_cuda_factory::get_const_instance().get_error_function_updater_cuda(ef->get_uuid()); error_function_fused_with_activation = (ef_updater->get_fusable_activation_uuid() == layer_list.back()->get_uuid()); for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it) testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config)); for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it) { if ((it != layer_list.end() - 1) || (!error_function_fused_with_activation)) updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config)); } setup_network_cuda(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it) testing_schema_data.push_back((*it)->get_schema_buffers()); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it) updater_schema_data.push_back((*it)->get_schema_buffers()); } network_updater_cuda::~network_updater_cuda() { } void network_updater_cuda::setup_network_cuda() { command_stream = cuda_stream_smart_ptr(new cuda_stream()); 
data_stream = cuda_stream_smart_ptr(new cuda_stream()); } std::pair<testing_result_smart_ptr, training_stat_smart_ptr> network_updater_cuda::actual_update( supervised_data_reader& reader, const std::vector<std::vector<float> >& learning_rates, network_data_smart_ptr data, network_data_smart_ptr momentum_data, unsigned int batch_size, float weight_decay, training_momentum momentum, bool deterministic_only) { cuda_config->set_device(); testing_result_smart_ptr testing_res(new testing_result(ef)); reader.reset(); layer_configuration_specific input_configuration = reader.get_input_configuration(); layer_configuration_specific output_configuration = reader.get_output_configuration(); unsigned int input_neuron_count = input_configuration.get_neuron_count(); unsigned int output_neuron_count = output_configuration.get_neuron_count(); unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map(); unsigned int output_neuron_count_per_feature_map = output_configuration.get_neuron_count_per_feature_map(); neuron_data_type::input_type type_code = reader.get_input_type(); size_t input_neuron_elem_size = reader.get_input_neuron_elem_size(); if (error_function_fused_with_activation && (output_neuron_count_per_feature_map != 1)) throw neural_network_exception("Error function is fused with activation but output_neuron_count_per_feature_map is not equal 1: not implemented"); unsigned int part_count = 0; for(layer_data_list::const_iterator it = data->data_list.begin(); it != data->data_list.end(); ++it) part_count += static_cast<unsigned int>((*it)->size()); unsigned int elem_count_update_accum = part_count * elem_count_update_accum_per_part; std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = get_data(data); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data_custom = set_get_data_custom(data); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > gradient = get_zero_gradient(net_data); 
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > previous_upd; if (momentum.type != training_momentum::no_momentum) previous_upd = get_data(momentum_data); unsigned int updater_max_count; { buffer_cuda_size_configuration buffers_config; buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // initial error buffers_config.add_constant_buffer(sizeof(double)); // error buffer buffers_config.add_constant_buffer(sizeof(double) * elem_count_update_accum); // update_accum for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data_custom.begin(); it != net_data_custom.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = gradient.begin(); it != gradient.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = previous_upd.begin(); it != previous_upd.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffers_config); updater_max_count = cuda_config->get_max_entry_count(buffers_config, 0.9F); } unsigned 
int updater_entry_count; std::vector<unsigned int> entry_read_count_list; unsigned int max_entry_read_count; if (updater_max_count > batch_size) updater_entry_count = batch_size; else { unsigned int it_count = (batch_size + updater_max_count - 1) / updater_max_count; updater_entry_count = (batch_size + it_count - 1) / it_count; max_entry_read_count = updater_entry_count; unsigned int sum = 0; while (sum < batch_size) { unsigned int new_item = std::min(batch_size - sum, updater_entry_count); sum += new_item; entry_read_count_list.push_back(new_item); } } { buffer_cuda_size_configuration buffers_config; update_buffers_configuration(buffers_config, updater_entry_count); buffers_config.add_per_entry_linear_addressing_through_texture(layer_config_list[testing_layer_count].get_neuron_count()); // This is for the first updater to safely read input data through the texture buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error buffers_config.add_constant_buffer(sizeof(double)); // error buffer buffers_config.add_constant_buffer(sizeof(double) * elem_count_update_accum); // update_accum for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = 
net_data_custom.begin(); it != net_data_custom.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = gradient.begin(); it != gradient.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = previous_upd.begin(); it != previous_upd.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); unsigned int max_entry_count = std::min(std::min(cuda_config->get_max_entry_count(buffers_config), reader.get_entry_count()), max_entry_count_in_single_batch); if (entry_read_count_list.empty() || (max_entry_count >= batch_size)) { unsigned int it_count = std::min(std::max(max_entry_count / batch_size, 1U), 8U); max_entry_read_count = it_count * batch_size; entry_read_count_list.clear(); entry_read_count_list.push_back(max_entry_read_count); } } cuda_linear_buffer_device_smart_ptr input_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * input_neuron_elem_size)), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * input_neuron_elem_size)), }; cuda_linear_buffer_device_smart_ptr output_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_read_count * sizeof(float))), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_read_count * sizeof(float))), }; cuda_linear_buffer_device_smart_ptr input_converted_buf(new 
cuda_linear_buffer_device(input_neuron_count * max_entry_read_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr error_buf(new cuda_linear_buffer_device(sizeof(double))); cuda_linear_buffer_device_smart_ptr update_accum_buf(new cuda_linear_buffer_device(elem_count_update_accum * sizeof(double))); cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf; std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack; for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_read_count); testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers)); output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers); } std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it) { layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count); updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers)); output_buffer = all_buffers.output_neurons_buffer; } std::vector<cuda_linear_buffer_device_smart_ptr> output_errors_buffers; cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf; for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it) { 
output_errors_buffers.push_back(output_errors); layer_updater_cuda::buffer_set& all_buffers = it->second; if (all_buffers.input_errors_buffer != 0) output_errors = all_buffers.input_errors_buffer; } cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_read_count * input_neuron_elem_size)); unsigned char * input = *input_host_buf; cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_read_count * sizeof(float))); float * output = *output_host_buf; // set error to zero cuda_util::set_with_value( *cuda_config, (double *)(*error_buf), 0.0, 1, *command_stream); // set update accumulators to zero cuda_util::set_with_value( *cuda_config, (double *)(*update_accum_buf), 0.0, elem_count_update_accum, *command_stream); unsigned int current_data_slot = 0; unsigned int current_command_slot = 1; unsigned int entries_available_for_copy_in_count = reader.get_entry_count(); unsigned int entries_available_for_processing_count = 0; cuda_event data_processed_event; cuda_event input_copied_event; if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } unsigned int entries_processed_count = 0; unsigned int entry_read_count_index = 0; unsigned int entry_gradient_calculated_count = 0; unsigned int gradient_applied_count = 0; while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0)) { supervised_data_reader_async_helper async_reader; if (entries_available_for_copy_in_count > 0) { unsigned int entries_to_read_count = std::min<unsigned int>(entry_read_count_list[entry_read_count_index], entries_available_for_copy_in_count); async_reader.fun = supervised_data_reader_functor( entries_to_read_count, &reader, input, output, *(input_buf[current_data_slot]), *(output_buf[current_data_slot]), cuda_config, *data_stream); async_reader.start(); 
entry_read_count_index++; if (entry_read_count_index >= entry_read_count_list.size()) entry_read_count_index = 0; } if (entries_available_for_processing_count > 0) { // Convert input if (type_code == neuron_data_type::type_byte) { int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); convert_compacted_to_raw_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>( *input_buf[current_command_slot], *input_converted_buf, elem_count); } else if (type_code == neuron_data_type::type_float) { cuda_safe_call(cudaMemcpyAsync( *input_converted_buf, *input_buf[current_command_slot], input_neuron_count * entries_available_for_processing_count * sizeof(float), cudaMemcpyDeviceToDevice, *command_stream)); } else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type %1%") % type_code).str()); // Run ann { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin(); unsigned int layer_id = 0; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin(); for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it) { (*it)->enqueue_test( *command_stream, *schema_data_it, std::vector<const_cuda_linear_buffer_device_smart_ptr>(), std::vector<const_cuda_linear_buffer_device_smart_ptr>(), input_and_additional_buffers_pack_it->first, input_and_additional_buffers_pack_it->second, entries_available_for_processing_count); } } unsigned 
int base_input_entry_id = 0; while(base_input_entry_id < entries_available_for_processing_count) { std::stack<unsigned int> offset_list; unsigned int current_updater_entry_count = std::min(std::min(entries_available_for_processing_count - base_input_entry_id, updater_entry_count), batch_size - entry_gradient_calculated_count); // Forward updater { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_custom_it = net_data_custom.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin(); unsigned int layer_id = testing_layer_count; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++net_data_custom_it, ++layer_id, ++layer_config_it) { /* { cuda_linear_buffer_device_smart_ptr buf = input_and_all_buffers_pack_it->first; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(cudaMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_neurons_%1%.txt") % layer_id).str()).string().c_str()); } */ (*it)->enqueue_test( (it == updater_list.begin()) ? 
base_input_entry_id : 0, *command_stream, *schema_data_it, *net_data_it, *net_data_custom_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count, deterministic_only); /* { cuda_linear_buffer_device_smart_ptr buf = input_and_all_buffers_pack_it->second.output_neurons_buffer; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(cudaMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("output_neurons_%1%.txt") % layer_id).str()).string().c_str()); } */ } } // Compute errors { if (error_function_fused_with_activation) ef_updater->enqueue_update_error_and_gradient_fused_with_activation( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, base_input_entry_id, output_neuron_count, current_updater_entry_count); else ef_updater->enqueue_update_error_and_gradient( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, base_input_entry_id, output_neuron_count, current_updater_entry_count); } // Backward updater { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin(); std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> 
>::reverse_iterator net_data_custom_it = net_data_custom.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator gradient_it = gradient.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin(); unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1; layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + (1 + (error_function_fused_with_activation ? 1 : 0)); for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++gradient_it, ++output_errors_it, ++net_data_it, ++net_data_custom_it, --reverse_layer_id, ++layer_config_it) { (*it)->enqueue_update_weights( (it == (updater_list.rend() - 1)) ? base_input_entry_id : 0, *command_stream, *gradient_it, *net_data_custom_it, *schema_data_it, *output_errors_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count, deterministic_only); /* for(int part_id = 0; part_id < gradient_it->size(); ++part_id) { cuda_linear_buffer_device_smart_ptr buf = gradient_it->at(part_id); std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(cudaMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_errors_%1%_%2%.txt") % reverse_layer_id % part_id).str()).string().c_str()); } */ if (it != (updater_list.rend() - 1)) { (*it)->enqueue_backprop( *command_stream, 
*schema_data_it, *net_data_it, *net_data_custom_it, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->first, *output_errors_it, input_and_all_buffers_pack_it->second.input_errors_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, current_updater_entry_count, deterministic_only); /* { cuda_linear_buffer_device_smart_ptr buf = (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(cudaMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_errors_%1%.txt") % reverse_layer_id).str()).string().c_str()); } */ } } } base_input_entry_id += current_updater_entry_count; entry_gradient_calculated_count += current_updater_entry_count; if (entry_gradient_calculated_count >= batch_size) { float gradient_normalizer = 1.0F / static_cast<float>(std::max(batch_size, entry_gradient_calculated_count)); enqueue_apply_gradient( *command_stream, net_data, gradient, previous_upd, learning_rates, update_accum_buf, gradient_normalizer, weight_decay, momentum); entry_gradient_calculated_count = 0; ++gradient_applied_count; } if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } } // while(base_input_entry_id < entries_available_for_processing_count) entries_processed_count += entries_available_for_processing_count; if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, 
*command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } } // if (entries_available_for_processing_count > 0) unsigned int entries_read_count = 0; if (entries_available_for_copy_in_count > 0) entries_read_count = async_reader.wait(); cuda_safe_call(cudaStreamSynchronize(*data_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); entries_available_for_processing_count = entries_read_count; entries_available_for_copy_in_count -= entries_read_count; current_data_slot = 1 - current_data_slot; current_command_slot = 1 - current_command_slot; } if (entry_gradient_calculated_count > 0) { float gradient_normalizer = 1.0F / static_cast<float>(std::max(batch_size, entry_gradient_calculated_count)); enqueue_apply_gradient( *command_stream, net_data, gradient, previous_upd, learning_rates, update_accum_buf, gradient_normalizer, weight_decay, momentum); entry_gradient_calculated_count = 0; ++gradient_applied_count; } read_data(net_data, data, *command_stream); if (momentum.type != training_momentum::no_momentum) read_data(previous_upd, momentum_data, *command_stream); double error; cuda_safe_call(cudaMemcpyAsync(&error, *error_buf, sizeof(double), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); testing_res->init(error, entries_processed_count); training_stat_smart_ptr training_res = read_update_accum( update_accum_buf, data, gradient_applied_count, *command_stream); return std::make_pair(testing_res, training_res); } training_stat_smart_ptr network_updater_cuda::read_update_accum( const_cuda_linear_buffer_device_smart_ptr update_accum, network_data_smart_ptr data, unsigned int gradient_applied_count, cudaStream_t stream_id) const { training_stat_smart_ptr training_res(new training_stat()); float mult = 1.0F / static_cast<float>(gradient_applied_count); std::vector<double> pack(update_accum->get_size() / sizeof(double)); cuda_safe_call(cudaMemcpyAsync(&(*pack.begin()), *update_accum, 
update_accum->get_size(), cudaMemcpyDeviceToHost, stream_id)); cuda_safe_call(cudaStreamSynchronize(stream_id)); std::vector<double>::const_iterator current_accum_it = pack.begin(); for(layer_data_list::const_iterator it = data->data_list.begin(); it != data->data_list.end(); ++it) { std::vector<float> layer_stat; for(std::vector<std::vector<float> >::const_iterator it2 = (*it)->begin(); it2 != (*it)->end(); ++it2) { size_t elem_count = it2->size(); double sum = std::accumulate( current_accum_it, current_accum_it + elem_count_update_accum_per_part, 0.0); float val = static_cast<float>(sum) * mult / static_cast<float>(elem_count); layer_stat.push_back(val); current_accum_it += elem_count_update_accum_per_part; } training_res->absolute_updates.push_back(layer_stat); } return training_res; } void network_updater_cuda::layer_config_list_modified() { cuda_config->set_device(); layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin(); tester_list.clear(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf) { tester_list.push_back( (*it)->create_tester( *it_conf, *(it_conf + 1))); } updater_list.clear(); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf) { updater_list.push_back( (*it)->create_updater( *it_conf, *(it_conf + 1), (it_conf > layer_config_list.begin() + testing_layer_count))); } } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_data(network_data_const_smart_ptr data) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_data(data->data_list[i + testing_layer_count]); res.push_back(device_data); } return res; } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > 
network_updater_cuda::set_get_data_custom(network_data_const_smart_ptr data) { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->set_get_data_custom(data->data_custom_list[i + testing_layer_count]); res.push_back(device_data); } return res; } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_zero_gradient(const std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = data.begin(); it != data.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> device_data; for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) { size_t buf_size = (*it2)->get_size(); cuda_linear_buffer_device_smart_ptr buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(buf_size)); cuda_util::set_with_value( *cuda_config, *buf, 0.0F, static_cast<int>(buf_size / sizeof(float)), 0); device_data.push_back(buf); } res.push_back(device_data); } cuda_safe_call(cudaStreamSynchronize(0)); return res; } void network_updater_cuda::read_data( std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list, network_data_smart_ptr res, cudaStream_t stream_id) const { unsigned int layer_id = 0; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id) updater_list[layer_id]->get_data_from_device(*src_it, res->data_list[layer_id + testing_layer_count]); } void network_updater_cuda::update_buffers_configuration( buffer_cuda_size_configuration& buffer_configuration, unsigned int updater_entry_count) const { 
for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != testing_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, 1); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, updater_entry_count); } void network_updater_cuda::enqueue_apply_gradient( cudaStream_t stream_id, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& gradient, std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& prev_upd, const std::vector<std::vector<float> >& learning_rates, cuda_linear_buffer_device_smart_ptr update_accum, float gradient_normalizer, float weight_decay, training_momentum momentum) { const const_layer_list& layer_list = *schema; const_layer_list::const_iterator layer_it = layer_list.begin() + testing_layer_count; std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator gradient_it = gradient.begin(); std::vector<std::vector<float> >::const_iterator learning_rate_it = learning_rates.begin() + testing_layer_count; unsigned int total_part_id = 0; if (momentum.type != training_momentum::no_momentum) { 
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator prev_upd_it = prev_upd.begin(); for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator data_it = data.begin(); data_it != data.end(); ++data_it, ++gradient_it, ++prev_upd_it, ++learning_rate_it, ++layer_it) { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator gradient_it2 = gradient_it->begin(); std::vector<cuda_linear_buffer_device_smart_ptr>::iterator prev_upd_it2 = prev_upd_it->begin(); std::vector<float>::const_iterator learning_rate_it2 = learning_rate_it->begin(); std::set<unsigned int> weight_decay_part_id_set = (*layer_it)->get_weight_decay_part_id_set(); unsigned int part_id = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator data_it2 = data_it->begin(); data_it2 != data_it->end(); ++data_it2, ++gradient_it2, ++prev_upd_it2, ++learning_rate_it2, ++part_id) { float learning_rate = *learning_rate_it2; float actual_weight_decay = (weight_decay_part_id_set.find(part_id) == weight_decay_part_id_set.end()) ? 
0.0F : weight_decay; int elem_count = static_cast<int>((*data_it2)->get_size() / sizeof(float)); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count, 1, 1, 32); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = threadblock_size * sizeof(float); if (momentum.type == training_momentum::vanilla_momentum) { apply_gradient_with_vanilla_momentum_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>( **data_it2, **gradient_it2, **prev_upd_it2, ((double *)(*update_accum)) + total_part_id * elem_count_update_accum_per_part, learning_rate, gradient_normalizer, actual_weight_decay, momentum.momentum_val, elem_count, elem_count_update_accum_per_part - 1); } else if (momentum.type == training_momentum::nesterov_momentum) { apply_gradient_with_nesterov_momentum_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>( **data_it2, **gradient_it2, **prev_upd_it2, ((double *)(*update_accum)) + total_part_id * elem_count_update_accum_per_part, learning_rate, gradient_normalizer, actual_weight_decay, momentum.momentum_val, momentum.momentum_val + 1.0F, elem_count, elem_count_update_accum_per_part - 1); } ++total_part_id; } } } else { for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator data_it = data.begin(); data_it != data.end(); ++data_it, ++gradient_it, ++learning_rate_it, ++layer_it) { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator gradient_it2 = gradient_it->begin(); std::vector<float>::const_iterator learning_rate_it2 = learning_rate_it->begin(); std::set<unsigned int> weight_decay_part_id_set = (*layer_it)->get_weight_decay_part_id_set(); unsigned int part_id = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator data_it2 = data_it->begin(); data_it2 != data_it->end(); ++data_it2, ++gradient_it2, ++learning_rate_it2, ++part_id) { float learning_rate = *learning_rate_it2; 
float actual_weight_decay = (weight_decay_part_id_set.find(part_id) == weight_decay_part_id_set.end()) ? 0.0F : weight_decay; int elem_count = static_cast<int>((*data_it2)->get_size() / sizeof(float)); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count, 1, 1, 32); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = threadblock_size * sizeof(float); apply_gradient_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>( **data_it2, **gradient_it2, ((double *)(*update_accum)) + total_part_id * elem_count_update_accum_per_part, learning_rate, gradient_normalizer, actual_weight_decay, elem_count, elem_count_update_accum_per_part - 1); ++total_part_id; } } } } } }
94bedb7da9de0c859768250c732f95080464929b.hip
// !!! This is a file automatically generated by hipify!!! //Includes for IntelliSense #define _SIZE_T_DEFINED #ifndef __HIPCC__ #define __HIPCC__ #endif #ifndef __cplusplus #define __cplusplus #endif #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include "float.h" #include <builtin_types.h> #include <vector_functions.h> #include <ctime> #include <stdio.h> extern "C" { __device__ int gap(int val){ if (val == 0){ return 1; } if (val == 1){ return -4; } if (val == 2){ return -4; } return 0; } __global__ void align2(int *a, int *b, int *matrix, int *matrixDir, int *scoreMatrix, int m, int n) { extern __shared__ int order[]; bool flag = 0; int index = blockIdx.x*blockDim.x + threadIdx.x; int x = threadIdx.x + 1; int topLeft, left, top; if (index == 0){ for (int c = 0; c != m; c++){ order[c] = 0; } } if (index < m){ for (int c = 0; c != n; c++){ matrix[c*m + index] = 0; } } __syncthreads(); if (index < m - 1){ for (int c = 0; c != n; c++) { if (index == 0) { matrixDir[0] = 0; matrixDir[c*m + index] = 3; } else { if (c == 0) { matrixDir[c*m + index] = 2; } else { matrixDir[c*m + index] = -1; } } } __syncthreads(); for (int y = 1; y != n; y++){ if (index == 0){ order[0] = y; flag = 1; } else{ if (order[index - 1] >= y){ order[index] = y; flag = 1; } else{ flag = 0; } } __syncthreads(); if (flag){ if ((a[index] != '-') && (b[y - 1] != '-')){ topLeft = matrix[((y - 1)*m) + (x - 1)] + scoreMatrix[((a[x - 1] - 65) * 27) + (b[y - 1] - 65)]; } else{ topLeft = gap(1); } top = matrix[((y - 1)*m) + x] + gap(2); left = matrix[(y*m) + (x - 1)] + gap(2); if (topLeft >= left&&topLeft >= top){ matrixDir[y*m + x] = 1; matrix[y*m + x] = topLeft; } if (top > topLeft&&top >= left){ matrixDir[y*m + x] = 3; matrix[y*m + x] = top; } if (left > topLeft&&left > top){ matrixDir[y*m + x] = 2; matrix[y*m + x] = left; } } else{ y--; } if (y == n - 1){ order[index]++; } __syncthreads(); } } } __global__ void traceback(int a[], int ai[], int 
b[], int bi[], int matrixDir[], int m, int n, int *k) { int x = m - 1; int y = n - 1; int c = 0; while (!(x == 0 && y == 0)){ if (matrixDir[y*m + x] == 3){ ai[c] = '-'; if (y > 0){ bi[c] = b[y - 1]; y--; } } else if (matrixDir[y*m + x] == 2){ bi[c] = '-'; if (x > 0){ ai[c] = a[x - 1]; x--; } } else if (matrixDir[y*m + x] == 1){ if (x > 0 && y > 0){ ai[c] = a[x - 1]; bi[c] = b[y - 1]; x--; y--; } } else{ x = 0; y = 0; } c++; } ai[c] = '\0'; bi[c] = '\0'; k[0] = c; k[0]--; } __global__ void invert(int a[], int ai[], int b[], int bi[], int k[]) { int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < k[0]){ int vala = ai[k[0] - index - 1]; int valb = bi[k[0] - index - 1]; __syncthreads(); a[index] = vala; b[index] = valb; __syncthreads(); } } __global__ void alignPSP(int *a, int *b, int *matrix, int *matrixDir, int *scoreMatrix, int am, int an, int bm, int bn, int gap0, int gap1, int gap2, int offset, int size, int *order) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch bool flag = 0; int index = (blockIdx.x*blockDim.x + threadIdx.x) + (offset * size); int x = index + 1; int topLeft, left, top; //printf("Soy el index: %d\n", index); //if (index == 0){ // printf("gaps inside cuda: %d %d %d\n", gap0, gap1, gap2); //} if (index < an+1){ order[index] = 0; //matrix[c*(an + 1) + x] = 0; if (index == 0){ for (int c = 0; c != bn + 1; c++){ matrix[c*(an + 1)] = gap2*c; } } else{ matrix[x] = gap2*x; matrixDir[x] = 2; } } __syncthreads(); __threadfence_block(); if (index < an+1){ if (index == 0){ for (int c = 0; c != bn + 1; c++) { matrixDir[c*(an + 1)] = 3; } } else{ matrixDir[index] = 2; } } if (index == 0){ matrixDir[0] = 0; } __syncthreads(); __threadfence_block(); if (index < an){ //printf("\n%d 1\n", index); for (int y = 1; y <= bn; y++){ if (index == 0){ order[0] = y; flag = 1; } else{ if (order[index - 1] > y){ flag = 1; } else{ flag = 0; y--; } } __syncthreads(); __threadfence_block(); if (flag){ // printf("\n%d 2\n", index); int sum = 0; for (int xx = 0; 
xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ sum += (a[xx*an + (x - 1)] != '-'&&b[yy*bn + (y - 1)] != '-') ? scoreMatrix[((a[xx*an + (x - 1)] - 65) * 27) + (b[yy*bn + (y - 1)] - 65)] : a[xx*an + (x - 1)] == b[yy*bn + (y - 1)] ? gap0 : gap2; // printf("Se compar: %c y %c y salio: %d\n", a[xx*an + (x - 1)], b[yy*bn + (y - 1)], scoreMatrix[((a[xx*an + (x - 1)] - 65) * 27) + (b[yy*bn + (y - 1)] - 65)]); } } //Agregar el Gap OPENING topLeft = matrix[(y - 1)*(an + 1) + (x - 1)] + sum; sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ sum += a[xx*an + (x - 1)] == '-' ? gap0 : gap2; } } /*if (y > 1) sum += matrixDir[((y - 1)*(an + 1)) + x] == 1 ? gap1*am : 0;*/ top = matrix[((y - 1)*(an + 1)) + x] + sum; sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ sum += b[yy*bn + (y - 1)] == '-' ? gap0 : gap2; } } /*if (x > 1) sum += matrixDir[y*(an + 1) + x - 1] == 1 ? gap1*bm : 0;*/ left = matrix[(y*(an + 1)) + (x - 1)] + sum; //printf("matrix[%d,%d] top:%d topleft:%d left:%d\n", x, y, top, topLeft, left); if (topLeft >= left&&topLeft >= top){ matrixDir[y*(an + 1) + x] = 1; matrix[y*(an + 1) + x] = topLeft; // printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } if (top > topLeft&&top >= left){ matrixDir[y*(an + 1) + x] = 3; matrix[y*(an + 1) + x] = top; //printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } if (left > topLeft&&left > top) { matrixDir[y*(an + 1) + x] = 2; matrix[y*(an + 1) + x] = left; // printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } // printf("\n%d 3\n", index); /*if (index == 84&&y==90){ printf("\nIndex 84 matrixDir[%d,%d]=%d\n", x, y, matrixDir[y*(an + 1) + x]); printf("matrix[%d,%d] top:%d topleft:%d left:%d\n", x, y, top, topLeft, left); }*/ if (y == bn){ order[index]++; } order[index]++; } __syncthreads(); } //printf("\n%d 4\n", index); order[index]++; order[index]++; 
__syncthreads(); __threadfence_block(); } } __global__ void align2SIMO_Initialize(int *sizes, int x, int y, int sqrZone){ } __global__ void kMerDistance(int *matrix, int *matrixDir, int *indexes, int *sequences, int *sizes, int nseq, int *scoreMatrix, int x, int y, int gap0, int gap1, int gap2, int *score, int sqrZone, int K) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch int index = blockIdx.x*blockDim.x + threadIdx.x; int seqA = x*sqrZone + (index % sqrZone); bool flag; int seqB = y*sqrZone + (index / sqrZone); int sum = 0; int c, d, k; int *kmer; int sumA, sumB; if (x <= y){ if (seqB < nseq&&seqA < nseq){ int *A, *B; int offsetA = 0; int offsetB = 0; if (seqB > seqA){ for (c = 0; c <= seqB; c++){ if (c == seqA){ offsetA = sum; } if (c == seqB){ offsetB = sum; } sum += sizes[c]; } A = &sequences[offsetA]; B = &sequences[offsetB]; for (d = 0; d != sizes[seqA] - K; d++){ kmer = &A[d]; for (c = 0; c != sizes[seqA] - K; c++){ flag = true; for (k = 0; k != K; k++){ if (kmer[k] != A[c + k]){ flag = false; break; } } sumA += flag; } for (c = 0; c != sizes[seqB] - K; c++){ flag = true; for (k = 0; k != K; k++){ if (kmer[k] != B[c + k]){ flag = false; break; } } sumB += flag; } if (sumA < sumB){ sum += sumA; } else{ sum += sumB; } } if (sizes[seqA] < sizes[seqB]){ sum /= sizes[seqA] - K + 1; } else{ sum /= sizes[seqB] - K + 1; } score[seqA*nseq + seqB] = sum; score[seqB*nseq + seqA] = sum; } } } } __global__ void align2SIMO(int *matrix, int *matrixDir, int *indexes, int *sequences, int *sizes, int nseq, int *scoreMatrix, int x, int y, int gap0, int gap1, int gap2, int *score, int sqrZone, int mode) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch //mode 0=Column score 1=propossal int index = blockIdx.x*blockDim.x + threadIdx.x; int seqA = x*sqrZone + (index % sqrZone); int seqB = y*sqrZone + (index / sqrZone); //printf("\nholly\n"); if (x <= y){ /* if (index == 0){ matrix = new int**[(sqrZone*sqrZone)]; matrixDir = new int**[(sqrZone*sqrZone)]; for (int ccc 
= 0; ccc != sqrZone*sqrZone; ccc++){ matrix[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; matrixDir[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; } for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ for (int d = 0; d != sizes[x*sqrZone + (ccc % sqrZone)] + 1; d++){ matrix[ccc][d] = new int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; matrixDir[ccc][d] = new int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; } } } __syncthreads(); */ //printf("\nholly\n"); /**/ if (seqB < nseq&&seqA < nseq){ int offsetA = 0; int offsetB = 0; int sum = 0; int m = sizes[seqA]; int n = sizes[seqB]; /*int* A = new int[m]; int* B = new int[n];*/ int *A, *B; int *MatrixDir, *Matrix; int X, Y; if (seqB > seqA){ for (int c = 0; c <= seqB; c++){ if (c == seqA){ offsetA = sum; } if (c == seqB){ offsetB = sum; } sum += sizes[c]; } //printf("\n2"); int sSeqA = sizes[seqA] + 1; int offmatrix = indexes[index]; Matrix = &matrix[offmatrix]; MatrixDir = &matrixDir[offmatrix]; for (X = 0; X != m + 1; X++){ MatrixDir[X] = 2; Matrix[X] = 0; } for (Y = 0; Y != n + 1; Y++){ MatrixDir[Y*sSeqA] = 3; Matrix[Y*sSeqA] = 0; } MatrixDir[0] = 0; Matrix[0] = 0; /* for (int x = 0; x != m + 1; x++){ for (int y = 0; y != n + 1; y++){ //printf("[%d](%d,%d) [ ]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); Matrix[ y*(sizes[seqA] + 1) + x] = 0; if (y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 2; } else{ MatrixDir[ y*(sizes[seqA] + 1) + x] = -1; } if (x == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 3; } if (x == 0 && y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 0; } //printf("[%d](%d,%d) [X]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); } } */ //printf("\n3"); A = &sequences[offsetA]; B = &sequences[offsetB]; /* for (int c = offsetA; c != offsetA + m; c++){ A[c - offsetA] = sequences[c]; } for (int c = offsetB; c != offsetB + n; c++){ B[c - offsetB] = sequences[c]; }*/ //printf("\nFin de la inicializacion\n"); for (int x = 1; x != m + 1; x++){ for (int y = 1; y != n + 1; y++){ int topLeft = 0, 
left = 0, top = 0; topLeft = Matrix[(y - 1)*(sSeqA)+(x - 1)] + scoreMatrix[((A[x - 1] - 65) * 27) + (B[y - 1] - 65)]; top = Matrix[(y - 1)*(sSeqA)+(x)] - gap2; top += MatrixDir[(y - 1)*(sSeqA)+(x)] == 1 ? gap1 : 0; left = Matrix[(y)*(sSeqA)+(x - 1)] - gap2; left += MatrixDir[(y)*(sSeqA)+(x - 1)] == 1 ? gap1 : 0; if (topLeft >= left&&topLeft >= top){ MatrixDir[(y)*(sSeqA)+(x)] = 1; Matrix[(y)*(sSeqA)+(x)] = topLeft; } if (top > topLeft&&top >= left){ MatrixDir[(y)*(sSeqA)+(x)] = 3; Matrix[(y)*(sSeqA)+(x)] = top; } if (left > top&&left > topLeft) { MatrixDir[(y)*(sSeqA)+(x)] = 2; Matrix[(y)*(sSeqA)+(x)] = left; } } } // printf("\nFin de valores a la matriz\n"); X = m; Y = n; int scor = 0; while (!(X == 0 && Y == 0)){ switch (MatrixDir[(Y)*(sSeqA)+(X)]){ case 1: X--; Y--; break; case 2: X--; scor++; break; case 3: Y--; scor++; break; default: break; } } score[seqA*nseq + seqB] = scor; score[seqB*nseq + seqA] = scor; // printf("\n[%d,%d]=%d\n",seqA,seqB,scor); } else{ if (seqA == seqB)score[seqA*nseq + seqB] = 0; } } } } __global__ void align2SIMO_g(int *matrix, int *matrixDir, int *indexes, int *sequences, int *sizes, int nseq, int *scoreMatrix, int x, int y, int gap0, int gap1, int gap2, double *score, int sqrZone, int r) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch //mode 0=Column score 1=propossal int index = blockIdx.x*blockDim.x + threadIdx.x; int seqA = x*sqrZone + (index % sqrZone); int seqB = y*sqrZone + (index / sqrZone); //printf("\nholly\n"); if (x <= y){ /* if (index == 0){ matrix = new int**[(sqrZone*sqrZone)]; matrixDir = new int**[(sqrZone*sqrZone)]; for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ matrix[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; matrixDir[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; } for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ for (int d = 0; d != sizes[x*sqrZone + (ccc % sqrZone)] + 1; d++){ matrix[ccc][d] = new int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; matrixDir[ccc][d] = new 
int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; } } } __syncthreads(); */ //printf("\nholly\n"); /**/ if (seqB < nseq&&seqA < nseq){ int offsetA = 0; int offsetB = 0; int sum = 0; int m = sizes[seqA]; int n = sizes[seqB]; /*int* A = new int[m]; int* B = new int[n];*/ int *A, *B; int *MatrixDir, *Matrix; int X, Y; if (seqB > seqA){ for (int c = 0; c <= seqB; c++){ if (c == seqA){ offsetA = sum; } if (c == seqB){ offsetB = sum; } sum += sizes[c]; } //printf("\n2"); int sSeqA = sizes[seqA] + 1; int sSeqB = sizes[seqB]; int offmatrix = indexes[index]; Matrix = &matrix[offmatrix]; MatrixDir = &matrixDir[offmatrix]; for (X = 0; X != m + 1; X++){ MatrixDir[X] = 2; Matrix[X] = 0; } for (Y = 0; Y != n + 1; Y++){ MatrixDir[Y*sSeqA] = 3; Matrix[Y*sSeqA] = 0; } MatrixDir[0] = 0; Matrix[0] = 0; /* for (int x = 0; x != m + 1; x++){ for (int y = 0; y != n + 1; y++){ //printf("[%d](%d,%d) [ ]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); Matrix[ y*(sizes[seqA] + 1) + x] = 0; if (y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 2; } else{ MatrixDir[ y*(sizes[seqA] + 1) + x] = -1; } if (x == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 3; } if (x == 0 && y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 0; } //printf("[%d](%d,%d) [X]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); } } */ //printf("\n3"); A = &sequences[offsetA]; B = &sequences[offsetB]; for (int x = 1; x != m + 1; x++){ for (int y = 1; y != n + 1; y++){ int topLeft = 0, left = 0, top = 0; topLeft = Matrix[(y - 1)*(sSeqA)+(x - 1)] + scoreMatrix[((A[x - 1] - 65) * 27) + (B[y - 1] - 65)]; top = Matrix[(y - 1)*(sSeqA)+(x)] + gap2; //top += MatrixDir[(y - 1)*(sSeqA)+(x)] == 1 ? gap1 : 0; left = Matrix[(y)*(sSeqA)+(x - 1)] + gap2; //left += MatrixDir[(y)*(sSeqA)+(x - 1)] == 1 ? 
gap1 : 0; if (topLeft >= left&&topLeft >= top){ MatrixDir[(y)*(sSeqA)+(x)] = 1; Matrix[(y)*(sSeqA)+(x)] = topLeft; } if (top > topLeft&&top >= left){ MatrixDir[(y)*(sSeqA)+(x)] = 3; Matrix[(y)*(sSeqA)+(x)] = top; } if (left > top&&left > topLeft) { MatrixDir[(y)*(sSeqA)+(x)] = 2; Matrix[(y)*(sSeqA)+(x)] = left; } } } // printf("\nFin de valores a la matriz\n"); X = m; Y = n; score[0] = 0; double scor = 0; int *a_inv = new int[sSeqA + sSeqB]; int *b_inv = new int[sSeqA + sSeqB]; int size = 0; while (!(X == 0 && Y == 0)){ switch (MatrixDir[(Y*sSeqA) + X]){ case 1: a_inv[size] = A[X - 1]; b_inv[size] = B[Y - 1]; X--; Y--; break; case 2: a_inv[size] = A[X - 1]; b_inv[size] = 45; X--; break; case 3: a_inv[size] = 45; b_inv[size] = B[Y - 1]; Y--; break; default: break; } size++; } for (int c = 0; c != size; c++){ if (a_inv[c] == b_inv[c]){ scor++; } } score[seqA*nseq + seqB] = scor / (double)size; score[seqB*nseq + seqA] = scor / (double)size; } else{ if (seqA == seqB)score[seqA*nseq + seqB] = 0; } } } } __global__ void align2SIMO_r(int *matrix, int *matrixDir, int *indexes, int *sequences, int *sizes, int nseq, int *scoreMatrix, int x, int y, int gap0, int gap1, int gap2, double *score, int sqrZone, int r) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch //mode 0=Column score 1=propossal int index = blockIdx.x*blockDim.x + threadIdx.x; int seqA = x*sqrZone + (index % sqrZone); int seqB = y*sqrZone + (index / sqrZone); //printf("\nholly\n"); if (x <= y){ /* if (index == 0){ matrix = new int**[(sqrZone*sqrZone)]; matrixDir = new int**[(sqrZone*sqrZone)]; for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ matrix[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; matrixDir[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; } for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ for (int d = 0; d != sizes[x*sqrZone + (ccc % sqrZone)] + 1; d++){ matrix[ccc][d] = new int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; matrixDir[ccc][d] = new int[sizes[y*sqrZone + (ccc / 
sqrZone)] + 1]; } } } __syncthreads(); */ /**/ if (seqB < nseq&&seqA < nseq){ int offsetA = 0; int offsetB = 0; int sum = 0; int m = sizes[seqA]; int n = sizes[seqB]; /*int* A = new int[m]; int* B = new int[n];*/ int *A, *B; int *MatrixDir, *Matrix; int X, Y; if (seqB > seqA){ for (int c = 0; c <= seqB; c++){ if (c == seqA){ offsetA = sum; } if (c == seqB){ offsetB = sum; } sum += sizes[c]; } int sSeqA = sizes[seqA] + 1; int sSeqB = sizes[seqB]; int offmatrix = indexes[index]; Matrix = &matrix[offmatrix]; MatrixDir = &matrixDir[offmatrix]; for (X = 0; X != m + 1; X++){ MatrixDir[X] = 2; Matrix[X] = 0; } for (Y = 0; Y != n + 1; Y++){ MatrixDir[Y*sSeqA] = 3; Matrix[Y*sSeqA] = 0; } MatrixDir[0] = 0; Matrix[0] = 0; /* for (int x = 0; x != m + 1; x++){ for (int y = 0; y != n + 1; y++){ //printf("[%d](%d,%d) [ ]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); Matrix[ y*(sizes[seqA] + 1) + x] = 0; if (y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 2; } else{ MatrixDir[ y*(sizes[seqA] + 1) + x] = -1; } if (x == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 3; } if (x == 0 && y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 0; } //printf("[%d](%d,%d) [X]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); } } */ /* if (seqA == 0 && seqB == 1){ printf("Blosum:\n"); for (int c = 0; c != 10; c++){ printf("%d ", scoreMatrix[c]); } }*/ A = &sequences[offsetA]; B = &sequences[offsetB]; for (int x = 1; x != m + 1; x++){ for (int y = 1; y != n + 1; y++){ int topLeft = 0, left = 0, top = 0; topLeft = Matrix[(y - 1)*(sSeqA)+(x - 1)] + scoreMatrix[((A[x - 1] - 65) * 27) + (B[y - 1] - 65)]; top = Matrix[(y - 1)*(sSeqA)+(x)] + gap2; top += MatrixDir[(y - 1)*(sSeqA)+(x)] == 1 ? gap1 : 0; left = Matrix[(y)*(sSeqA)+(x - 1)] + gap2; left += MatrixDir[(y)*(sSeqA)+(x - 1)] == 1 ? 
gap1 : 0; if (topLeft >= left&&topLeft >= top){ MatrixDir[(y)*(sSeqA)+(x)] = 1; Matrix[(y)*(sSeqA)+(x)] = topLeft; } if (top > topLeft&&top >= left){ MatrixDir[(y)*(sSeqA)+(x)] = 3; Matrix[(y)*(sSeqA)+(x)] = top; } if (left > top&&left > topLeft) { MatrixDir[(y)*(sSeqA)+(x)] = 2; Matrix[(y)*(sSeqA)+(x)] = left; } } } X = m; Y = n; score[0] = 0; double scor = 0; int *a_inv = new int[sSeqA + sSeqB]; int *b_inv = new int[sSeqA + sSeqB]; int size = 0; while (!(X == 0 && Y == 0)){ switch (MatrixDir[(Y*sSeqA) + X]){ case 1: a_inv[size] = A[X - 1]; b_inv[size] = B[Y - 1]; X--; Y--; break; case 2: a_inv[size] = A[X - 1]; b_inv[size] = 45; X--; break; case 3: a_inv[size] = 45; b_inv[size] = B[Y - 1]; Y--; break; } size++; } double g=0, h=0; for (int c = 0; c != size; c++){ for (int d = c - r; d != c + r; d++){ if (d >= 0 && d < size){ if (d < c){ if (a_inv[c] != '-'&&b_inv[d] != '-'){ g = (double)scoreMatrix[((a_inv[c] - 65) * 27) + (b_inv[d] - 65)]; } else{ g = (double)gap0; } g += 4 - gap1; h = r - (d - (c - r)) + 1.0; scor += (h / g); } if (d == c){ if (a_inv[c] != '-'&&b_inv[d] != '-'){ scor += (double)scoreMatrix[((a_inv[c] - 65) * 27) + (b_inv[d] - 65)]; } else{ scor += gap0; } } if (d > c){ if (d < size){ if (a_inv[c] != '-'&&b_inv[d] != '-'){ g = (double)scoreMatrix[((a_inv[c] - 65) * 27) + (b_inv[d] - 65)]; } else{ g = gap0; } g += 4 - gap1; h = (d - (c - r)) + 1.0; scor += (h / g); } } } } } score[seqA*nseq + seqB] = scor; score[seqB*nseq + seqA] = scor; /* printf("\n[%d,%d] size %d score:%f\n", m, n, size, scor); if (seqA == 0 && seqB == 1){ for (int c = 0; c != size; c++){ printf("%c", a_inv[c]); } printf("\n"); for (int c = 0; c != size; c++){ printf("%c", b_inv[c]); } printf("\n"); }*/ } else{ if (seqA == seqB)score[seqA*nseq + seqB] = 0; } } } } __global__ void tracebackPSP(int inv[], int a[], int b[], int matrixDir[], int am, int an, int bm, int bn, int *k) { int x = an; int y = bn; int c = 0; int maxInv = (an + bn); for (int k = 0; k != maxInv*(am + bm); 
k++){ inv[k] = 64; } while (!(x == 0 && y == 0)){ if (matrixDir[y*(an + 1) + x] == 3){ for (int d = 0; d != am; d++){ inv[(d*maxInv) + c] = '-'; } for (int d = am; d != am + bm; d++){ inv[((d*maxInv) + c)] = b[(d - am)*bn + y - 1]; } y--; } else { if (matrixDir[y*(an + 1) + x] == 2) { for (int d = 0; d != am; d++){ inv[(d*maxInv) + c] = a[d*an + x - 1]; } for (int d = am; d != am + bm; d++){ inv[((d*maxInv) + c)] = '-'; } x--; } else { if (matrixDir[y*(an + 1) + x] == 1) { for (int d = 0; d != am; d++){ inv[(d*maxInv) + c] = a[d*an + x - 1]; } for (int d = am; d != am + bm; d++){ inv[((d*maxInv) + c)] = b[(d - am)*bn + y - 1]; } x--; y--; } } } c++; } k[0] = c; } __global__ void invertPSP(int original[], int inverse[], int n, int k[], int mn) { for (int d = 0; d != n; d++){ for (int c = 0; c < k[0]; c++){ if (inverse[(mn*d) + (k[0] - c - 1)] > 0 && inverse[(mn*d) + (k[0] - c)] < 256){ original[(mn*d) + c] = inverse[(mn*d) + (k[0] - c - 1)]; } else{ original[(mn*d) + c] = 32; } } } } int main(){ return 0; } } /* 6 febrero 2015 __global__ void alignPSP(int *a, int *b, int *matrix, int *matrixDir, int *scoreMatrix, int am, int an, int bm, int bn, int gap0, int gap1, int gap2, int offset, int size, int *order) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch bool flag = 0; int index = (blockIdx.x*blockDim.x + threadIdx.x) + (offset * size); int x = index + 1; int topLeft, left, top; printf("Soy el index: %d\n",index); if (index <= an){ order[index] = 0; for (int c = 0; c != bn + 1; c++){ matrix[c*(an + 1) + x] = 0; matrixDir[c*(an + 1) + x] = 0; } matrixDir[x] = 2; } __syncthreads(); __threadfence_block(); if (index <= an){ if (index != 0 && offset == 0){ for (int c = 0; c != bn + 1; c++) { matrixDir[c*(an + 1) + index] = c == 0 ? 
2 : 5; } } else{ for (int c = 0; c != bn + 1; c++) { matrixDir[c*(an + 1)] = 3; } matrixDir[0] = 0; } } __syncthreads(); __threadfence_block(); if (index < an){ for (int y = 1; y <= bn; y++){ if (index == 0){ order[0] = y; flag = 1; } else{ //printf("Yo: %d order[%d]=%d\n", index,index-1,order[index-1]); if (order[index - 1] > y){ flag = 1; order[index]++; } else{ flag = 0; y--; } } __syncthreads(); __threadfence_block(); if (flag){ int sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ if ((a[xx*an + (x - 1)] != '-'&&b[yy*bn + (y - 1)] != '-')){ sum += scoreMatrix[((a[xx*an + (x - 1)] - 65) * 27) + (b[yy*bn + (y - 1)] - 65)]; // printf("Se compar: %c y %c y salio: %d\n", a[xx*an + (x - 1)], b[yy*bn + (y - 1)], scoreMatrix[((a[xx*an + (x - 1)] - 65) * 27) + (b[yy*bn + (y - 1)] - 65)]); } else{ if (a[xx*an + (x - 1)] == b[yy*bn + (y - 1)]){ sum += gap0; } else{ sum += gap2; } } } } topLeft = matrix[(y - 1)*(an+1) + (x - 1)] + sum; sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ if (a[xx*an + (x - 1)] == '-'){ sum += gap0; } else{ sum += gap2; } } } if (y>1) sum += matrixDir[((y-1)*(an+1)) + x] == 1 ? gap1 : 0; top = matrix[((y - 1)*(an+1)) + x] + sum; sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ if (b[yy*bn + (y - 1)] == '-'){ sum += gap0; } else{ sum += gap2; } } } if (x>1) sum += matrixDir[y*(an + 1) + x-1] == 1 ? 
gap1 : 0; left = matrix[(y*(an+1)) + (x - 1)] + sum; //printf("matrix[%d,%d] top:%d topleft:%d left:%d\n", x, y, top, topLeft, left); if (topLeft >= left&&topLeft >= top){ matrixDir[y*(an + 1) + x] = 1; matrix[y*(an + 1) + x] = topLeft; // printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } else if (top > topLeft&&top >= left){ matrixDir[y*(an + 1) + x] = 3; matrix[y*(an + 1) + x] = top; //printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } else { matrixDir[y*(an + 1) + x] = 2; matrix[y*(an + 1) + x] = left; // printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } if (y == bn){ order[index]++; } } } order[index]++; order[index]++; __syncthreads(); __threadfence_block(); } } */
94bedb7da9de0c859768250c732f95080464929b.cu
//Includes for IntelliSense #define _SIZE_T_DEFINED #ifndef __CUDACC__ #define __CUDACC__ #endif #ifndef __cplusplus #define __cplusplus #endif #include <cuda.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include "float.h" #include <builtin_types.h> #include <vector_functions.h> #include <ctime> #include <stdio.h> extern "C" { __device__ int gap(int val){ if (val == 0){ return 1; } if (val == 1){ return -4; } if (val == 2){ return -4; } return 0; } __global__ void align2(int *a, int *b, int *matrix, int *matrixDir, int *scoreMatrix, int m, int n) { extern __shared__ int order[]; bool flag = 0; int index = blockIdx.x*blockDim.x + threadIdx.x; int x = threadIdx.x + 1; int topLeft, left, top; if (index == 0){ for (int c = 0; c != m; c++){ order[c] = 0; } } if (index < m){ for (int c = 0; c != n; c++){ matrix[c*m + index] = 0; } } __syncthreads(); if (index < m - 1){ for (int c = 0; c != n; c++) { if (index == 0) { matrixDir[0] = 0; matrixDir[c*m + index] = 3; } else { if (c == 0) { matrixDir[c*m + index] = 2; } else { matrixDir[c*m + index] = -1; } } } __syncthreads(); for (int y = 1; y != n; y++){ if (index == 0){ order[0] = y; flag = 1; } else{ if (order[index - 1] >= y){ order[index] = y; flag = 1; } else{ flag = 0; } } __syncthreads(); if (flag){ if ((a[index] != '-') && (b[y - 1] != '-')){ topLeft = matrix[((y - 1)*m) + (x - 1)] + scoreMatrix[((a[x - 1] - 65) * 27) + (b[y - 1] - 65)]; } else{ topLeft = gap(1); } top = matrix[((y - 1)*m) + x] + gap(2); left = matrix[(y*m) + (x - 1)] + gap(2); if (topLeft >= left&&topLeft >= top){ matrixDir[y*m + x] = 1; matrix[y*m + x] = topLeft; } if (top > topLeft&&top >= left){ matrixDir[y*m + x] = 3; matrix[y*m + x] = top; } if (left > topLeft&&left > top){ matrixDir[y*m + x] = 2; matrix[y*m + x] = left; } } else{ y--; } if (y == n - 1){ order[index]++; } __syncthreads(); } } } __global__ void traceback(int a[], int ai[], int b[], int bi[], int matrixDir[], int m, int n, int *k) { int x = m - 
1; int y = n - 1; int c = 0; while (!(x == 0 && y == 0)){ if (matrixDir[y*m + x] == 3){ ai[c] = '-'; if (y > 0){ bi[c] = b[y - 1]; y--; } } else if (matrixDir[y*m + x] == 2){ bi[c] = '-'; if (x > 0){ ai[c] = a[x - 1]; x--; } } else if (matrixDir[y*m + x] == 1){ if (x > 0 && y > 0){ ai[c] = a[x - 1]; bi[c] = b[y - 1]; x--; y--; } } else{ x = 0; y = 0; } c++; } ai[c] = '\0'; bi[c] = '\0'; k[0] = c; k[0]--; } __global__ void invert(int a[], int ai[], int b[], int bi[], int k[]) { int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < k[0]){ int vala = ai[k[0] - index - 1]; int valb = bi[k[0] - index - 1]; __syncthreads(); a[index] = vala; b[index] = valb; __syncthreads(); } } __global__ void alignPSP(int *a, int *b, int *matrix, int *matrixDir, int *scoreMatrix, int am, int an, int bm, int bn, int gap0, int gap1, int gap2, int offset, int size, int *order) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch bool flag = 0; int index = (blockIdx.x*blockDim.x + threadIdx.x) + (offset * size); int x = index + 1; int topLeft, left, top; //printf("Soy el index: %d\n", index); //if (index == 0){ // printf("gaps inside cuda: %d %d %d\n", gap0, gap1, gap2); //} if (index < an+1){ order[index] = 0; //matrix[c*(an + 1) + x] = 0; if (index == 0){ for (int c = 0; c != bn + 1; c++){ matrix[c*(an + 1)] = gap2*c; } } else{ matrix[x] = gap2*x; matrixDir[x] = 2; } } __syncthreads(); __threadfence_block(); if (index < an+1){ if (index == 0){ for (int c = 0; c != bn + 1; c++) { matrixDir[c*(an + 1)] = 3; } } else{ matrixDir[index] = 2; } } if (index == 0){ matrixDir[0] = 0; } __syncthreads(); __threadfence_block(); if (index < an){ //printf("\n%d 1\n", index); for (int y = 1; y <= bn; y++){ if (index == 0){ order[0] = y; flag = 1; } else{ if (order[index - 1] > y){ flag = 1; } else{ flag = 0; y--; } } __syncthreads(); __threadfence_block(); if (flag){ // printf("\n%d 2\n", index); int sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ sum += (a[xx*an + 
(x - 1)] != '-'&&b[yy*bn + (y - 1)] != '-') ? scoreMatrix[((a[xx*an + (x - 1)] - 65) * 27) + (b[yy*bn + (y - 1)] - 65)] : a[xx*an + (x - 1)] == b[yy*bn + (y - 1)] ? gap0 : gap2; // printf("Se comparó: %c y %c y salio: %d\n", a[xx*an + (x - 1)], b[yy*bn + (y - 1)], scoreMatrix[((a[xx*an + (x - 1)] - 65) * 27) + (b[yy*bn + (y - 1)] - 65)]); } } //Agregar el Gap OPENING topLeft = matrix[(y - 1)*(an + 1) + (x - 1)] + sum; sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ sum += a[xx*an + (x - 1)] == '-' ? gap0 : gap2; } } /*if (y > 1) sum += matrixDir[((y - 1)*(an + 1)) + x] == 1 ? gap1*am : 0;*/ top = matrix[((y - 1)*(an + 1)) + x] + sum; sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ sum += b[yy*bn + (y - 1)] == '-' ? gap0 : gap2; } } /*if (x > 1) sum += matrixDir[y*(an + 1) + x - 1] == 1 ? gap1*bm : 0;*/ left = matrix[(y*(an + 1)) + (x - 1)] + sum; //printf("matrix[%d,%d] top:%d topleft:%d left:%d\n", x, y, top, topLeft, left); if (topLeft >= left&&topLeft >= top){ matrixDir[y*(an + 1) + x] = 1; matrix[y*(an + 1) + x] = topLeft; // printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } if (top > topLeft&&top >= left){ matrixDir[y*(an + 1) + x] = 3; matrix[y*(an + 1) + x] = top; //printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } if (left > topLeft&&left > top) { matrixDir[y*(an + 1) + x] = 2; matrix[y*(an + 1) + x] = left; // printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } // printf("\n%d 3\n", index); /*if (index == 84&&y==90){ printf("\nIndex 84 matrixDir[%d,%d]=%d\n", x, y, matrixDir[y*(an + 1) + x]); printf("matrix[%d,%d] top:%d topleft:%d left:%d\n", x, y, top, topLeft, left); }*/ if (y == bn){ order[index]++; } order[index]++; } __syncthreads(); } //printf("\n%d 4\n", index); order[index]++; order[index]++; __syncthreads(); __threadfence_block(); } } __global__ void 
align2SIMO_Initialize(int *sizes, int x, int y, int sqrZone){ } __global__ void kMerDistance(int *matrix, int *matrixDir, int *indexes, int *sequences, int *sizes, int nseq, int *scoreMatrix, int x, int y, int gap0, int gap1, int gap2, int *score, int sqrZone, int K) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch int index = blockIdx.x*blockDim.x + threadIdx.x; int seqA = x*sqrZone + (index % sqrZone); bool flag; int seqB = y*sqrZone + (index / sqrZone); int sum = 0; int c, d, k; int *kmer; int sumA, sumB; if (x <= y){ if (seqB < nseq&&seqA < nseq){ int *A, *B; int offsetA = 0; int offsetB = 0; if (seqB > seqA){ for (c = 0; c <= seqB; c++){ if (c == seqA){ offsetA = sum; } if (c == seqB){ offsetB = sum; } sum += sizes[c]; } A = &sequences[offsetA]; B = &sequences[offsetB]; for (d = 0; d != sizes[seqA] - K; d++){ kmer = &A[d]; for (c = 0; c != sizes[seqA] - K; c++){ flag = true; for (k = 0; k != K; k++){ if (kmer[k] != A[c + k]){ flag = false; break; } } sumA += flag; } for (c = 0; c != sizes[seqB] - K; c++){ flag = true; for (k = 0; k != K; k++){ if (kmer[k] != B[c + k]){ flag = false; break; } } sumB += flag; } if (sumA < sumB){ sum += sumA; } else{ sum += sumB; } } if (sizes[seqA] < sizes[seqB]){ sum /= sizes[seqA] - K + 1; } else{ sum /= sizes[seqB] - K + 1; } score[seqA*nseq + seqB] = sum; score[seqB*nseq + seqA] = sum; } } } } __global__ void align2SIMO(int *matrix, int *matrixDir, int *indexes, int *sequences, int *sizes, int nseq, int *scoreMatrix, int x, int y, int gap0, int gap1, int gap2, int *score, int sqrZone, int mode) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch //mode 0=Column score 1=propossal int index = blockIdx.x*blockDim.x + threadIdx.x; int seqA = x*sqrZone + (index % sqrZone); int seqB = y*sqrZone + (index / sqrZone); //printf("\nholly\n"); if (x <= y){ /* if (index == 0){ matrix = new int**[(sqrZone*sqrZone)]; matrixDir = new int**[(sqrZone*sqrZone)]; for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ matrix[ccc] = new 
int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; matrixDir[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; } for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ for (int d = 0; d != sizes[x*sqrZone + (ccc % sqrZone)] + 1; d++){ matrix[ccc][d] = new int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; matrixDir[ccc][d] = new int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; } } } __syncthreads(); */ //printf("\nholly\n"); /**/ if (seqB < nseq&&seqA < nseq){ int offsetA = 0; int offsetB = 0; int sum = 0; int m = sizes[seqA]; int n = sizes[seqB]; /*int* A = new int[m]; int* B = new int[n];*/ int *A, *B; int *MatrixDir, *Matrix; int X, Y; if (seqB > seqA){ for (int c = 0; c <= seqB; c++){ if (c == seqA){ offsetA = sum; } if (c == seqB){ offsetB = sum; } sum += sizes[c]; } //printf("\n2"); int sSeqA = sizes[seqA] + 1; int offmatrix = indexes[index]; Matrix = &matrix[offmatrix]; MatrixDir = &matrixDir[offmatrix]; for (X = 0; X != m + 1; X++){ MatrixDir[X] = 2; Matrix[X] = 0; } for (Y = 0; Y != n + 1; Y++){ MatrixDir[Y*sSeqA] = 3; Matrix[Y*sSeqA] = 0; } MatrixDir[0] = 0; Matrix[0] = 0; /* for (int x = 0; x != m + 1; x++){ for (int y = 0; y != n + 1; y++){ //printf("[%d](%d,%d) [ ]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); Matrix[ y*(sizes[seqA] + 1) + x] = 0; if (y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 2; } else{ MatrixDir[ y*(sizes[seqA] + 1) + x] = -1; } if (x == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 3; } if (x == 0 && y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 0; } //printf("[%d](%d,%d) [X]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); } } */ //printf("\n3"); A = &sequences[offsetA]; B = &sequences[offsetB]; /* for (int c = offsetA; c != offsetA + m; c++){ A[c - offsetA] = sequences[c]; } for (int c = offsetB; c != offsetB + n; c++){ B[c - offsetB] = sequences[c]; }*/ //printf("\nFin de la inicializacion\n"); for (int x = 1; x != m + 1; x++){ for (int y = 1; y != n + 1; y++){ int topLeft = 0, left = 0, top = 0; topLeft = Matrix[(y - 1)*(sSeqA)+(x 
- 1)] + scoreMatrix[((A[x - 1] - 65) * 27) + (B[y - 1] - 65)]; top = Matrix[(y - 1)*(sSeqA)+(x)] - gap2; top += MatrixDir[(y - 1)*(sSeqA)+(x)] == 1 ? gap1 : 0; left = Matrix[(y)*(sSeqA)+(x - 1)] - gap2; left += MatrixDir[(y)*(sSeqA)+(x - 1)] == 1 ? gap1 : 0; if (topLeft >= left&&topLeft >= top){ MatrixDir[(y)*(sSeqA)+(x)] = 1; Matrix[(y)*(sSeqA)+(x)] = topLeft; } if (top > topLeft&&top >= left){ MatrixDir[(y)*(sSeqA)+(x)] = 3; Matrix[(y)*(sSeqA)+(x)] = top; } if (left > top&&left > topLeft) { MatrixDir[(y)*(sSeqA)+(x)] = 2; Matrix[(y)*(sSeqA)+(x)] = left; } } } // printf("\nFin de valores a la matriz\n"); X = m; Y = n; int scor = 0; while (!(X == 0 && Y == 0)){ switch (MatrixDir[(Y)*(sSeqA)+(X)]){ case 1: X--; Y--; break; case 2: X--; scor++; break; case 3: Y--; scor++; break; default: break; } } score[seqA*nseq + seqB] = scor; score[seqB*nseq + seqA] = scor; // printf("\n[%d,%d]=%d\n",seqA,seqB,scor); } else{ if (seqA == seqB)score[seqA*nseq + seqB] = 0; } } } } __global__ void align2SIMO_g(int *matrix, int *matrixDir, int *indexes, int *sequences, int *sizes, int nseq, int *scoreMatrix, int x, int y, int gap0, int gap1, int gap2, double *score, int sqrZone, int r) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch //mode 0=Column score 1=propossal int index = blockIdx.x*blockDim.x + threadIdx.x; int seqA = x*sqrZone + (index % sqrZone); int seqB = y*sqrZone + (index / sqrZone); //printf("\nholly\n"); if (x <= y){ /* if (index == 0){ matrix = new int**[(sqrZone*sqrZone)]; matrixDir = new int**[(sqrZone*sqrZone)]; for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ matrix[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; matrixDir[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; } for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ for (int d = 0; d != sizes[x*sqrZone + (ccc % sqrZone)] + 1; d++){ matrix[ccc][d] = new int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; matrixDir[ccc][d] = new int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; } } } 
__syncthreads(); */ //printf("\nholly\n"); /**/ if (seqB < nseq&&seqA < nseq){ int offsetA = 0; int offsetB = 0; int sum = 0; int m = sizes[seqA]; int n = sizes[seqB]; /*int* A = new int[m]; int* B = new int[n];*/ int *A, *B; int *MatrixDir, *Matrix; int X, Y; if (seqB > seqA){ for (int c = 0; c <= seqB; c++){ if (c == seqA){ offsetA = sum; } if (c == seqB){ offsetB = sum; } sum += sizes[c]; } //printf("\n2"); int sSeqA = sizes[seqA] + 1; int sSeqB = sizes[seqB]; int offmatrix = indexes[index]; Matrix = &matrix[offmatrix]; MatrixDir = &matrixDir[offmatrix]; for (X = 0; X != m + 1; X++){ MatrixDir[X] = 2; Matrix[X] = 0; } for (Y = 0; Y != n + 1; Y++){ MatrixDir[Y*sSeqA] = 3; Matrix[Y*sSeqA] = 0; } MatrixDir[0] = 0; Matrix[0] = 0; /* for (int x = 0; x != m + 1; x++){ for (int y = 0; y != n + 1; y++){ //printf("[%d](%d,%d) [ ]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); Matrix[ y*(sizes[seqA] + 1) + x] = 0; if (y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 2; } else{ MatrixDir[ y*(sizes[seqA] + 1) + x] = -1; } if (x == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 3; } if (x == 0 && y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 0; } //printf("[%d](%d,%d) [X]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); } } */ //printf("\n3"); A = &sequences[offsetA]; B = &sequences[offsetB]; for (int x = 1; x != m + 1; x++){ for (int y = 1; y != n + 1; y++){ int topLeft = 0, left = 0, top = 0; topLeft = Matrix[(y - 1)*(sSeqA)+(x - 1)] + scoreMatrix[((A[x - 1] - 65) * 27) + (B[y - 1] - 65)]; top = Matrix[(y - 1)*(sSeqA)+(x)] + gap2; //top += MatrixDir[(y - 1)*(sSeqA)+(x)] == 1 ? gap1 : 0; left = Matrix[(y)*(sSeqA)+(x - 1)] + gap2; //left += MatrixDir[(y)*(sSeqA)+(x - 1)] == 1 ? 
gap1 : 0; if (topLeft >= left&&topLeft >= top){ MatrixDir[(y)*(sSeqA)+(x)] = 1; Matrix[(y)*(sSeqA)+(x)] = topLeft; } if (top > topLeft&&top >= left){ MatrixDir[(y)*(sSeqA)+(x)] = 3; Matrix[(y)*(sSeqA)+(x)] = top; } if (left > top&&left > topLeft) { MatrixDir[(y)*(sSeqA)+(x)] = 2; Matrix[(y)*(sSeqA)+(x)] = left; } } } // printf("\nFin de valores a la matriz\n"); X = m; Y = n; score[0] = 0; double scor = 0; int *a_inv = new int[sSeqA + sSeqB]; int *b_inv = new int[sSeqA + sSeqB]; int size = 0; while (!(X == 0 && Y == 0)){ switch (MatrixDir[(Y*sSeqA) + X]){ case 1: a_inv[size] = A[X - 1]; b_inv[size] = B[Y - 1]; X--; Y--; break; case 2: a_inv[size] = A[X - 1]; b_inv[size] = 45; X--; break; case 3: a_inv[size] = 45; b_inv[size] = B[Y - 1]; Y--; break; default: break; } size++; } for (int c = 0; c != size; c++){ if (a_inv[c] == b_inv[c]){ scor++; } } score[seqA*nseq + seqB] = scor / (double)size; score[seqB*nseq + seqA] = scor / (double)size; } else{ if (seqA == seqB)score[seqA*nseq + seqB] = 0; } } } } __global__ void align2SIMO_r(int *matrix, int *matrixDir, int *indexes, int *sequences, int *sizes, int nseq, int *scoreMatrix, int x, int y, int gap0, int gap1, int gap2, double *score, int sqrZone, int r) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch //mode 0=Column score 1=propossal int index = blockIdx.x*blockDim.x + threadIdx.x; int seqA = x*sqrZone + (index % sqrZone); int seqB = y*sqrZone + (index / sqrZone); //printf("\nholly\n"); if (x <= y){ /* if (index == 0){ matrix = new int**[(sqrZone*sqrZone)]; matrixDir = new int**[(sqrZone*sqrZone)]; for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ matrix[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; matrixDir[ccc] = new int*[sizes[x*sqrZone + (ccc % sqrZone)] + 1]; } for (int ccc = 0; ccc != sqrZone*sqrZone; ccc++){ for (int d = 0; d != sizes[x*sqrZone + (ccc % sqrZone)] + 1; d++){ matrix[ccc][d] = new int[sizes[y*sqrZone + (ccc / sqrZone)] + 1]; matrixDir[ccc][d] = new int[sizes[y*sqrZone + (ccc / 
sqrZone)] + 1]; } } } __syncthreads(); */ /**/ if (seqB < nseq&&seqA < nseq){ int offsetA = 0; int offsetB = 0; int sum = 0; int m = sizes[seqA]; int n = sizes[seqB]; /*int* A = new int[m]; int* B = new int[n];*/ int *A, *B; int *MatrixDir, *Matrix; int X, Y; if (seqB > seqA){ for (int c = 0; c <= seqB; c++){ if (c == seqA){ offsetA = sum; } if (c == seqB){ offsetB = sum; } sum += sizes[c]; } int sSeqA = sizes[seqA] + 1; int sSeqB = sizes[seqB]; int offmatrix = indexes[index]; Matrix = &matrix[offmatrix]; MatrixDir = &matrixDir[offmatrix]; for (X = 0; X != m + 1; X++){ MatrixDir[X] = 2; Matrix[X] = 0; } for (Y = 0; Y != n + 1; Y++){ MatrixDir[Y*sSeqA] = 3; Matrix[Y*sSeqA] = 0; } MatrixDir[0] = 0; Matrix[0] = 0; /* for (int x = 0; x != m + 1; x++){ for (int y = 0; y != n + 1; y++){ //printf("[%d](%d,%d) [ ]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); Matrix[ y*(sizes[seqA] + 1) + x] = 0; if (y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 2; } else{ MatrixDir[ y*(sizes[seqA] + 1) + x] = -1; } if (x == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 3; } if (x == 0 && y == 0){ MatrixDir[y*(sizes[seqA] + 1) + x] = 0; } //printf("[%d](%d,%d) [X]\n", blockIdx.x*blockDim.x + threadIdx.x, x, y, m, n); } } */ /* if (seqA == 0 && seqB == 1){ printf("Blosum:\n"); for (int c = 0; c != 10; c++){ printf("%d ", scoreMatrix[c]); } }*/ A = &sequences[offsetA]; B = &sequences[offsetB]; for (int x = 1; x != m + 1; x++){ for (int y = 1; y != n + 1; y++){ int topLeft = 0, left = 0, top = 0; topLeft = Matrix[(y - 1)*(sSeqA)+(x - 1)] + scoreMatrix[((A[x - 1] - 65) * 27) + (B[y - 1] - 65)]; top = Matrix[(y - 1)*(sSeqA)+(x)] + gap2; top += MatrixDir[(y - 1)*(sSeqA)+(x)] == 1 ? gap1 : 0; left = Matrix[(y)*(sSeqA)+(x - 1)] + gap2; left += MatrixDir[(y)*(sSeqA)+(x - 1)] == 1 ? 
gap1 : 0; if (topLeft >= left&&topLeft >= top){ MatrixDir[(y)*(sSeqA)+(x)] = 1; Matrix[(y)*(sSeqA)+(x)] = topLeft; } if (top > topLeft&&top >= left){ MatrixDir[(y)*(sSeqA)+(x)] = 3; Matrix[(y)*(sSeqA)+(x)] = top; } if (left > top&&left > topLeft) { MatrixDir[(y)*(sSeqA)+(x)] = 2; Matrix[(y)*(sSeqA)+(x)] = left; } } } X = m; Y = n; score[0] = 0; double scor = 0; int *a_inv = new int[sSeqA + sSeqB]; int *b_inv = new int[sSeqA + sSeqB]; int size = 0; while (!(X == 0 && Y == 0)){ switch (MatrixDir[(Y*sSeqA) + X]){ case 1: a_inv[size] = A[X - 1]; b_inv[size] = B[Y - 1]; X--; Y--; break; case 2: a_inv[size] = A[X - 1]; b_inv[size] = 45; X--; break; case 3: a_inv[size] = 45; b_inv[size] = B[Y - 1]; Y--; break; } size++; } double g=0, h=0; for (int c = 0; c != size; c++){ for (int d = c - r; d != c + r; d++){ if (d >= 0 && d < size){ if (d < c){ if (a_inv[c] != '-'&&b_inv[d] != '-'){ g = (double)scoreMatrix[((a_inv[c] - 65) * 27) + (b_inv[d] - 65)]; } else{ g = (double)gap0; } g += 4 - gap1; h = r - (d - (c - r)) + 1.0; scor += (h / g); } if (d == c){ if (a_inv[c] != '-'&&b_inv[d] != '-'){ scor += (double)scoreMatrix[((a_inv[c] - 65) * 27) + (b_inv[d] - 65)]; } else{ scor += gap0; } } if (d > c){ if (d < size){ if (a_inv[c] != '-'&&b_inv[d] != '-'){ g = (double)scoreMatrix[((a_inv[c] - 65) * 27) + (b_inv[d] - 65)]; } else{ g = gap0; } g += 4 - gap1; h = (d - (c - r)) + 1.0; scor += (h / g); } } } } } score[seqA*nseq + seqB] = scor; score[seqB*nseq + seqA] = scor; /* printf("\n[%d,%d] size %d score:%f\n", m, n, size, scor); if (seqA == 0 && seqB == 1){ for (int c = 0; c != size; c++){ printf("%c", a_inv[c]); } printf("\n"); for (int c = 0; c != size; c++){ printf("%c", b_inv[c]); } printf("\n"); }*/ } else{ if (seqA == seqB)score[seqA*nseq + seqB] = 0; } } } } __global__ void tracebackPSP(int inv[], int a[], int b[], int matrixDir[], int am, int an, int bm, int bn, int *k) { int x = an; int y = bn; int c = 0; int maxInv = (an + bn); for (int k = 0; k != maxInv*(am + bm); 
k++){ inv[k] = 64; } while (!(x == 0 && y == 0)){ if (matrixDir[y*(an + 1) + x] == 3){ for (int d = 0; d != am; d++){ inv[(d*maxInv) + c] = '-'; } for (int d = am; d != am + bm; d++){ inv[((d*maxInv) + c)] = b[(d - am)*bn + y - 1]; } y--; } else { if (matrixDir[y*(an + 1) + x] == 2) { for (int d = 0; d != am; d++){ inv[(d*maxInv) + c] = a[d*an + x - 1]; } for (int d = am; d != am + bm; d++){ inv[((d*maxInv) + c)] = '-'; } x--; } else { if (matrixDir[y*(an + 1) + x] == 1) { for (int d = 0; d != am; d++){ inv[(d*maxInv) + c] = a[d*an + x - 1]; } for (int d = am; d != am + bm; d++){ inv[((d*maxInv) + c)] = b[(d - am)*bn + y - 1]; } x--; y--; } } } c++; } k[0] = c; } __global__ void invertPSP(int original[], int inverse[], int n, int k[], int mn) { for (int d = 0; d != n; d++){ for (int c = 0; c < k[0]; c++){ if (inverse[(mn*d) + (k[0] - c - 1)] > 0 && inverse[(mn*d) + (k[0] - c)] < 256){ original[(mn*d) + c] = inverse[(mn*d) + (k[0] - c - 1)]; } else{ original[(mn*d) + c] = 32; } } } } int main(){ return 0; } } /* 6 febrero 2015 __global__ void alignPSP(int *a, int *b, int *matrix, int *matrixDir, int *scoreMatrix, int am, int an, int bm, int bn, int gap0, int gap1, int gap2, int offset, int size, int *order) { //gap0=gap gap, gap1=gap opening, gap2=gap mistmatch bool flag = 0; int index = (blockIdx.x*blockDim.x + threadIdx.x) + (offset * size); int x = index + 1; int topLeft, left, top; printf("Soy el index: %d\n",index); if (index <= an){ order[index] = 0; for (int c = 0; c != bn + 1; c++){ matrix[c*(an + 1) + x] = 0; matrixDir[c*(an + 1) + x] = 0; } matrixDir[x] = 2; } __syncthreads(); __threadfence_block(); if (index <= an){ if (index != 0 && offset == 0){ for (int c = 0; c != bn + 1; c++) { matrixDir[c*(an + 1) + index] = c == 0 ? 
2 : 5; } } else{ for (int c = 0; c != bn + 1; c++) { matrixDir[c*(an + 1)] = 3; } matrixDir[0] = 0; } } __syncthreads(); __threadfence_block(); if (index < an){ for (int y = 1; y <= bn; y++){ if (index == 0){ order[0] = y; flag = 1; } else{ //printf("Yo: %d order[%d]=%d\n", index,index-1,order[index-1]); if (order[index - 1] > y){ flag = 1; order[index]++; } else{ flag = 0; y--; } } __syncthreads(); __threadfence_block(); if (flag){ int sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ if ((a[xx*an + (x - 1)] != '-'&&b[yy*bn + (y - 1)] != '-')){ sum += scoreMatrix[((a[xx*an + (x - 1)] - 65) * 27) + (b[yy*bn + (y - 1)] - 65)]; // printf("Se comparó: %c y %c y salio: %d\n", a[xx*an + (x - 1)], b[yy*bn + (y - 1)], scoreMatrix[((a[xx*an + (x - 1)] - 65) * 27) + (b[yy*bn + (y - 1)] - 65)]); } else{ if (a[xx*an + (x - 1)] == b[yy*bn + (y - 1)]){ sum += gap0; } else{ sum += gap2; } } } } topLeft = matrix[(y - 1)*(an+1) + (x - 1)] + sum; sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ if (a[xx*an + (x - 1)] == '-'){ sum += gap0; } else{ sum += gap2; } } } if (y>1) sum += matrixDir[((y-1)*(an+1)) + x] == 1 ? gap1 : 0; top = matrix[((y - 1)*(an+1)) + x] + sum; sum = 0; for (int xx = 0; xx != am; xx++){ for (int yy = 0; yy != bm; yy++){ if (b[yy*bn + (y - 1)] == '-'){ sum += gap0; } else{ sum += gap2; } } } if (x>1) sum += matrixDir[y*(an + 1) + x-1] == 1 ? 
gap1 : 0; left = matrix[(y*(an+1)) + (x - 1)] + sum; //printf("matrix[%d,%d] top:%d topleft:%d left:%d\n", x, y, top, topLeft, left); if (topLeft >= left&&topLeft >= top){ matrixDir[y*(an + 1) + x] = 1; matrix[y*(an + 1) + x] = topLeft; // printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } else if (top > topLeft&&top >= left){ matrixDir[y*(an + 1) + x] = 3; matrix[y*(an + 1) + x] = top; //printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } else { matrixDir[y*(an + 1) + x] = 2; matrix[y*(an + 1) + x] = left; // printf("matrix[%d,%d]=%d,%d\n", x, y, matrix[y*(an + 1) + x], matrixDir[y*(an + 1) + x]); } if (y == bn){ order[index]++; } } } order[index]++; order[index]++; __syncthreads(); __threadfence_block(); } } */
927b98c1dbc9b4663d7cfa730c45b33b06031a27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ------------ * This code is provided solely for the personal and private use of * students taking the CSC367H1 course at the University of Toronto. * Copying for purposes other than this use is expressly prohibited. * All forms of distribution of this code, whether as given or with * any changes, are expressly prohibited. * * Authors: Bogdan Simion, Felipe de Azevedo Piovezan * * All of the files in this directory and all subdirectories are: * Copyright (c) 2017 Bogdan Simion * ------------- */ #include <stdio.h> #include <string> #include <unistd.h> #include <sys/sysinfo.h> #include <time.h> #include "pgm.h" #include "filters.h" #include "kernels.h" #include "clock.h" /* Use this function to print the time of each of your kernels. * The parameter names are intuitive, but don't hesitate to ask * for clarifications. * DO NOT modify this function.*/ void print_run(float time_cpu, int kernel, float time_gpu_computation, float time_gpu_transfer_in, float time_gpu_transfer_out) { printf("%12.6f ", time_cpu); printf("%5d ", kernel); printf("%12.6f ", time_gpu_computation); printf("%14.6f ", time_gpu_transfer_in); printf("%15.6f ", time_gpu_transfer_out); printf("%13.2f ", time_cpu/time_gpu_computation); printf("%7.2f\n", time_cpu/ (time_gpu_computation + time_gpu_transfer_in + time_gpu_transfer_out)); } __constant__ int8_t filter_constant[9*9]; int filter_dimension = 9; int filter_index = 2; int main(int argc, char **argv) { int c; std::string input_filename, cpu_output_filename, base_gpu_output_filename; if (argc < 3) { printf("Wrong usage. 
Expected -i <input_file> -o <output_file>\n"); return 0; } while ((c = getopt (argc, argv, "i:o:")) != -1) { switch (c) { case 'i': input_filename = std::string(optarg); break; case 'o': cpu_output_filename = std::string(optarg); base_gpu_output_filename = std::string(optarg); break; default: return 0; } } pgm_image source_img; init_pgm_image(&source_img); if (load_pgm_from_file(input_filename.c_str(), &source_img) != NO_ERR) { printf("Error loading source image.\n"); return 0; } /* Do not modify this printf */ printf("CPU_time(ms) Kernel GPU_time(ms) TransferIn(ms) TransferOut(ms) " "Speedup_noTrf Speedup\n"); pgm_image cpu_output_img; copy_pgm_image_size(&source_img, &cpu_output_img); float time_cpu; struct timespec start, stop; /* * TODO: Run your CPU implementation here and get its time. Don't include * file IO in your measurement. * */ clock_gettime(CLOCK_MONOTONIC, &start); apply_filter2d_threaded(builtin_filters[filter_index], source_img.matrix, cpu_output_img.matrix, source_img.width, source_img.height, get_nprocs()); clock_gettime(CLOCK_MONOTONIC, &stop); time_cpu = (stop.tv_sec - start.tv_sec)*1000 + (double)(stop.tv_nsec - start.tv_nsec) / 1000000; save_pgm_to_file(cpu_output_filename.c_str(), &cpu_output_img); /* * CPU implementation ends * */ hipDeviceProp_t properties; hipGetDeviceProperties(&properties, 0); // create source image at host and device int32_t n = source_img.width * source_img.height; int32_t *d_source_img; hipMalloc((void **)&d_source_img, n*sizeof(int)); int32_t threads_per_block = min(1024, properties.maxThreadsPerBlock); int8_t *filter; hipMalloc((void **)&filter, filter_dimension*filter_dimension*sizeof(int8_t)); hipMemcpy(filter, builtin_filters_int[filter_index], filter_dimension*filter_dimension*sizeof(int8_t), hipMemcpyHostToDevice); hipMemcpyToSymbol(filter_constant, builtin_filters_int[2], 9*9*sizeof(int8_t)); int32_t blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // number of blocks we need in reduction 
int num_iterations = 1; // number of times we need to call the reduction kernels to get the min/max int temp = blocks_reduction; while (temp > 1) { num_iterations += 1; temp = (temp + (threads_per_block - 1)) / threads_per_block; } int32_t *d_max; // device memory for the max values int32_t *d_min; // device memory for the min values hipMalloc((void **)&d_max, blocks_reduction * sizeof(int)); hipMalloc((void **)&d_min, blocks_reduction * sizeof(int)); int32_t n_reduction; // total number of values we want to put into the reduction kernel int host_max = 255; int host_min = 0; Clock gpu_clock; float time_in, time_kernel, time_out; /* TODO: * run each of your gpu implementations here, * get their time, * and save the output image to a file. * Don't forget to add the number of the kernel * as a prefix to the output filename: * Print the execution times by calling print_run(). */ /* * Kernel 1 * */ // create some basic information std::string gpu_file1 = "1"+base_gpu_output_filename; int32_t blocks_1 = (n + (threads_per_block - 1)) / threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // transfer in gpu_clock.start(); hipMemcpy(d_source_img, source_img.matrix, n*sizeof(int), hipMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // create output image at host and device pgm_image gpu_output_img1; copy_pgm_image_size(&source_img, &gpu_output_img1); int32_t *d_output_img1; hipMalloc((void **)&d_output_img1, n*sizeof(int)); // call kernel1 gpu_clock.start(); hipLaunchKernelGGL(( kernel1), dim3(blocks_1), dim3(threads_per_block) , 0, 0, filter, filter_dimension, d_source_img, d_output_img1, source_img.width, source_img.height); /* reduction to calculate max and min */ n_reduction = n; hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_output_img1, d_output_img1, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / 
threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back hipMemcpy(&host_max, d_max, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&host_min, d_min, sizeof(int), hipMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize hipLaunchKernelGGL(( normalize1), dim3(blocks_1), dim3(threads_per_block) , 0, 0, d_output_img1, source_img.width, source_img.height, host_min, host_max); time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); hipMemcpy(gpu_output_img1.matrix, d_output_img1, n * sizeof(int), hipMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; print_run(time_cpu, 1, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file1.c_str(), &gpu_output_img1); // Free memory hipFree(d_output_img1); destroy_pgm_image(&gpu_output_img1); /* * Kernel 1 end * */ /* * Kernel 2 * */ // create some basic information std::string gpu_file2 = "2"+base_gpu_output_filename; int32_t blocks_2 = (n + (threads_per_block - 1)) / threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // transfer in gpu_clock.start(); hipMemcpy(d_source_img, source_img.matrix, n*sizeof(int), hipMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // create output image at host and device pgm_image gpu_output_img2; copy_pgm_image_size(&source_img, &gpu_output_img2); int32_t *d_output_img2; hipMalloc((void **)&d_output_img2, n*sizeof(int)); // call kernel gpu_clock.start(); hipLaunchKernelGGL(( kernel2), dim3(blocks_2), dim3(threads_per_block) , 0, 0, filter, filter_dimension, d_source_img, d_output_img2, source_img.width, source_img.height); /* reduction to calculate max and min */ n_reduction = n; hipLaunchKernelGGL(( reduction), 
dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_output_img2, d_output_img2, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back hipMemcpy(&host_max, d_max, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&host_min, d_min, sizeof(int), hipMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize hipLaunchKernelGGL(( normalize2), dim3(blocks_2), dim3(threads_per_block) , 0, 0, d_output_img2, source_img.width, source_img.height, host_min, host_max); time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); hipMemcpy(gpu_output_img2.matrix, d_output_img2, n * sizeof(int), hipMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; print_run(time_cpu, 2, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file2.c_str(), &gpu_output_img2); // Free memory hipFree(d_output_img2); destroy_pgm_image(&gpu_output_img2); /* * Kernel 2 end * */ /* * Kernel 3 * */ // create some basic information std::string gpu_file3 = "3"+base_gpu_output_filename; int nrows = 1; int32_t blocks_3 = ((source_img.height + nrows - 1)/nrows + (threads_per_block - 1)) / threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // transfer in gpu_clock.start(); hipMemcpy(d_source_img, source_img.matrix, n*sizeof(int), hipMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // create output image at host and device pgm_image gpu_output_img3; copy_pgm_image_size(&source_img, &gpu_output_img3); int32_t *d_output_img3; hipMalloc((void **)&d_output_img3, n*sizeof(int)); // call kernel gpu_clock.start(); 
hipLaunchKernelGGL(( kernel3), dim3(blocks_3), dim3(threads_per_block) , 0, 0, filter, filter_dimension, d_source_img, d_output_img3, source_img.width, source_img.height, nrows); /* reduction to calculate max and min */ n_reduction = n; hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_output_img3, d_output_img3, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back hipMemcpy(&host_max, d_max, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&host_min, d_min, sizeof(int), hipMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize hipLaunchKernelGGL(( normalize3), dim3(blocks_3), dim3(threads_per_block) , 0, 0, d_output_img3, source_img.width, source_img.height, host_min, host_max, nrows); time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); hipMemcpy(gpu_output_img3.matrix, d_output_img3, n * sizeof(int), hipMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; print_run(time_cpu, 3, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file3.c_str(), &gpu_output_img3); // Free memory hipFree(d_output_img3); destroy_pgm_image(&gpu_output_img3); /* * Kernel 3 end * */ /* * Kernel 4 * */ // create some basic information std::string gpu_file4 = "4"+base_gpu_output_filename; int pixels_per_thread = 8; int32_t blocks_4 = ((n + pixels_per_thread - 1)/pixels_per_thread + (threads_per_block - 1)) / threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // transfer in gpu_clock.start(); hipMemcpy(d_source_img, source_img.matrix, n*sizeof(int), 
hipMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // create output image at host and device pgm_image gpu_output_img4; copy_pgm_image_size(&source_img, &gpu_output_img4); int32_t *d_output_img4; hipMalloc((void **)&d_output_img4, n*sizeof(int)); // call kernel gpu_clock.start(); hipLaunchKernelGGL(( kernel4), dim3(blocks_4), dim3(threads_per_block) , 0, 0, filter, filter_dimension, d_source_img, d_output_img4, source_img.width, source_img.height); /* reduction to calculate max and min */ n_reduction = n; hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_output_img4, d_output_img4, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back hipMemcpy(&host_max, d_max, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&host_min, d_min, sizeof(int), hipMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize hipLaunchKernelGGL(( normalize4), dim3(blocks_4), dim3(threads_per_block) , 0, 0, d_output_img4, source_img.width, source_img.height, host_min, host_max); time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); hipMemcpy(gpu_output_img4.matrix, d_output_img4, n * sizeof(int), hipMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; print_run(time_cpu, 4, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file4.c_str(), &gpu_output_img4); // Free memory hipFree(d_output_img4); destroy_pgm_image(&gpu_output_img4); /* * Kernel 4 end * */ /* * Kernel 5 * */ std::string gpu_file5 = "5"+base_gpu_output_filename; int32_t blocks_5 = (n + (threads_per_block - 1)) / 
threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; int32_t *d_source_img2D; bool _2d = false; // 2D Pitch restriction if (source_img.width < 60000 && source_img.height < 60000) { _2d = true; } // transfer in gpu_clock.start(); // Prepare Resource/Texture objects hipTextureObject_t tex; hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = hipReadModeElementType; if (_2d) { // Use 2D Pitch Resource // Reference: https://stackoverflow.com/questions/16380883/new-cuda-texture-object-getting-wrong-data-in-2d-case size_t pitch; hipMallocPitch(&d_source_img2D, &pitch, sizeof(int) * source_img.width, source_img.height); hipMemcpy2D(d_source_img2D, pitch, source_img.matrix, sizeof(int) * source_img.width, sizeof(int) * source_img.width, source_img.height, hipMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // Specify Resource details resDesc.resType = hipResourceTypePitch2D; // for 2D texture resDesc.res.pitch2D.devPtr = d_source_img2D; resDesc.res.pitch2D.pitchInBytes = pitch; resDesc.res.pitch2D.width = source_img.width; resDesc.res.pitch2D.height = source_img.height; resDesc.res.pitch2D.desc.f = hipChannelFormatKindFloat; resDesc.res.pitch2D.desc.x = 32; // bits per channel resDesc.res.pitch2D.desc.y = 0; } else { hipMemcpy(d_source_img, source_img.matrix, n*sizeof(int), hipMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // Specify Resource details // Reference: https://devblogs.nvidia.com/cuda-pro-tip-kepler-texture-objects-improve-performance-and-flexibility/ resDesc.resType = hipResourceTypeLinear; // for 1D texture resDesc.res.linear.devPtr = d_source_img; resDesc.res.linear.desc.f = hipChannelFormatKindFloat; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.sizeInBytes = n*sizeof(int); } // Create texture object hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL); // create output image at host and 
device pgm_image gpu_output_img5; copy_pgm_image_size(&source_img, &gpu_output_img5); int32_t *d_output_img5; hipMalloc((void **)&d_output_img5, n*sizeof(int)); dim3 gridSize((source_img.width + 31) / 32, (source_img.height + 31) / 32); dim3 blockSize(32, 32); // call kernel gpu_clock.start(); if (_2d) { hipLaunchKernelGGL(( kernel5), dim3(gridSize), dim3(blockSize) , 0, 0, filter_dimension, tex, d_output_img5, source_img.width, source_img.height); } else { hipLaunchKernelGGL(( kernel5_1d), dim3(blocks_5), dim3(threads_per_block) , 0, 0, filter_dimension, tex, d_output_img5, source_img.width, source_img.height); } /* reduction to calculate max and min */ n_reduction = n; hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_output_img5, d_output_img5, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(threads_per_block) , 0, 0, d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back hipMemcpy(&host_max, d_max, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&host_min, d_min, sizeof(int), hipMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize if (_2d) { hipLaunchKernelGGL(( normalize5), dim3(gridSize), dim3(blockSize) , 0, 0, d_output_img5, source_img.width, source_img.height, host_min, host_max); } else { hipLaunchKernelGGL(( normalize5_1d), dim3(blocks_5), dim3(threads_per_block) , 0, 0, d_output_img5, source_img.width, source_img.height, host_min, host_max); } time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); hipMemcpy(gpu_output_img5.matrix, d_output_img5, n * sizeof(int), hipMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; 
print_run(time_cpu, 5, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file5.c_str(), &gpu_output_img5); // Free memory hipFree(d_source_img2D); hipFree(d_source_img); hipFree(d_output_img5); destroy_pgm_image(&gpu_output_img5); /* * Kernel 5 end * */ // Free memory hipDestroyTextureObject(tex); hipFree(filter); hipFree(d_max); hipFree(d_min); }
927b98c1dbc9b4663d7cfa730c45b33b06031a27.cu
/* ------------ * This code is provided solely for the personal and private use of * students taking the CSC367H1 course at the University of Toronto. * Copying for purposes other than this use is expressly prohibited. * All forms of distribution of this code, whether as given or with * any changes, are expressly prohibited. * * Authors: Bogdan Simion, Felipe de Azevedo Piovezan * * All of the files in this directory and all subdirectories are: * Copyright (c) 2017 Bogdan Simion * ------------- */ #include <stdio.h> #include <string> #include <unistd.h> #include <sys/sysinfo.h> #include <time.h> #include "pgm.h" #include "filters.h" #include "kernels.h" #include "clock.h" /* Use this function to print the time of each of your kernels. * The parameter names are intuitive, but don't hesitate to ask * for clarifications. * DO NOT modify this function.*/ void print_run(float time_cpu, int kernel, float time_gpu_computation, float time_gpu_transfer_in, float time_gpu_transfer_out) { printf("%12.6f ", time_cpu); printf("%5d ", kernel); printf("%12.6f ", time_gpu_computation); printf("%14.6f ", time_gpu_transfer_in); printf("%15.6f ", time_gpu_transfer_out); printf("%13.2f ", time_cpu/time_gpu_computation); printf("%7.2f\n", time_cpu/ (time_gpu_computation + time_gpu_transfer_in + time_gpu_transfer_out)); } __constant__ int8_t filter_constant[9*9]; int filter_dimension = 9; int filter_index = 2; int main(int argc, char **argv) { int c; std::string input_filename, cpu_output_filename, base_gpu_output_filename; if (argc < 3) { printf("Wrong usage. 
Expected -i <input_file> -o <output_file>\n"); return 0; } while ((c = getopt (argc, argv, "i:o:")) != -1) { switch (c) { case 'i': input_filename = std::string(optarg); break; case 'o': cpu_output_filename = std::string(optarg); base_gpu_output_filename = std::string(optarg); break; default: return 0; } } pgm_image source_img; init_pgm_image(&source_img); if (load_pgm_from_file(input_filename.c_str(), &source_img) != NO_ERR) { printf("Error loading source image.\n"); return 0; } /* Do not modify this printf */ printf("CPU_time(ms) Kernel GPU_time(ms) TransferIn(ms) TransferOut(ms) " "Speedup_noTrf Speedup\n"); pgm_image cpu_output_img; copy_pgm_image_size(&source_img, &cpu_output_img); float time_cpu; struct timespec start, stop; /* * TODO: Run your CPU implementation here and get its time. Don't include * file IO in your measurement. * */ clock_gettime(CLOCK_MONOTONIC, &start); apply_filter2d_threaded(builtin_filters[filter_index], source_img.matrix, cpu_output_img.matrix, source_img.width, source_img.height, get_nprocs()); clock_gettime(CLOCK_MONOTONIC, &stop); time_cpu = (stop.tv_sec - start.tv_sec)*1000 + (double)(stop.tv_nsec - start.tv_nsec) / 1000000; save_pgm_to_file(cpu_output_filename.c_str(), &cpu_output_img); /* * CPU implementation ends * */ cudaDeviceProp properties; cudaGetDeviceProperties(&properties, 0); // create source image at host and device int32_t n = source_img.width * source_img.height; int32_t *d_source_img; cudaMalloc((void **)&d_source_img, n*sizeof(int)); int32_t threads_per_block = min(1024, properties.maxThreadsPerBlock); int8_t *filter; cudaMalloc((void **)&filter, filter_dimension*filter_dimension*sizeof(int8_t)); cudaMemcpy(filter, builtin_filters_int[filter_index], filter_dimension*filter_dimension*sizeof(int8_t), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(filter_constant, builtin_filters_int[2], 9*9*sizeof(int8_t)); int32_t blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // number of blocks we need in 
reduction int num_iterations = 1; // number of times we need to call the reduction kernels to get the min/max int temp = blocks_reduction; while (temp > 1) { num_iterations += 1; temp = (temp + (threads_per_block - 1)) / threads_per_block; } int32_t *d_max; // device memory for the max values int32_t *d_min; // device memory for the min values cudaMalloc((void **)&d_max, blocks_reduction * sizeof(int)); cudaMalloc((void **)&d_min, blocks_reduction * sizeof(int)); int32_t n_reduction; // total number of values we want to put into the reduction kernel int host_max = 255; int host_min = 0; Clock gpu_clock; float time_in, time_kernel, time_out; /* TODO: * run each of your gpu implementations here, * get their time, * and save the output image to a file. * Don't forget to add the number of the kernel * as a prefix to the output filename: * Print the execution times by calling print_run(). */ /* * Kernel 1 * */ // create some basic information std::string gpu_file1 = "1"+base_gpu_output_filename; int32_t blocks_1 = (n + (threads_per_block - 1)) / threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // transfer in gpu_clock.start(); cudaMemcpy(d_source_img, source_img.matrix, n*sizeof(int), cudaMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // create output image at host and device pgm_image gpu_output_img1; copy_pgm_image_size(&source_img, &gpu_output_img1); int32_t *d_output_img1; cudaMalloc((void **)&d_output_img1, n*sizeof(int)); // call kernel1 gpu_clock.start(); kernel1<<< blocks_1, threads_per_block >>>(filter, filter_dimension, d_source_img, d_output_img1, source_img.width, source_img.height); /* reduction to calculate max and min */ n_reduction = n; reduction<<< blocks_reduction, threads_per_block >>>(d_output_img1, d_output_img1, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { 
reduction<<< blocks_reduction, threads_per_block >>>(d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back cudaMemcpy(&host_max, d_max, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&host_min, d_min, sizeof(int), cudaMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize normalize1<<< blocks_1, threads_per_block >>>(d_output_img1, source_img.width, source_img.height, host_min, host_max); time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); cudaMemcpy(gpu_output_img1.matrix, d_output_img1, n * sizeof(int), cudaMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; print_run(time_cpu, 1, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file1.c_str(), &gpu_output_img1); // Free memory cudaFree(d_output_img1); destroy_pgm_image(&gpu_output_img1); /* * Kernel 1 end * */ /* * Kernel 2 * */ // create some basic information std::string gpu_file2 = "2"+base_gpu_output_filename; int32_t blocks_2 = (n + (threads_per_block - 1)) / threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // transfer in gpu_clock.start(); cudaMemcpy(d_source_img, source_img.matrix, n*sizeof(int), cudaMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // create output image at host and device pgm_image gpu_output_img2; copy_pgm_image_size(&source_img, &gpu_output_img2); int32_t *d_output_img2; cudaMalloc((void **)&d_output_img2, n*sizeof(int)); // call kernel gpu_clock.start(); kernel2<<< blocks_2, threads_per_block >>>(filter, filter_dimension, d_source_img, d_output_img2, source_img.width, source_img.height); /* reduction to calculate max and min */ n_reduction = n; reduction<<< blocks_reduction, threads_per_block >>>(d_output_img2, d_output_img2, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / 
threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { reduction<<< blocks_reduction, threads_per_block >>>(d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back cudaMemcpy(&host_max, d_max, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&host_min, d_min, sizeof(int), cudaMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize normalize2<<< blocks_2, threads_per_block >>>(d_output_img2, source_img.width, source_img.height, host_min, host_max); time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); cudaMemcpy(gpu_output_img2.matrix, d_output_img2, n * sizeof(int), cudaMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; print_run(time_cpu, 2, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file2.c_str(), &gpu_output_img2); // Free memory cudaFree(d_output_img2); destroy_pgm_image(&gpu_output_img2); /* * Kernel 2 end * */ /* * Kernel 3 * */ // create some basic information std::string gpu_file3 = "3"+base_gpu_output_filename; int nrows = 1; int32_t blocks_3 = ((source_img.height + nrows - 1)/nrows + (threads_per_block - 1)) / threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // transfer in gpu_clock.start(); cudaMemcpy(d_source_img, source_img.matrix, n*sizeof(int), cudaMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // create output image at host and device pgm_image gpu_output_img3; copy_pgm_image_size(&source_img, &gpu_output_img3); int32_t *d_output_img3; cudaMalloc((void **)&d_output_img3, n*sizeof(int)); // call kernel gpu_clock.start(); kernel3<<< blocks_3, threads_per_block >>>(filter, filter_dimension, d_source_img, d_output_img3, source_img.width, source_img.height, nrows); /* reduction to calculate max and min */ n_reduction = n; reduction<<< blocks_reduction, threads_per_block >>>(d_output_img3, d_output_img3, 
n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { reduction<<< blocks_reduction, threads_per_block >>>(d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back cudaMemcpy(&host_max, d_max, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&host_min, d_min, sizeof(int), cudaMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize normalize3<<< blocks_3, threads_per_block >>>(d_output_img3, source_img.width, source_img.height, host_min, host_max, nrows); time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); cudaMemcpy(gpu_output_img3.matrix, d_output_img3, n * sizeof(int), cudaMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; print_run(time_cpu, 3, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file3.c_str(), &gpu_output_img3); // Free memory cudaFree(d_output_img3); destroy_pgm_image(&gpu_output_img3); /* * Kernel 3 end * */ /* * Kernel 4 * */ // create some basic information std::string gpu_file4 = "4"+base_gpu_output_filename; int pixels_per_thread = 8; int32_t blocks_4 = ((n + pixels_per_thread - 1)/pixels_per_thread + (threads_per_block - 1)) / threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; // transfer in gpu_clock.start(); cudaMemcpy(d_source_img, source_img.matrix, n*sizeof(int), cudaMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // create output image at host and device pgm_image gpu_output_img4; copy_pgm_image_size(&source_img, &gpu_output_img4); int32_t *d_output_img4; cudaMalloc((void **)&d_output_img4, n*sizeof(int)); // call kernel gpu_clock.start(); kernel4<<< blocks_4, threads_per_block >>>(filter, filter_dimension, d_source_img, d_output_img4, source_img.width, source_img.height); 
/* reduction to calculate max and min */ n_reduction = n; reduction<<< blocks_reduction, threads_per_block >>>(d_output_img4, d_output_img4, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { reduction<<< blocks_reduction, threads_per_block >>>(d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back cudaMemcpy(&host_max, d_max, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&host_min, d_min, sizeof(int), cudaMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize normalize4<<< blocks_4, threads_per_block >>>(d_output_img4, source_img.width, source_img.height, host_min, host_max); time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); cudaMemcpy(gpu_output_img4.matrix, d_output_img4, n * sizeof(int), cudaMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; print_run(time_cpu, 4, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file4.c_str(), &gpu_output_img4); // Free memory cudaFree(d_output_img4); destroy_pgm_image(&gpu_output_img4); /* * Kernel 4 end * */ /* * Kernel 5 * */ std::string gpu_file5 = "5"+base_gpu_output_filename; int32_t blocks_5 = (n + (threads_per_block - 1)) / threads_per_block; blocks_reduction = (n + (threads_per_block - 1)) / threads_per_block; int32_t *d_source_img2D; bool _2d = false; // 2D Pitch restriction if (source_img.width < 60000 && source_img.height < 60000) { _2d = true; } // transfer in gpu_clock.start(); // Prepare Resource/Texture objects cudaTextureObject_t tex; cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; if (_2d) { // Use 2D Pitch Resource // Reference: 
https://stackoverflow.com/questions/16380883/new-cuda-texture-object-getting-wrong-data-in-2d-case size_t pitch; cudaMallocPitch(&d_source_img2D, &pitch, sizeof(int) * source_img.width, source_img.height); cudaMemcpy2D(d_source_img2D, pitch, source_img.matrix, sizeof(int) * source_img.width, sizeof(int) * source_img.width, source_img.height, cudaMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // Specify Resource details resDesc.resType = cudaResourceTypePitch2D; // for 2D texture resDesc.res.pitch2D.devPtr = d_source_img2D; resDesc.res.pitch2D.pitchInBytes = pitch; resDesc.res.pitch2D.width = source_img.width; resDesc.res.pitch2D.height = source_img.height; resDesc.res.pitch2D.desc.f = cudaChannelFormatKindFloat; resDesc.res.pitch2D.desc.x = 32; // bits per channel resDesc.res.pitch2D.desc.y = 0; } else { cudaMemcpy(d_source_img, source_img.matrix, n*sizeof(int), cudaMemcpyHostToDevice); time_in = gpu_clock.stop() * 1000; // Specify Resource details // Reference: https://devblogs.nvidia.com/cuda-pro-tip-kepler-texture-objects-improve-performance-and-flexibility/ resDesc.resType = cudaResourceTypeLinear; // for 1D texture resDesc.res.linear.devPtr = d_source_img; resDesc.res.linear.desc.f = cudaChannelFormatKindFloat; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.sizeInBytes = n*sizeof(int); } // Create texture object cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL); // create output image at host and device pgm_image gpu_output_img5; copy_pgm_image_size(&source_img, &gpu_output_img5); int32_t *d_output_img5; cudaMalloc((void **)&d_output_img5, n*sizeof(int)); dim3 gridSize((source_img.width + 31) / 32, (source_img.height + 31) / 32); dim3 blockSize(32, 32); // call kernel gpu_clock.start(); if (_2d) { kernel5<<< gridSize, blockSize >>>(filter_dimension, tex, d_output_img5, source_img.width, source_img.height); } else { kernel5_1d<<< blocks_5, threads_per_block >>>(filter_dimension, tex, d_output_img5, source_img.width, 
source_img.height); } /* reduction to calculate max and min */ n_reduction = n; reduction<<< blocks_reduction, threads_per_block >>>(d_output_img5, d_output_img5, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; for (int i = 0; i < num_iterations - 1; i++) { reduction<<< blocks_reduction, threads_per_block >>>(d_max, d_min, n_reduction, d_max, d_min); n_reduction = blocks_reduction; blocks_reduction = (blocks_reduction + (threads_per_block - 1)) / threads_per_block; } // copy min and max back cudaMemcpy(&host_max, d_max, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&host_min, d_min, sizeof(int), cudaMemcpyDeviceToHost); // printf("%d %d", host_max, host_min); // normalize if (_2d) { normalize5<<< gridSize, blockSize >>>(d_output_img5, source_img.width, source_img.height, host_min, host_max); } else { normalize5_1d<<< blocks_5, threads_per_block >>>(d_output_img5, source_img.width, source_img.height, host_min, host_max); } time_kernel = gpu_clock.stop() * 1000; // copy back to host gpu_clock.start(); cudaMemcpy(gpu_output_img5.matrix, d_output_img5, n * sizeof(int), cudaMemcpyDeviceToHost); time_out = gpu_clock.stop() * 1000; print_run(time_cpu, 5, time_kernel, time_in, time_out); save_pgm_to_file(gpu_file5.c_str(), &gpu_output_img5); // Free memory cudaFree(d_source_img2D); cudaFree(d_source_img); cudaFree(d_output_img5); destroy_pgm_image(&gpu_output_img5); /* * Kernel 5 end * */ // Free memory cudaDestroyTextureObject(tex); cudaFree(filter); cudaFree(d_max); cudaFree(d_min); }
8139d6cb70cbf9f51939dc8de5d849ad735a3b17.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the implementation of data sharing environments/ // //===----------------------------------------------------------------------===// #include "omptarget-nvptx.h" #include <stdio.h> // Number of threads in the CUDA block. __device__ static unsigned getNumThreads() { return blockDim.x; } // Thread ID in the CUDA block __device__ static unsigned getThreadId() { return threadIdx.x; } // Warp ID in the CUDA block __device__ static unsigned getWarpId() { return threadIdx.x / WARPSIZE; } // Lane ID in the CUDA warp. __device__ static unsigned getLaneId() { return threadIdx.x % WARPSIZE; } // The CUDA thread ID of the master thread. __device__ static unsigned getMasterThreadId() { unsigned Mask = WARPSIZE - 1; return (getNumThreads() - 1) & (~Mask); } // Find the active threads in the warp - return a mask whose n-th bit is set if // the n-th thread in the warp is active. __device__ static unsigned getActiveThreadsMask() { return __BALLOT_SYNC(0xFFFFFFFF, true); } // Return true if this is the first active thread in the warp. __device__ static bool IsWarpMasterActiveThread() { unsigned long long Mask = getActiveThreadsMask(); unsigned long long ShNum = WARPSIZE - (getThreadId() % WARPSIZE); unsigned long long Sh = Mask << ShNum; // Truncate Sh to the 32 lower bits return (unsigned)Sh == 0; } // Return true if this is the master thread. __device__ static bool IsMasterThread() { return !isSPMDMode() && getMasterThreadId() == getThreadId(); } /// Return the provided size aligned to the size of a pointer. 
__device__ static size_t AlignVal(size_t Val) { const size_t Align = (size_t)sizeof(void *); if (Val & (Align - 1)) { Val += Align; Val &= ~(Align - 1); } return Val; } #define DSFLAG 0 #define DSFLAG_INIT 0 #define DSPRINT(_flag, _str, _args...) \ { \ if (_flag) { \ /*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \ } \ } #define DSPRINT0(_flag, _str) \ { \ if (_flag) { \ /*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \ } \ } // Initialize the shared data structures. This is expected to be called for the // master thread and warp masters. \param RootS: A pointer to the root of the // data sharing stack. \param InitialDataSize: The initial size of the data in // the slot. EXTERN void __kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS, size_t InitialDataSize) { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized."); DSPRINT0(DSFLAG_INIT, "Entering __kmpc_initialize_data_sharing_environment\n"); unsigned WID = getWarpId(); DSPRINT(DSFLAG_INIT, "Warp ID: %d\n", WID); omptarget_nvptx_TeamDescr *teamDescr = &omptarget_nvptx_threadPrivateContext->TeamContext(); __kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID, IsMasterThread()); DataSharingState.SlotPtr[WID] = RootS; DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0]; // We don't need to initialize the frame and active threads. 
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", InitialDataSize); DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (long long)RootS); DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n", (long long)RootS->DataEnd); DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n", (long long)RootS->Next); DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n", (long long)DataSharingState.SlotPtr[WID]); DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n", (long long)DataSharingState.StackPtr[WID]); DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n"); } EXTERN void *__kmpc_data_sharing_environment_begin( __kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack, void **SavedSharedFrame, int32_t *SavedActiveThreads, size_t SharingDataSize, size_t SharingDefaultDataSize, int16_t IsOMPRuntimeInitialized) { DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n"); // If the runtime has been elided, used __shared__ memory for master-worker // data sharing. if (!IsOMPRuntimeInitialized) return (void *)&DataSharingState; DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize); DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize); unsigned WID = getWarpId(); unsigned CurActiveThreads = getActiveThreadsMask(); __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; void * volatile &FrameP = DataSharingState.FramePtr[WID]; int32_t &ActiveT = DataSharingState.ActiveThreads[WID]; DSPRINT0(DSFLAG, "Save current slot/stack values.\n"); // Save the current values. 
*SavedSharedSlot = SlotP; *SavedSharedStack = StackP; *SavedSharedFrame = FrameP; *SavedActiveThreads = ActiveT; DSPRINT(DSFLAG, "Warp ID: %d\n", WID); DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (long long)SlotP); DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (long long)StackP); DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP); DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT); // Only the warp active master needs to grow the stack. if (IsWarpMasterActiveThread()) { // Save the current active threads. ActiveT = CurActiveThreads; // Make sure we use aligned sizes to avoid rematerialization of data. SharingDataSize = AlignVal(SharingDataSize); // FIXME: The default data size can be assumed to be aligned? SharingDefaultDataSize = AlignVal(SharingDefaultDataSize); // Check if we have room for the data in the current slot. const uintptr_t CurrentStartAddress = (uintptr_t)StackP; const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd; const uintptr_t RequiredEndAddress = CurrentStartAddress + (uintptr_t)SharingDataSize; DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize); DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize); DSPRINT(DSFLAG, "Current Start Address %016llx\n", CurrentStartAddress); DSPRINT(DSFLAG, "Current End Address %016llx\n", CurrentEndAddress); DSPRINT(DSFLAG, "Required End Address %016llx\n", RequiredEndAddress); DSPRINT(DSFLAG, "Active Threads %08x\n", ActiveT); // If we require a new slot, allocate it and initialize it (or attempt to // reuse one). Also, set the shared stack and slot pointers to the new // place. If we do not need to grow the stack, just adapt the stack and // frame pointers. if (CurrentEndAddress < RequiredEndAddress) { size_t NewSize = (SharingDataSize > SharingDefaultDataSize) ? SharingDataSize : SharingDefaultDataSize; __kmpc_data_sharing_slot *NewSlot = 0; // Attempt to reuse an existing slot. 
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) { uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd - (uintptr_t)(&ExistingSlot->Data[0]); if (ExistingSlotSize >= NewSize) { DSPRINT(DSFLAG, "Reusing stack slot %016llx\n", (long long)ExistingSlot); NewSlot = ExistingSlot; } else { DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n", (long long)SlotP->Next); free(ExistingSlot); } } if (!NewSlot) { NewSlot = (__kmpc_data_sharing_slot *)malloc( sizeof(__kmpc_data_sharing_slot) + NewSize); DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n", (long long)NewSlot, NewSize); } NewSlot->Next = 0; NewSlot->DataEnd = &NewSlot->Data[NewSize]; SlotP->Next = NewSlot; SlotP = NewSlot; StackP = &NewSlot->Data[SharingDataSize]; FrameP = &NewSlot->Data[0]; } else { // Clean up any old slot that we may still have. The slot producers, do // not eliminate them because that may be used to return data. if (SlotP->Next) { DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n", (long long)SlotP->Next); free(SlotP->Next); SlotP->Next = 0; } FrameP = StackP; StackP = (void *)RequiredEndAddress; } } // FIXME: Need to see the impact of doing it here. __threadfence_block(); DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n"); // All the threads in this warp get the frame they should work with. return FrameP; } EXTERN void __kmpc_data_sharing_environment_end( __kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack, void **SavedSharedFrame, int32_t *SavedActiveThreads, int32_t IsEntryPoint) { DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n"); unsigned WID = getWarpId(); if (IsEntryPoint) { if (IsWarpMasterActiveThread()) { DSPRINT0(DSFLAG, "Doing clean up\n"); // The master thread cleans the saved slot, because this is an environment // only for the master. __kmpc_data_sharing_slot *S = IsMasterThread() ? 
*SavedSharedSlot : DataSharingState.SlotPtr[WID]; if (S->Next) { free(S->Next); S->Next = 0; } } DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n"); return; } int32_t CurActive = getActiveThreadsMask(); // Only the warp master can restore the stack and frame information, and only // if there are no other threads left behind in this environment (i.e. the // warp diverged and returns in different places). This only works if we // assume that threads will converge right after the call site that started // the environment. if (IsWarpMasterActiveThread()) { int32_t &ActiveT = DataSharingState.ActiveThreads[WID]; DSPRINT0(DSFLAG, "Before restoring the stack\n"); // Zero the bits in the mask. If it is still different from zero, then we // have other threads that will return after the current ones. ActiveT &= ~CurActive; DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n", CurActive, ActiveT); if (!ActiveT) { // No other active threads? Great, lets restore the stack. __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; void * volatile &FrameP = DataSharingState.FramePtr[WID]; SlotP = *SavedSharedSlot; StackP = *SavedSharedStack; FrameP = *SavedSharedFrame; ActiveT = *SavedActiveThreads; DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n", (long long)SlotP); DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n", (long long)StackP); DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n", (long long)FrameP); DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT); } } // FIXME: Need to see the impact of doing it here. __threadfence_block(); DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n"); return; } EXTERN void * __kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID, int16_t IsOMPRuntimeInitialized) { DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n"); // If the runtime has been elided, use __shared__ memory for master-worker // data sharing. 
We're reusing the statically allocated data structure // that is used for standard data sharing. if (!IsOMPRuntimeInitialized) return (void *)&DataSharingState; // Get the frame used by the requested thread. unsigned SourceWID = SourceThreadID / WARPSIZE; DSPRINT(DSFLAG, "Source warp: %d\n", SourceWID); void * volatile P = DataSharingState.FramePtr[SourceWID]; DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n"); return P; } //////////////////////////////////////////////////////////////////////////////// // Runtime functions for trunk data sharing scheme. //////////////////////////////////////////////////////////////////////////////// INLINE void data_sharing_init_stack_common() { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized."); omptarget_nvptx_TeamDescr *teamDescr = &omptarget_nvptx_threadPrivateContext->TeamContext(); for (int WID = 0; WID < WARPSIZE; WID++) { __kmpc_data_sharing_slot *RootS = teamDescr->GetPreallocatedSlotAddr(WID); DataSharingState.SlotPtr[WID] = RootS; DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0]; } } // Initialize data sharing data structure. This function needs to be called // once at the beginning of a data sharing context (coincides with the kernel // initialization). This function is called only by the MASTER thread of each // team in non-SPMD mode. EXTERN void __kmpc_data_sharing_init_stack() { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized."); // This function initializes the stack pointer with the pointer to the // statically allocated shared memory slots. The size of a shared memory // slot is pre-determined to be 256 bytes. data_sharing_init_stack_common(); omptarget_nvptx_globalArgs.Init(); } // Initialize data sharing data structure. This function needs to be called // once at the beginning of a data sharing context (coincides with the kernel // initialization). This function is called in SPMD mode only. 
EXTERN void __kmpc_data_sharing_init_stack_spmd() { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized."); // This function initializes the stack pointer with the pointer to the // statically allocated shared memory slots. The size of a shared memory // slot is pre-determined to be 256 bytes. if (threadIdx.x == 0) data_sharing_init_stack_common(); __threadfence_block(); } INLINE void* data_sharing_push_stack_common(size_t PushSize) { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime."); // Only warp active master threads manage the stack. bool IsWarpMaster = (getThreadId() % WARPSIZE) == 0; // Add worst-case padding to DataSize so that future stack allocations are // correctly aligned. const size_t Alignment = 8; PushSize = (PushSize + (Alignment - 1)) / Alignment * Alignment; // Frame pointer must be visible to all workers in the same warp. unsigned WID = getWarpId(); void *volatile &FrameP = DataSharingState.FramePtr[WID]; if (IsWarpMaster) { // SlotP will point to either the shared memory slot or an existing // global memory slot. __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; // Check if we have room for the data in the current slot. const uintptr_t StartAddress = (uintptr_t)StackP; const uintptr_t EndAddress = (uintptr_t)SlotP->DataEnd; const uintptr_t RequestedEndAddress = StartAddress + (uintptr_t)PushSize; // If we requested more data than there is room for in the rest // of the slot then we need to either re-use the next slot, if one exists, // or create a new slot. if (EndAddress < RequestedEndAddress) { __kmpc_data_sharing_slot *NewSlot = 0; size_t NewSize = PushSize; // Allocate at least the default size for each type of slot. // Master is a special case and even though there is only one thread, // it can share more things with the workers. For uniformity, it uses // the full size of a worker warp slot. 
size_t DefaultSlotSize = DS_Worker_Warp_Slot_Size; if (DefaultSlotSize > NewSize) NewSize = DefaultSlotSize; NewSlot = (__kmpc_data_sharing_slot *) SafeMalloc( sizeof(__kmpc_data_sharing_slot) + NewSize, "Global memory slot allocation."); NewSlot->Next = 0; NewSlot->Prev = SlotP; NewSlot->PrevSlotStackPtr = StackP; NewSlot->DataEnd = &NewSlot->Data[0] + NewSize; // Make previous slot point to the newly allocated slot. SlotP->Next = NewSlot; // The current slot becomes the new slot. SlotP = NewSlot; // The stack pointer always points to the next free stack frame. StackP = &NewSlot->Data[0] + PushSize; // The frame pointer always points to the beginning of the frame. FrameP = &NewSlot->Data[0]; } else { // Add the data chunk to the current slot. The frame pointer is set to // point to the start of the new frame held in StackP. FrameP = StackP; // Reset stack pointer to the requested address. StackP = (void *)RequestedEndAddress; } } else { while (!FrameP); } return FrameP; } EXTERN void* __kmpc_data_sharing_coalesced_push_stack(size_t DataSize, int16_t UseSharedMemory) { return data_sharing_push_stack_common(DataSize); } // Called at the time of the kernel initialization. This is used to initilize // the list of references to shared variables and to pre-allocate global storage // for holding the globalized variables. // // By default the globalized variables are stored in global memory. If the // UseSharedMemory is set to true, the runtime will attempt to use shared memory // as long as the size requested fits the pre-allocated size. EXTERN void* __kmpc_data_sharing_push_stack(size_t DataSize, int16_t UseSharedMemory) { // Compute the total memory footprint of the requested data. // The master thread requires a stack only for itself. A worker // thread (which at this point is a warp master) will require // space for the variables of each thread in the warp, // i.e. one DataSize chunk per warp lane. // TODO: change WARPSIZE to the number of active threads in the warp. 
size_t PushSize = (isRuntimeUninitialized() || IsMasterThread()) ? DataSize : WARPSIZE * DataSize; // Compute the start address of the frame of each thread in the warp. uintptr_t FrameStartAddress = (uintptr_t) data_sharing_push_stack_common(PushSize); FrameStartAddress += (uintptr_t) (getLaneId() * DataSize); return (void *)FrameStartAddress; } // Pop the stack and free any memory which can be reclaimed. // // When the pop operation removes the last global memory slot, // reclaim all outstanding global memory slots since it is // likely we have reached the end of the kernel. EXTERN void __kmpc_data_sharing_pop_stack(void *FrameStart) { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime."); __threadfence_block(); if (getThreadId() % WARPSIZE == 0) { unsigned WID = getWarpId(); // Current slot __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; // Pointer to next available stack. void *&StackP = DataSharingState.StackPtr[WID]; // Pop the frame. StackP = FrameStart; // If the current slot is empty, we need to free the slot after the // pop. bool SlotEmpty = (StackP == &SlotP->Data[0]); if (SlotEmpty && SlotP->Prev) { // Before removing the slot we need to reset StackP. StackP = SlotP->PrevSlotStackPtr; // Remove the slot. SlotP = SlotP->Prev; SafeFree(SlotP->Next, "Free slot."); SlotP->Next = 0; } } } // Begin a data sharing context. Maintain a list of references to shared // variables. This list of references to shared variables will be passed // to one or more threads. // In L0 data sharing this is called by master thread. // In L1 data sharing this is called by active warp master thread. EXTERN void __kmpc_begin_sharing_variables(void ***GlobalArgs, size_t nArgs) { omptarget_nvptx_globalArgs.EnsureSize(nArgs); *GlobalArgs = omptarget_nvptx_globalArgs.GetArgs(); } // End a data sharing context. There is no need to have a list of refs // to shared variables because the context in which those variables were // shared has now ended. 
This should clean-up the list of references only // without affecting the actual global storage of the variables. // In L0 data sharing this is called by master thread. // In L1 data sharing this is called by active warp master thread. EXTERN void __kmpc_end_sharing_variables() { omptarget_nvptx_globalArgs.DeInit(); } // This function will return a list of references to global variables. This // is how the workers will get a reference to the globalized variable. The // members of this list will be passed to the outlined parallel function // preserving the order. // Called by all workers. EXTERN void __kmpc_get_shared_variables(void ***GlobalArgs) { *GlobalArgs = omptarget_nvptx_globalArgs.GetArgs(); } // This function is used to init static memory manager. This manager is used to // manage statically allocated global memory. This memory is allocated by the // compiler and used to correctly implement globalization of the variables in // target, teams and distribute regions. EXTERN void __kmpc_get_team_static_memory(const void *buf, size_t size, int16_t is_shared, const void **frame) { if (is_shared) { *frame = buf; return; } if (isSPMDMode()) { if (GetThreadIdInBlock() == 0) { *frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size); } __syncthreads(); return; } ASSERT0(LT_FUSSY, GetThreadIdInBlock() == getMasterThreadId(), "Must be called only in the target master thread."); *frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size); __threadfence(); } EXTERN void __kmpc_restore_team_static_memory(int16_t is_shared) { if (is_shared) return; if (isSPMDMode()) { __syncthreads(); if (GetThreadIdInBlock() == 0) { omptarget_nvptx_simpleMemoryManager.Release(); } return; } __threadfence(); ASSERT0(LT_FUSSY, GetThreadIdInBlock() == getMasterThreadId(), "Must be called only in the target master thread."); omptarget_nvptx_simpleMemoryManager.Release(); }
8139d6cb70cbf9f51939dc8de5d849ad735a3b17.cu
//===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the implementation of data sharing environments/ // //===----------------------------------------------------------------------===// #include "omptarget-nvptx.h" #include <stdio.h> // Number of threads in the CUDA block. __device__ static unsigned getNumThreads() { return blockDim.x; } // Thread ID in the CUDA block __device__ static unsigned getThreadId() { return threadIdx.x; } // Warp ID in the CUDA block __device__ static unsigned getWarpId() { return threadIdx.x / WARPSIZE; } // Lane ID in the CUDA warp. __device__ static unsigned getLaneId() { return threadIdx.x % WARPSIZE; } // The CUDA thread ID of the master thread. __device__ static unsigned getMasterThreadId() { unsigned Mask = WARPSIZE - 1; return (getNumThreads() - 1) & (~Mask); } // Find the active threads in the warp - return a mask whose n-th bit is set if // the n-th thread in the warp is active. __device__ static unsigned getActiveThreadsMask() { return __BALLOT_SYNC(0xFFFFFFFF, true); } // Return true if this is the first active thread in the warp. __device__ static bool IsWarpMasterActiveThread() { unsigned long long Mask = getActiveThreadsMask(); unsigned long long ShNum = WARPSIZE - (getThreadId() % WARPSIZE); unsigned long long Sh = Mask << ShNum; // Truncate Sh to the 32 lower bits return (unsigned)Sh == 0; } // Return true if this is the master thread. __device__ static bool IsMasterThread() { return !isSPMDMode() && getMasterThreadId() == getThreadId(); } /// Return the provided size aligned to the size of a pointer. 
__device__ static size_t AlignVal(size_t Val) { const size_t Align = (size_t)sizeof(void *); if (Val & (Align - 1)) { Val += Align; Val &= ~(Align - 1); } return Val; } #define DSFLAG 0 #define DSFLAG_INIT 0 #define DSPRINT(_flag, _str, _args...) \ { \ if (_flag) { \ /*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \ } \ } #define DSPRINT0(_flag, _str) \ { \ if (_flag) { \ /*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \ } \ } // Initialize the shared data structures. This is expected to be called for the // master thread and warp masters. \param RootS: A pointer to the root of the // data sharing stack. \param InitialDataSize: The initial size of the data in // the slot. EXTERN void __kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS, size_t InitialDataSize) { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized."); DSPRINT0(DSFLAG_INIT, "Entering __kmpc_initialize_data_sharing_environment\n"); unsigned WID = getWarpId(); DSPRINT(DSFLAG_INIT, "Warp ID: %d\n", WID); omptarget_nvptx_TeamDescr *teamDescr = &omptarget_nvptx_threadPrivateContext->TeamContext(); __kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID, IsMasterThread()); DataSharingState.SlotPtr[WID] = RootS; DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0]; // We don't need to initialize the frame and active threads. 
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", InitialDataSize); DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (long long)RootS); DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n", (long long)RootS->DataEnd); DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n", (long long)RootS->Next); DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n", (long long)DataSharingState.SlotPtr[WID]); DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n", (long long)DataSharingState.StackPtr[WID]); DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n"); } EXTERN void *__kmpc_data_sharing_environment_begin( __kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack, void **SavedSharedFrame, int32_t *SavedActiveThreads, size_t SharingDataSize, size_t SharingDefaultDataSize, int16_t IsOMPRuntimeInitialized) { DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n"); // If the runtime has been elided, used __shared__ memory for master-worker // data sharing. if (!IsOMPRuntimeInitialized) return (void *)&DataSharingState; DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize); DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize); unsigned WID = getWarpId(); unsigned CurActiveThreads = getActiveThreadsMask(); __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; void * volatile &FrameP = DataSharingState.FramePtr[WID]; int32_t &ActiveT = DataSharingState.ActiveThreads[WID]; DSPRINT0(DSFLAG, "Save current slot/stack values.\n"); // Save the current values. 
*SavedSharedSlot = SlotP; *SavedSharedStack = StackP; *SavedSharedFrame = FrameP; *SavedActiveThreads = ActiveT; DSPRINT(DSFLAG, "Warp ID: %d\n", WID); DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (long long)SlotP); DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (long long)StackP); DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP); DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT); // Only the warp active master needs to grow the stack. if (IsWarpMasterActiveThread()) { // Save the current active threads. ActiveT = CurActiveThreads; // Make sure we use aligned sizes to avoid rematerialization of data. SharingDataSize = AlignVal(SharingDataSize); // FIXME: The default data size can be assumed to be aligned? SharingDefaultDataSize = AlignVal(SharingDefaultDataSize); // Check if we have room for the data in the current slot. const uintptr_t CurrentStartAddress = (uintptr_t)StackP; const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd; const uintptr_t RequiredEndAddress = CurrentStartAddress + (uintptr_t)SharingDataSize; DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize); DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize); DSPRINT(DSFLAG, "Current Start Address %016llx\n", CurrentStartAddress); DSPRINT(DSFLAG, "Current End Address %016llx\n", CurrentEndAddress); DSPRINT(DSFLAG, "Required End Address %016llx\n", RequiredEndAddress); DSPRINT(DSFLAG, "Active Threads %08x\n", ActiveT); // If we require a new slot, allocate it and initialize it (or attempt to // reuse one). Also, set the shared stack and slot pointers to the new // place. If we do not need to grow the stack, just adapt the stack and // frame pointers. if (CurrentEndAddress < RequiredEndAddress) { size_t NewSize = (SharingDataSize > SharingDefaultDataSize) ? SharingDataSize : SharingDefaultDataSize; __kmpc_data_sharing_slot *NewSlot = 0; // Attempt to reuse an existing slot. 
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) { uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd - (uintptr_t)(&ExistingSlot->Data[0]); if (ExistingSlotSize >= NewSize) { DSPRINT(DSFLAG, "Reusing stack slot %016llx\n", (long long)ExistingSlot); NewSlot = ExistingSlot; } else { DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n", (long long)SlotP->Next); free(ExistingSlot); } } if (!NewSlot) { NewSlot = (__kmpc_data_sharing_slot *)malloc( sizeof(__kmpc_data_sharing_slot) + NewSize); DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n", (long long)NewSlot, NewSize); } NewSlot->Next = 0; NewSlot->DataEnd = &NewSlot->Data[NewSize]; SlotP->Next = NewSlot; SlotP = NewSlot; StackP = &NewSlot->Data[SharingDataSize]; FrameP = &NewSlot->Data[0]; } else { // Clean up any old slot that we may still have. The slot producers, do // not eliminate them because that may be used to return data. if (SlotP->Next) { DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n", (long long)SlotP->Next); free(SlotP->Next); SlotP->Next = 0; } FrameP = StackP; StackP = (void *)RequiredEndAddress; } } // FIXME: Need to see the impact of doing it here. __threadfence_block(); DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n"); // All the threads in this warp get the frame they should work with. return FrameP; } EXTERN void __kmpc_data_sharing_environment_end( __kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack, void **SavedSharedFrame, int32_t *SavedActiveThreads, int32_t IsEntryPoint) { DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n"); unsigned WID = getWarpId(); if (IsEntryPoint) { if (IsWarpMasterActiveThread()) { DSPRINT0(DSFLAG, "Doing clean up\n"); // The master thread cleans the saved slot, because this is an environment // only for the master. __kmpc_data_sharing_slot *S = IsMasterThread() ? 
*SavedSharedSlot : DataSharingState.SlotPtr[WID]; if (S->Next) { free(S->Next); S->Next = 0; } } DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n"); return; } int32_t CurActive = getActiveThreadsMask(); // Only the warp master can restore the stack and frame information, and only // if there are no other threads left behind in this environment (i.e. the // warp diverged and returns in different places). This only works if we // assume that threads will converge right after the call site that started // the environment. if (IsWarpMasterActiveThread()) { int32_t &ActiveT = DataSharingState.ActiveThreads[WID]; DSPRINT0(DSFLAG, "Before restoring the stack\n"); // Zero the bits in the mask. If it is still different from zero, then we // have other threads that will return after the current ones. ActiveT &= ~CurActive; DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n", CurActive, ActiveT); if (!ActiveT) { // No other active threads? Great, lets restore the stack. __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; void * volatile &FrameP = DataSharingState.FramePtr[WID]; SlotP = *SavedSharedSlot; StackP = *SavedSharedStack; FrameP = *SavedSharedFrame; ActiveT = *SavedActiveThreads; DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n", (long long)SlotP); DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n", (long long)StackP); DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n", (long long)FrameP); DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT); } } // FIXME: Need to see the impact of doing it here. __threadfence_block(); DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n"); return; } EXTERN void * __kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID, int16_t IsOMPRuntimeInitialized) { DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n"); // If the runtime has been elided, use __shared__ memory for master-worker // data sharing. 
We're reusing the statically allocated data structure // that is used for standard data sharing. if (!IsOMPRuntimeInitialized) return (void *)&DataSharingState; // Get the frame used by the requested thread. unsigned SourceWID = SourceThreadID / WARPSIZE; DSPRINT(DSFLAG, "Source warp: %d\n", SourceWID); void * volatile P = DataSharingState.FramePtr[SourceWID]; DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n"); return P; } //////////////////////////////////////////////////////////////////////////////// // Runtime functions for trunk data sharing scheme. //////////////////////////////////////////////////////////////////////////////// INLINE void data_sharing_init_stack_common() { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized."); omptarget_nvptx_TeamDescr *teamDescr = &omptarget_nvptx_threadPrivateContext->TeamContext(); for (int WID = 0; WID < WARPSIZE; WID++) { __kmpc_data_sharing_slot *RootS = teamDescr->GetPreallocatedSlotAddr(WID); DataSharingState.SlotPtr[WID] = RootS; DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0]; } } // Initialize data sharing data structure. This function needs to be called // once at the beginning of a data sharing context (coincides with the kernel // initialization). This function is called only by the MASTER thread of each // team in non-SPMD mode. EXTERN void __kmpc_data_sharing_init_stack() { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized."); // This function initializes the stack pointer with the pointer to the // statically allocated shared memory slots. The size of a shared memory // slot is pre-determined to be 256 bytes. data_sharing_init_stack_common(); omptarget_nvptx_globalArgs.Init(); } // Initialize data sharing data structure. This function needs to be called // once at the beginning of a data sharing context (coincides with the kernel // initialization). This function is called in SPMD mode only. 
EXTERN void __kmpc_data_sharing_init_stack_spmd() { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized."); // This function initializes the stack pointer with the pointer to the // statically allocated shared memory slots. The size of a shared memory // slot is pre-determined to be 256 bytes. if (threadIdx.x == 0) data_sharing_init_stack_common(); __threadfence_block(); } INLINE void* data_sharing_push_stack_common(size_t PushSize) { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime."); // Only warp active master threads manage the stack. bool IsWarpMaster = (getThreadId() % WARPSIZE) == 0; // Add worst-case padding to DataSize so that future stack allocations are // correctly aligned. const size_t Alignment = 8; PushSize = (PushSize + (Alignment - 1)) / Alignment * Alignment; // Frame pointer must be visible to all workers in the same warp. unsigned WID = getWarpId(); void *volatile &FrameP = DataSharingState.FramePtr[WID]; if (IsWarpMaster) { // SlotP will point to either the shared memory slot or an existing // global memory slot. __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; // Check if we have room for the data in the current slot. const uintptr_t StartAddress = (uintptr_t)StackP; const uintptr_t EndAddress = (uintptr_t)SlotP->DataEnd; const uintptr_t RequestedEndAddress = StartAddress + (uintptr_t)PushSize; // If we requested more data than there is room for in the rest // of the slot then we need to either re-use the next slot, if one exists, // or create a new slot. if (EndAddress < RequestedEndAddress) { __kmpc_data_sharing_slot *NewSlot = 0; size_t NewSize = PushSize; // Allocate at least the default size for each type of slot. // Master is a special case and even though there is only one thread, // it can share more things with the workers. For uniformity, it uses // the full size of a worker warp slot. 
size_t DefaultSlotSize = DS_Worker_Warp_Slot_Size; if (DefaultSlotSize > NewSize) NewSize = DefaultSlotSize; NewSlot = (__kmpc_data_sharing_slot *) SafeMalloc( sizeof(__kmpc_data_sharing_slot) + NewSize, "Global memory slot allocation."); NewSlot->Next = 0; NewSlot->Prev = SlotP; NewSlot->PrevSlotStackPtr = StackP; NewSlot->DataEnd = &NewSlot->Data[0] + NewSize; // Make previous slot point to the newly allocated slot. SlotP->Next = NewSlot; // The current slot becomes the new slot. SlotP = NewSlot; // The stack pointer always points to the next free stack frame. StackP = &NewSlot->Data[0] + PushSize; // The frame pointer always points to the beginning of the frame. FrameP = &NewSlot->Data[0]; } else { // Add the data chunk to the current slot. The frame pointer is set to // point to the start of the new frame held in StackP. FrameP = StackP; // Reset stack pointer to the requested address. StackP = (void *)RequestedEndAddress; } } else { while (!FrameP); } return FrameP; } EXTERN void* __kmpc_data_sharing_coalesced_push_stack(size_t DataSize, int16_t UseSharedMemory) { return data_sharing_push_stack_common(DataSize); } // Called at the time of the kernel initialization. This is used to initilize // the list of references to shared variables and to pre-allocate global storage // for holding the globalized variables. // // By default the globalized variables are stored in global memory. If the // UseSharedMemory is set to true, the runtime will attempt to use shared memory // as long as the size requested fits the pre-allocated size. EXTERN void* __kmpc_data_sharing_push_stack(size_t DataSize, int16_t UseSharedMemory) { // Compute the total memory footprint of the requested data. // The master thread requires a stack only for itself. A worker // thread (which at this point is a warp master) will require // space for the variables of each thread in the warp, // i.e. one DataSize chunk per warp lane. // TODO: change WARPSIZE to the number of active threads in the warp. 
size_t PushSize = (isRuntimeUninitialized() || IsMasterThread()) ? DataSize : WARPSIZE * DataSize; // Compute the start address of the frame of each thread in the warp. uintptr_t FrameStartAddress = (uintptr_t) data_sharing_push_stack_common(PushSize); FrameStartAddress += (uintptr_t) (getLaneId() * DataSize); return (void *)FrameStartAddress; } // Pop the stack and free any memory which can be reclaimed. // // When the pop operation removes the last global memory slot, // reclaim all outstanding global memory slots since it is // likely we have reached the end of the kernel. EXTERN void __kmpc_data_sharing_pop_stack(void *FrameStart) { ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime."); __threadfence_block(); if (getThreadId() % WARPSIZE == 0) { unsigned WID = getWarpId(); // Current slot __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; // Pointer to next available stack. void *&StackP = DataSharingState.StackPtr[WID]; // Pop the frame. StackP = FrameStart; // If the current slot is empty, we need to free the slot after the // pop. bool SlotEmpty = (StackP == &SlotP->Data[0]); if (SlotEmpty && SlotP->Prev) { // Before removing the slot we need to reset StackP. StackP = SlotP->PrevSlotStackPtr; // Remove the slot. SlotP = SlotP->Prev; SafeFree(SlotP->Next, "Free slot."); SlotP->Next = 0; } } } // Begin a data sharing context. Maintain a list of references to shared // variables. This list of references to shared variables will be passed // to one or more threads. // In L0 data sharing this is called by master thread. // In L1 data sharing this is called by active warp master thread. EXTERN void __kmpc_begin_sharing_variables(void ***GlobalArgs, size_t nArgs) { omptarget_nvptx_globalArgs.EnsureSize(nArgs); *GlobalArgs = omptarget_nvptx_globalArgs.GetArgs(); } // End a data sharing context. There is no need to have a list of refs // to shared variables because the context in which those variables were // shared has now ended. 
This should clean-up the list of references only // without affecting the actual global storage of the variables. // In L0 data sharing this is called by master thread. // In L1 data sharing this is called by active warp master thread. EXTERN void __kmpc_end_sharing_variables() { omptarget_nvptx_globalArgs.DeInit(); } // This function will return a list of references to global variables. This // is how the workers will get a reference to the globalized variable. The // members of this list will be passed to the outlined parallel function // preserving the order. // Called by all workers. EXTERN void __kmpc_get_shared_variables(void ***GlobalArgs) { *GlobalArgs = omptarget_nvptx_globalArgs.GetArgs(); } // This function is used to init static memory manager. This manager is used to // manage statically allocated global memory. This memory is allocated by the // compiler and used to correctly implement globalization of the variables in // target, teams and distribute regions. EXTERN void __kmpc_get_team_static_memory(const void *buf, size_t size, int16_t is_shared, const void **frame) { if (is_shared) { *frame = buf; return; } if (isSPMDMode()) { if (GetThreadIdInBlock() == 0) { *frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size); } __syncthreads(); return; } ASSERT0(LT_FUSSY, GetThreadIdInBlock() == getMasterThreadId(), "Must be called only in the target master thread."); *frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size); __threadfence(); } EXTERN void __kmpc_restore_team_static_memory(int16_t is_shared) { if (is_shared) return; if (isSPMDMode()) { __syncthreads(); if (GetThreadIdInBlock() == 0) { omptarget_nvptx_simpleMemoryManager.Release(); } return; } __threadfence(); ASSERT0(LT_FUSSY, GetThreadIdInBlock() == getMasterThreadId(), "Must be called only in the target master thread."); omptarget_nvptx_simpleMemoryManager.Release(); }
dda6d00d9be80bb4241bb1a6b26bb32f79905a6d.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Azzam Haidar @precisions magmaHalf */ #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #include "magma_internal.h" #if TORCH_HIP_VERSION >= 7500 // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void convert_dp2hp_device( int m, int n, const double *dA, int ldda, magmaHalf *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = __float2half( float(dA[j*ldda]) ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = __float2half( float(dA[j*ldda]) ); } } } } /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. 
Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void convert_hp2dp_device( int m, int n, const magmaHalf *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = double(__half2float( dA[j*ldda] )); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = double(__half2float( dA[j*ldda] )); } } } } /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void convert_sp2hp_device( int m, int n, const float *dA, int ldda, magmaHalf *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = __float2half( dA[j*ldda] ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = __float2half( dA[j*ldda] ); } } } } /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. 
*/ static __device__ void convert_hp2sp_device( int m, int n, const magmaHalf *dA, int ldda, float *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = __half2float( dA[j*ldda] ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = __half2float( dA[j*ldda] ); } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void convert_dp2hp_kernel( int m, int n, const double *dA, int ldda, magmaHalf *dB, int lddb ) { #if TORCH_HIP_VERSION >= 7500 convert_dp2hp_device(m, n, dA, ldda, dB, lddb); #endif } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void convert_hp2dp_kernel( int m, int n, const magmaHalf *dA, int ldda, double *dB, int lddb ) { #if TORCH_HIP_VERSION >= 7500 convert_hp2dp_device(m, n, dA, ldda, dB, lddb); #endif } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void convert_sp2hp_kernel( int m, int n, const float *dA, int ldda, magmaHalf *dB, int lddb ) { #if TORCH_HIP_VERSION >= 7500 convert_sp2hp_device(m, n, dA, ldda, dB, lddb); #endif } /******************************************************************************/ /* kernel wrappers to call the device functions. 
*/ __global__ void convert_hp2sp_kernel( int m, int n, const magmaHalf *dA, int ldda, float *dB, int lddb ) { #if TORCH_HIP_VERSION >= 7500 convert_hp2sp_device(m, n, dA, ldda, dB, lddb); #endif } /***************************************************************************//** Purpose ------- HLACONVERT convert all or part of a two-dimensional matrix dA to another matrix dB. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[out] dB COMPLEX_16 array, dimension (LDDB,N) The M-by-N matrix dB. On exit, dB = dA in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laconvert *******************************************************************************/ extern "C" void magmablas_convert_sp2hp( magma_int_t m, magma_int_t n, const float *dA, magma_int_t ldda, magmaHalf *dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; else if ( lddb < max(1,m)) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? 
m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); hipLaunchKernelGGL(( convert_sp2hp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } /***************************************************************************/ extern "C" void magmablas_convert_hp2sp( magma_int_t m, magma_int_t n, const magmaHalf *dA, magma_int_t ldda, float *dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; else if ( lddb < max(1,m)) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); hipLaunchKernelGGL(( convert_hp2sp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } /***************************************************************************/ extern "C" void magmablas_convert_dp2hp( magma_int_t m, magma_int_t n, const double *dA, magma_int_t ldda, magmaHalf *dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; else if ( lddb < max(1,m)) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); hipLaunchKernelGGL(( convert_dp2hp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } /***************************************************************************/ extern "C" void magmablas_convert_hp2dp( magma_int_t m, magma_int_t n, const magmaHalf *dA, magma_int_t ldda, double *dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; else if ( lddb < max(1,m)) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); hipLaunchKernelGGL(( convert_hp2dp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } #endif
dda6d00d9be80bb4241bb1a6b26bb32f79905a6d.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Azzam Haidar @precisions magmaHalf */ #include <cuda.h> // for CUDA_VERSION #include "magma_internal.h" #if CUDA_VERSION >= 7500 // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void convert_dp2hp_device( int m, int n, const double *dA, int ldda, magmaHalf *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = __float2half( float(dA[j*ldda]) ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = __float2half( float(dA[j*ldda]) ); } } } } /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. 
Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void convert_hp2dp_device( int m, int n, const magmaHalf *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = double(__half2float( dA[j*ldda] )); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = double(__half2float( dA[j*ldda] )); } } } } /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void convert_sp2hp_device( int m, int n, const float *dA, int ldda, magmaHalf *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = __float2half( dA[j*ldda] ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = __float2half( dA[j*ldda] ); } } } } /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. 
*/ static __device__ void convert_hp2sp_device( int m, int n, const magmaHalf *dA, int ldda, float *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = __half2float( dA[j*ldda] ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = __half2float( dA[j*ldda] ); } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void convert_dp2hp_kernel( int m, int n, const double *dA, int ldda, magmaHalf *dB, int lddb ) { #if CUDA_VERSION >= 7500 convert_dp2hp_device(m, n, dA, ldda, dB, lddb); #endif } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void convert_hp2dp_kernel( int m, int n, const magmaHalf *dA, int ldda, double *dB, int lddb ) { #if CUDA_VERSION >= 7500 convert_hp2dp_device(m, n, dA, ldda, dB, lddb); #endif } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void convert_sp2hp_kernel( int m, int n, const float *dA, int ldda, magmaHalf *dB, int lddb ) { #if CUDA_VERSION >= 7500 convert_sp2hp_device(m, n, dA, ldda, dB, lddb); #endif } /******************************************************************************/ /* kernel wrappers to call the device functions. 
*/ __global__ void convert_hp2sp_kernel( int m, int n, const magmaHalf *dA, int ldda, float *dB, int lddb ) { #if CUDA_VERSION >= 7500 convert_hp2sp_device(m, n, dA, ldda, dB, lddb); #endif } /***************************************************************************//** Purpose ------- HLACONVERT convert all or part of a two-dimensional matrix dA to another matrix dB. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[out] dB COMPLEX_16 array, dimension (LDDB,N) The M-by-N matrix dB. On exit, dB = dA in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laconvert *******************************************************************************/ extern "C" void magmablas_convert_sp2hp( magma_int_t m, magma_int_t n, const float *dA, magma_int_t ldda, magmaHalf *dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; else if ( lddb < max(1,m)) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? 
m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); convert_sp2hp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } /***************************************************************************/ extern "C" void magmablas_convert_hp2sp( magma_int_t m, magma_int_t n, const magmaHalf *dA, magma_int_t ldda, float *dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; else if ( lddb < max(1,m)) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); convert_hp2sp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } /***************************************************************************/ extern "C" void magmablas_convert_dp2hp( magma_int_t m, magma_int_t n, const double *dA, magma_int_t ldda, magmaHalf *dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; else if ( lddb < max(1,m)) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); convert_dp2hp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } /***************************************************************************/ extern "C" void magmablas_convert_hp2dp( magma_int_t m, magma_int_t n, const magmaHalf *dA, magma_int_t ldda, double *dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; else if ( lddb < max(1,m)) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); convert_hp2dp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } #endif
b3f5f6c04dd339aee58a52e2f53641f9a166cf41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/system/hip/experimental/pinned_allocator.h> #include <thrust/copy.h> #include <vector> #include <assert.h> #include "ungapped_extender_gpu.h" #include "group_loader.h" #include "packed_alphabet_code.h" #include "score_matrix.h" #include "cuda_common.h" const int debug_q_p = 119; const int debug_db_p = 8963 + cuda_common::kOneSideMarginSize; using namespace std; __device__ int UngappedExtendOneSideCacheRef( packed_alphabet_code::PackedAlphabetCode sequence0_multi_code_cache, packed_alphabet_code::PackedAlphabetCode sequence1_multi_code_cache, const AlphabetCoder::Code sequence_delimiter, bool reversed, const int* __restrict__ score_matrix, const uint32_t number_letters, const int cutoff, const int trigger, int *score_ptr, int *best_score_ptr) { int score = *score_ptr; int best_score = *best_score_ptr; int threshold = best_score - cutoff; bool stop_flag = false; const uint32_t mask = (1 << 8) - 1; uint32_t sift_offset = reversed ? 8 * (packed_alphabet_code::kNumberCodesInBlock - 1) : 0; for (uint32_t i = 0; i < packed_alphabet_code::kNumberCodesInBlock; ++i) { const uint32_t shift_size = (8 * i); const uint32_t shift = reversed ? sift_offset - shift_size : shift_size; const AlphabetCoder::Code s0_c = (sequence0_multi_code_cache >> shift) & mask; const AlphabetCoder::Code s1_c = (sequence1_multi_code_cache >> shift) & mask; stop_flag = stop_flag || s0_c == sequence_delimiter || s1_c == sequence_delimiter; score += stop_flag ? 0 : score_matrix[s0_c * number_letters + s1_c]; best_score = score > best_score ? 
score : best_score; threshold = best_score - cutoff; stop_flag = stop_flag || score <= threshold || best_score > trigger; if (stop_flag) { break; } } *score_ptr = score; *best_score_ptr = best_score; return stop_flag; //return true; } __device__ int UngappedExtendOneSideDeviceRef( const packed_alphabet_code::PackedAlphabetCode* sequence0_multi_code, const uint32_t s0_start_p, const packed_alphabet_code::PackedAlphabetCode* sequence1_multi_code, const uint32_t s1_start_p, const AlphabetCoder::Code sequence_delimiter, bool reversed, const int* __restrict__ score_matrix, const uint32_t number_letters, const int cutoff, const int trigger, int current_best_score) { int score = current_best_score; int best_score = current_best_score; int increment = reversed ? -1 : 1; const uint32_t s0_multi_code_start_p = s0_start_p / packed_alphabet_code::kNumberCodesInBlock; const uint32_t s1_multi_code_start_p = s1_start_p / packed_alphabet_code::kNumberCodesInBlock; const uint32_t s0_multi_code_p_offset = s0_start_p - s0_multi_code_start_p * packed_alphabet_code::kNumberCodesInBlock; const uint32_t s1_multi_code_p_offset = s1_start_p - s1_multi_code_start_p * packed_alphabet_code::kNumberCodesInBlock; #if 0 if ((debug_db_p - 1) == s1_start_p || debug_db_p == s1_start_p) { printf("block p %d, %d\n", s0_multi_code_start_p, s1_multi_code_start_p); } #endif uint32_t s0_multi_code_p = s0_multi_code_start_p; uint32_t s1_multi_code_p = s1_multi_code_start_p; packed_alphabet_code::PackedAlphabetCode s0_cache[2]; packed_alphabet_code::PackedAlphabetCode s1_cache[2]; const uint32_t s0_shift_size = (8 * s0_multi_code_p_offset); const uint32_t s1_shift_size = (8 * s1_multi_code_p_offset); const uint32_t sift_offset = reversed ? 8 * (packed_alphabet_code::kNumberCodesInBlock - 1) : 0; const uint32_t s0_sift0 = reversed ? sift_offset - s0_shift_size : s0_shift_size; const uint32_t s0_sift1 = 8 * packed_alphabet_code::kNumberCodesInBlock - s0_sift0; const uint32_t s1_sift0 = reversed ? 
sift_offset - s1_shift_size : s1_shift_size; const uint32_t s1_sift1 = 8 * packed_alphabet_code::kNumberCodesInBlock - s1_sift0; s0_cache[0] = sequence0_multi_code[s0_multi_code_p]; s0_multi_code_p += increment; s1_cache[0] = sequence1_multi_code[s1_multi_code_p]; s1_multi_code_p += increment; #if 0 if ((debug_db_p - 1) == s1_start_p || debug_db_p == s1_start_p) { printf("cached0 %ld, %ld\n", s0_cache[0], s1_cache[0]); } #endif if (!reversed) { s0_cache[0] >>= s0_sift0; s1_cache[0] >>= s1_sift0; } else { s0_cache[0] <<= s0_sift0; s1_cache[0] <<= s1_sift0; } #if 0 if ((blockDim.x * blockIdx.x + threadIdx.x) == 3) { printf("edited cached0 %d, %d\n", s0_cache[0], s1_cache[0]); } #endif while (1) { #if 0 if (2321 <= s0_multi_code_p || 11959 <= s1_multi_code_p) { printf("job id : %d \n", (blockDim.x * blockIdx.x + threadIdx.x)); } #endif s0_cache[1] = sequence0_multi_code[s0_multi_code_p]; s0_multi_code_p += increment; s1_cache[1] = sequence1_multi_code[s1_multi_code_p]; s1_multi_code_p += increment; #if 0 if ((blockDim.x * blockIdx.x + threadIdx.x) == 3) { printf("cached1 %d, %d\n", s0_cache[1], s1_cache[1]); } #endif packed_alphabet_code::PackedAlphabetCode tmp = 0; if (!reversed) { tmp = s0_cache[1] << s0_sift1; s0_cache[0] |= tmp; s0_cache[1] >>= s0_sift0; tmp = s1_cache[1] << s1_sift1; s1_cache[0] |= tmp; s1_cache[1] >>= s1_sift0; } else { tmp = s0_cache[1] >> s0_sift1; s0_cache[0] |= tmp; s0_cache[1] <<= s0_sift0; tmp = s1_cache[1] >> s1_sift1; s1_cache[0] |= tmp; s1_cache[1] <<= s1_sift0; } #if 0 if ((debug_db_p - 1) == s1_start_p || debug_db_p == s1_start_p) { if (reversed) { printf("reverse "); } else { printf("foward "); } printf("multi_code %ld, %ld\n", s0_cache[0], s1_cache[0]); } #endif if (UngappedExtendOneSideCacheRef(s0_cache[0], s1_cache[0], sequence_delimiter, reversed, score_matrix, number_letters, cutoff, trigger, &score, &best_score)) { break; } s0_cache[0] = s0_cache[1]; s1_cache[0] = s1_cache[1]; } return best_score; } __global__ void 
UngappedExtendKernelRef( const packed_alphabet_code::PackedAlphabetCode* sequence0_multi_code, const packed_alphabet_code::PackedAlphabetCode* sequence1_multi_code, const uint32_t number_extensions, const uint32_t* query_ids, const uint32_t * sequence0_positions, const uint32_t* sequence1_positions, const AlphabetCoder::Code sequence_delimiter, const int* __restrict__ score_matrix, const uint32_t number_letters, const int* cutoffs, const int* triggers, int* best_scores) { const uint32_t thread_id = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t thread_id_skip = gridDim.x * blockDim.x; for (uint32_t i = thread_id; i < number_extensions; i += thread_id_skip) { const uint32_t s0_p = sequence0_positions[i] + cuda_common::kOneSideMarginSize; const uint32_t s1_p = sequence1_positions[i] + cuda_common::kOneSideMarginSize; const uint32_t query_id = query_ids[i]; const int cutoff = cutoffs[query_id]; const int trigger = triggers[query_id]; int current_best_score = 0; current_best_score = UngappedExtendOneSideDeviceRef( sequence0_multi_code, s0_p - 1, sequence1_multi_code, s1_p - 1, sequence_delimiter, true, score_matrix, number_letters, cutoff, trigger, current_best_score); if (current_best_score <= trigger) { current_best_score = UngappedExtendOneSideDeviceRef( sequence0_multi_code, s0_p, sequence1_multi_code, s1_p, sequence_delimiter, false, score_matrix, number_letters, cutoff, trigger, current_best_score); } best_scores[i] = current_best_score; } return; } */
b3f5f6c04dd339aee58a52e2f53641f9a166cf41.cu
/* #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/system/cuda/experimental/pinned_allocator.h> #include <thrust/copy.h> #include <vector> #include <assert.h> #include "ungapped_extender_gpu.h" #include "group_loader.h" #include "packed_alphabet_code.h" #include "score_matrix.h" #include "cuda_common.h" const int debug_q_p = 119; const int debug_db_p = 8963 + cuda_common::kOneSideMarginSize; using namespace std; __device__ int UngappedExtendOneSideCacheRef( packed_alphabet_code::PackedAlphabetCode sequence0_multi_code_cache, packed_alphabet_code::PackedAlphabetCode sequence1_multi_code_cache, const AlphabetCoder::Code sequence_delimiter, bool reversed, const int* __restrict__ score_matrix, const uint32_t number_letters, const int cutoff, const int trigger, int *score_ptr, int *best_score_ptr) { int score = *score_ptr; int best_score = *best_score_ptr; int threshold = best_score - cutoff; bool stop_flag = false; const uint32_t mask = (1 << 8) - 1; uint32_t sift_offset = reversed ? 8 * (packed_alphabet_code::kNumberCodesInBlock - 1) : 0; for (uint32_t i = 0; i < packed_alphabet_code::kNumberCodesInBlock; ++i) { const uint32_t shift_size = (8 * i); const uint32_t shift = reversed ? sift_offset - shift_size : shift_size; const AlphabetCoder::Code s0_c = (sequence0_multi_code_cache >> shift) & mask; const AlphabetCoder::Code s1_c = (sequence1_multi_code_cache >> shift) & mask; stop_flag = stop_flag || s0_c == sequence_delimiter || s1_c == sequence_delimiter; score += stop_flag ? 0 : score_matrix[s0_c * number_letters + s1_c]; best_score = score > best_score ? 
score : best_score; threshold = best_score - cutoff; stop_flag = stop_flag || score <= threshold || best_score > trigger; if (stop_flag) { break; } } *score_ptr = score; *best_score_ptr = best_score; return stop_flag; //return true; } __device__ int UngappedExtendOneSideDeviceRef( const packed_alphabet_code::PackedAlphabetCode* sequence0_multi_code, const uint32_t s0_start_p, const packed_alphabet_code::PackedAlphabetCode* sequence1_multi_code, const uint32_t s1_start_p, const AlphabetCoder::Code sequence_delimiter, bool reversed, const int* __restrict__ score_matrix, const uint32_t number_letters, const int cutoff, const int trigger, int current_best_score) { int score = current_best_score; int best_score = current_best_score; int increment = reversed ? -1 : 1; const uint32_t s0_multi_code_start_p = s0_start_p / packed_alphabet_code::kNumberCodesInBlock; const uint32_t s1_multi_code_start_p = s1_start_p / packed_alphabet_code::kNumberCodesInBlock; const uint32_t s0_multi_code_p_offset = s0_start_p - s0_multi_code_start_p * packed_alphabet_code::kNumberCodesInBlock; const uint32_t s1_multi_code_p_offset = s1_start_p - s1_multi_code_start_p * packed_alphabet_code::kNumberCodesInBlock; #if 0 if ((debug_db_p - 1) == s1_start_p || debug_db_p == s1_start_p) { printf("block p %d, %d\n", s0_multi_code_start_p, s1_multi_code_start_p); } #endif uint32_t s0_multi_code_p = s0_multi_code_start_p; uint32_t s1_multi_code_p = s1_multi_code_start_p; packed_alphabet_code::PackedAlphabetCode s0_cache[2]; packed_alphabet_code::PackedAlphabetCode s1_cache[2]; const uint32_t s0_shift_size = (8 * s0_multi_code_p_offset); const uint32_t s1_shift_size = (8 * s1_multi_code_p_offset); const uint32_t sift_offset = reversed ? 8 * (packed_alphabet_code::kNumberCodesInBlock - 1) : 0; const uint32_t s0_sift0 = reversed ? sift_offset - s0_shift_size : s0_shift_size; const uint32_t s0_sift1 = 8 * packed_alphabet_code::kNumberCodesInBlock - s0_sift0; const uint32_t s1_sift0 = reversed ? 
sift_offset - s1_shift_size : s1_shift_size; const uint32_t s1_sift1 = 8 * packed_alphabet_code::kNumberCodesInBlock - s1_sift0; s0_cache[0] = sequence0_multi_code[s0_multi_code_p]; s0_multi_code_p += increment; s1_cache[0] = sequence1_multi_code[s1_multi_code_p]; s1_multi_code_p += increment; #if 0 if ((debug_db_p - 1) == s1_start_p || debug_db_p == s1_start_p) { printf("cached0 %ld, %ld\n", s0_cache[0], s1_cache[0]); } #endif if (!reversed) { s0_cache[0] >>= s0_sift0; s1_cache[0] >>= s1_sift0; } else { s0_cache[0] <<= s0_sift0; s1_cache[0] <<= s1_sift0; } #if 0 if ((blockDim.x * blockIdx.x + threadIdx.x) == 3) { printf("edited cached0 %d, %d\n", s0_cache[0], s1_cache[0]); } #endif while (1) { #if 0 if (2321 <= s0_multi_code_p || 11959 <= s1_multi_code_p) { printf("job id : %d \n", (blockDim.x * blockIdx.x + threadIdx.x)); } #endif s0_cache[1] = sequence0_multi_code[s0_multi_code_p]; s0_multi_code_p += increment; s1_cache[1] = sequence1_multi_code[s1_multi_code_p]; s1_multi_code_p += increment; #if 0 if ((blockDim.x * blockIdx.x + threadIdx.x) == 3) { printf("cached1 %d, %d\n", s0_cache[1], s1_cache[1]); } #endif packed_alphabet_code::PackedAlphabetCode tmp = 0; if (!reversed) { tmp = s0_cache[1] << s0_sift1; s0_cache[0] |= tmp; s0_cache[1] >>= s0_sift0; tmp = s1_cache[1] << s1_sift1; s1_cache[0] |= tmp; s1_cache[1] >>= s1_sift0; } else { tmp = s0_cache[1] >> s0_sift1; s0_cache[0] |= tmp; s0_cache[1] <<= s0_sift0; tmp = s1_cache[1] >> s1_sift1; s1_cache[0] |= tmp; s1_cache[1] <<= s1_sift0; } #if 0 if ((debug_db_p - 1) == s1_start_p || debug_db_p == s1_start_p) { if (reversed) { printf("reverse "); } else { printf("foward "); } printf("multi_code %ld, %ld\n", s0_cache[0], s1_cache[0]); } #endif if (UngappedExtendOneSideCacheRef(s0_cache[0], s1_cache[0], sequence_delimiter, reversed, score_matrix, number_letters, cutoff, trigger, &score, &best_score)) { break; } s0_cache[0] = s0_cache[1]; s1_cache[0] = s1_cache[1]; } return best_score; } __global__ void 
UngappedExtendKernelRef( const packed_alphabet_code::PackedAlphabetCode* sequence0_multi_code, const packed_alphabet_code::PackedAlphabetCode* sequence1_multi_code, const uint32_t number_extensions, const uint32_t* query_ids, const uint32_t * sequence0_positions, const uint32_t* sequence1_positions, const AlphabetCoder::Code sequence_delimiter, const int* __restrict__ score_matrix, const uint32_t number_letters, const int* cutoffs, const int* triggers, int* best_scores) { const uint32_t thread_id = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t thread_id_skip = gridDim.x * blockDim.x; for (uint32_t i = thread_id; i < number_extensions; i += thread_id_skip) { const uint32_t s0_p = sequence0_positions[i] + cuda_common::kOneSideMarginSize; const uint32_t s1_p = sequence1_positions[i] + cuda_common::kOneSideMarginSize; const uint32_t query_id = query_ids[i]; const int cutoff = cutoffs[query_id]; const int trigger = triggers[query_id]; int current_best_score = 0; current_best_score = UngappedExtendOneSideDeviceRef( sequence0_multi_code, s0_p - 1, sequence1_multi_code, s1_p - 1, sequence_delimiter, true, score_matrix, number_letters, cutoff, trigger, current_best_score); if (current_best_score <= trigger) { current_best_score = UngappedExtendOneSideDeviceRef( sequence0_multi_code, s0_p, sequence1_multi_code, s1_p, sequence_delimiter, false, score_matrix, number_letters, cutoff, trigger, current_best_score); } best_scores[i] = current_best_score; } return; } */
8c44a619d3678dcfd0ca33607c87fea2f8da9c80.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* A CUDA program that demonstrates how to compute a stereo disparity map using * SIMD SAD (Sum of Absolute Difference) intrinsics */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, kernels #include <hip/hip_runtime.h> #include "sdkHelper.h" #include "stereoDisparity_kernel.cuh" // includes, project #include <helper_functions.h> // helper for shared that are common to CUDA Samples #include <helper_cuda.h> // helper for checking cuda initialization and error checking #include <helper_string.h> // helper functions for string parsing static const char *sSDKsample = "[stereoDisparity]\0"; int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
CUDA Sample for calculating depth maps //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { hipDeviceProp_t deviceProp; deviceProp.major = 0; deviceProp.minor = 0; int dev = 0; // This will pick the best possible CUDA capable device dev = findCudaDevice(argc, (const char **)argv); checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); // Statistics about the GPU device printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n", deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor); int version = (deviceProp.major * 0x10 + deviceProp.minor); if (version < 0x20) { printf("%s: requires a minimum CUDA compute 2.0 capability\n", sSDKsample); exit(EXIT_SUCCESS); } StopWatchInterface *timer; sdkCreateTimer(&timer); // Search parameters int minDisp = -16; int maxDisp = 0; // Load image data //allocate mem for the images on host side //initialize pointers to NULL to request lib call to allocate as needed // PPM images are loaded into 4 byte/pixel memory (RGBX) unsigned char *h_img0 = NULL; unsigned char *h_img1 = NULL; unsigned int w, h; char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]); char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]); printf("Loaded <%s> as image 0\n", fname0); if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h)) { fprintf(stderr, "Failed to load <%s>\n", fname0); } printf("Loaded <%s> as image 1\n", fname1); if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h)) { fprintf(stderr, "Failed to load <%s>\n", fname1); } dim3 numThreads = dim3(blockSize_x, blockSize_y, 1); dim3 numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y)); unsigned int numData = w*h; unsigned int memSize = sizeof(int) * numData; //allocate mem for the result on host side unsigned int *h_odata = (unsigned int *)malloc(memSize); //initialize the memory for (unsigned int i = 0; i < numData; i++) h_odata[i] = 0; // allocate device memory for result unsigned 
int *d_odata, *d_img0, *d_img1; checkCudaErrors(hipMalloc((void **) &d_odata, memSize)); checkCudaErrors(hipMalloc((void **) &d_img0, memSize)); checkCudaErrors(hipMalloc((void **) &d_img1, memSize)); // copy host memory to device to initialize to zeros checkCudaErrors(hipMemcpy(d_img0, h_img0, memSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_img1, h_img1, memSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_odata, h_odata, memSize, hipMemcpyHostToDevice)); size_t offset = 0; hipChannelFormatDesc ca_desc0 = hipCreateChannelDesc<unsigned int>(); hipChannelFormatDesc ca_desc1 = hipCreateChannelDesc<unsigned int>(); tex2Dleft.addressMode[0] = hipAddressModeClamp; tex2Dleft.addressMode[1] = hipAddressModeClamp; tex2Dleft.filterMode = hipFilterModePoint; tex2Dleft.normalized = false; tex2Dright.addressMode[0] = hipAddressModeClamp; tex2Dright.addressMode[1] = hipAddressModeClamp; tex2Dright.filterMode = hipFilterModePoint; tex2Dright.normalized = false; checkCudaErrors(hipBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4)); assert(offset == 0); checkCudaErrors(hipBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4)); assert(offset == 0); // First run the warmup kernel (which we'll use to get the GPU in the correct max power state hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, 0, d_img0, d_img1, d_odata, w, h, minDisp, maxDisp); hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); printf("Launching CUDA stereoDisparityKernel()\n"); // Record the start event checkCudaErrors(hipEventRecord(start, NULL)); // launch the stereoDisparity kernel hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, 0, d_img0, d_img1, d_odata, w, h, minDisp, maxDisp); // Record the stop event checkCudaErrors(hipEventRecord(stop, NULL)); // Wait for the stop event 
to complete checkCudaErrors(hipEventSynchronize(stop)); // Check to make sure the kernel didn't fail getLastCudaError("Kernel execution failed"); float msecTotal = 0.0f; checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); //Copy result from device to host for verification checkCudaErrors(hipMemcpy(h_odata, d_odata, memSize, hipMemcpyDeviceToHost)); printf("Input Size [%dx%d], ", w, h); printf("Kernel size [%dx%d], ", (2*RAD+1), (2*RAD+1)); printf("Disparities [%d:%d]\n", minDisp, maxDisp); printf("GPU processing time : %.4f (ms)\n", msecTotal); printf("Pixel throughput : %.3f Mpixels/sec\n", ((float)(w *h*1000.f)/msecTotal)/1000000); // calculate sum of resultant GPU image unsigned int checkSum = 0; for (unsigned int i=0 ; i<w *h ; i++) { checkSum += h_odata[i]; } printf("GPU Checksum = %u, ", checkSum); // write out the resulting disparity image. unsigned char *dispOut = (unsigned char *)malloc(numData); int mult = 20; const char *fnameOut = "output_GPU.pgm"; for (unsigned int i=0; i<numData; i++) { dispOut[i] = (int)h_odata[i]*mult; } printf("GPU image: <%s>\n", fnameOut); sdkSavePGM(fnameOut, dispOut, w, h); //compute reference solution printf("Computing CPU reference...\n"); cpu_gold_stereo((unsigned int *)h_img0, (unsigned int *)h_img1, (unsigned int *)h_odata, w, h, minDisp, maxDisp); unsigned int cpuCheckSum = 0; for (unsigned int i=0 ; i<w *h ; i++) { cpuCheckSum += h_odata[i]; } printf("CPU Checksum = %u, ", cpuCheckSum); const char *cpuFnameOut = "output_CPU.pgm"; for (unsigned int i=0; i<numData; i++) { dispOut[i] = (int)h_odata[i]*mult; } printf("CPU image: <%s>\n", cpuFnameOut); sdkSavePGM(cpuFnameOut, dispOut, w, h); // cleanup memory checkCudaErrors(hipFree(d_odata)); checkCudaErrors(hipFree(d_img0)); checkCudaErrors(hipFree(d_img1)); if (h_odata != NULL) free(h_odata); if (h_img0 != NULL) free(h_img0); if (h_img1 != NULL) free(h_img1); if (dispOut != NULL) free(dispOut); sdkDeleteTimer(&timer); exit((checkSum == cpuCheckSum) ? 
EXIT_SUCCESS : EXIT_FAILURE); }
8c44a619d3678dcfd0ca33607c87fea2f8da9c80.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* A CUDA program that demonstrates how to compute a stereo disparity map using * SIMD SAD (Sum of Absolute Difference) intrinsics */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, kernels #include <cuda_runtime.h> #include "sdkHelper.h" #include "stereoDisparity_kernel.cuh" // includes, project #include <helper_functions.h> // helper for shared that are common to CUDA Samples #include <helper_cuda.h> // helper for checking cuda initialization and error checking #include <helper_string.h> // helper functions for string parsing static const char *sSDKsample = "[stereoDisparity]\0"; int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
CUDA Sample for calculating depth maps //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { cudaDeviceProp deviceProp; deviceProp.major = 0; deviceProp.minor = 0; int dev = 0; // This will pick the best possible CUDA capable device dev = findCudaDevice(argc, (const char **)argv); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); // Statistics about the GPU device printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n", deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor); int version = (deviceProp.major * 0x10 + deviceProp.minor); if (version < 0x20) { printf("%s: requires a minimum CUDA compute 2.0 capability\n", sSDKsample); exit(EXIT_SUCCESS); } StopWatchInterface *timer; sdkCreateTimer(&timer); // Search parameters int minDisp = -16; int maxDisp = 0; // Load image data //allocate mem for the images on host side //initialize pointers to NULL to request lib call to allocate as needed // PPM images are loaded into 4 byte/pixel memory (RGBX) unsigned char *h_img0 = NULL; unsigned char *h_img1 = NULL; unsigned int w, h; char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]); char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]); printf("Loaded <%s> as image 0\n", fname0); if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h)) { fprintf(stderr, "Failed to load <%s>\n", fname0); } printf("Loaded <%s> as image 1\n", fname1); if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h)) { fprintf(stderr, "Failed to load <%s>\n", fname1); } dim3 numThreads = dim3(blockSize_x, blockSize_y, 1); dim3 numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y)); unsigned int numData = w*h; unsigned int memSize = sizeof(int) * numData; //allocate mem for the result on host side unsigned int *h_odata = (unsigned int *)malloc(memSize); //initialize the memory for (unsigned int i = 0; i < numData; i++) h_odata[i] = 0; // allocate device memory for result unsigned 
int *d_odata, *d_img0, *d_img1; checkCudaErrors(cudaMalloc((void **) &d_odata, memSize)); checkCudaErrors(cudaMalloc((void **) &d_img0, memSize)); checkCudaErrors(cudaMalloc((void **) &d_img1, memSize)); // copy host memory to device to initialize to zeros checkCudaErrors(cudaMemcpy(d_img0, h_img0, memSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_img1, h_img1, memSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_odata, h_odata, memSize, cudaMemcpyHostToDevice)); size_t offset = 0; cudaChannelFormatDesc ca_desc0 = cudaCreateChannelDesc<unsigned int>(); cudaChannelFormatDesc ca_desc1 = cudaCreateChannelDesc<unsigned int>(); tex2Dleft.addressMode[0] = cudaAddressModeClamp; tex2Dleft.addressMode[1] = cudaAddressModeClamp; tex2Dleft.filterMode = cudaFilterModePoint; tex2Dleft.normalized = false; tex2Dright.addressMode[0] = cudaAddressModeClamp; tex2Dright.addressMode[1] = cudaAddressModeClamp; tex2Dright.filterMode = cudaFilterModePoint; tex2Dright.normalized = false; checkCudaErrors(cudaBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4)); assert(offset == 0); checkCudaErrors(cudaBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4)); assert(offset == 0); // First run the warmup kernel (which we'll use to get the GPU in the correct max power state stereoDisparityKernel<<<numBlocks, numThreads>>>(d_img0, d_img1, d_odata, w, h, minDisp, maxDisp); cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); printf("Launching CUDA stereoDisparityKernel()\n"); // Record the start event checkCudaErrors(cudaEventRecord(start, NULL)); // launch the stereoDisparity kernel stereoDisparityKernel<<<numBlocks, numThreads>>>(d_img0, d_img1, d_odata, w, h, minDisp, maxDisp); // Record the stop event checkCudaErrors(cudaEventRecord(stop, NULL)); // Wait for the stop event to complete 
checkCudaErrors(cudaEventSynchronize(stop)); // Check to make sure the kernel didn't fail getLastCudaError("Kernel execution failed"); float msecTotal = 0.0f; checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); //Copy result from device to host for verification checkCudaErrors(cudaMemcpy(h_odata, d_odata, memSize, cudaMemcpyDeviceToHost)); printf("Input Size [%dx%d], ", w, h); printf("Kernel size [%dx%d], ", (2*RAD+1), (2*RAD+1)); printf("Disparities [%d:%d]\n", minDisp, maxDisp); printf("GPU processing time : %.4f (ms)\n", msecTotal); printf("Pixel throughput : %.3f Mpixels/sec\n", ((float)(w *h*1000.f)/msecTotal)/1000000); // calculate sum of resultant GPU image unsigned int checkSum = 0; for (unsigned int i=0 ; i<w *h ; i++) { checkSum += h_odata[i]; } printf("GPU Checksum = %u, ", checkSum); // write out the resulting disparity image. unsigned char *dispOut = (unsigned char *)malloc(numData); int mult = 20; const char *fnameOut = "output_GPU.pgm"; for (unsigned int i=0; i<numData; i++) { dispOut[i] = (int)h_odata[i]*mult; } printf("GPU image: <%s>\n", fnameOut); sdkSavePGM(fnameOut, dispOut, w, h); //compute reference solution printf("Computing CPU reference...\n"); cpu_gold_stereo((unsigned int *)h_img0, (unsigned int *)h_img1, (unsigned int *)h_odata, w, h, minDisp, maxDisp); unsigned int cpuCheckSum = 0; for (unsigned int i=0 ; i<w *h ; i++) { cpuCheckSum += h_odata[i]; } printf("CPU Checksum = %u, ", cpuCheckSum); const char *cpuFnameOut = "output_CPU.pgm"; for (unsigned int i=0; i<numData; i++) { dispOut[i] = (int)h_odata[i]*mult; } printf("CPU image: <%s>\n", cpuFnameOut); sdkSavePGM(cpuFnameOut, dispOut, w, h); // cleanup memory checkCudaErrors(cudaFree(d_odata)); checkCudaErrors(cudaFree(d_img0)); checkCudaErrors(cudaFree(d_img1)); if (h_odata != NULL) free(h_odata); if (h_img0 != NULL) free(h_img0); if (h_img1 != NULL) free(h_img1); if (dispOut != NULL) free(dispOut); sdkDeleteTimer(&timer); exit((checkSum == cpuCheckSum) ? 
EXIT_SUCCESS : EXIT_FAILURE); }
185b500d4fa7adf5eec331b090db0d32c1ace5ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Python.h> #include <iostream> #include "theano_mod_helper.h" #include "cuda_ndarray.cuh" ////////////////////// //// Support Code ////////////////////// #define INTDIV_POW2(a, b) (a >> b) #define INTMOD_POW2(a, b) (a & ((1<<b)-1)) // GpuElemwise{Mul}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True, True)) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_1(unsigned int numEls , const int dim0 , const float * i0_data, int i0_str_0 , const float * i1_data, int i1_str_0 , const float * i2_data, int i2_str_0 , float * o0_data, int o0_str_0 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos0 = ii; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; o0_i = ii_i0_value * ii_i1_data[0] * ii_i2_data[0]; ii_o0_data[0] = o0_i; } } // GpuElemwise{Mul}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True, True)) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_2(unsigned int numEls , const int dim0, const int dim1 , const float * i0_data, int i0_str_0, int i0_str_1 , const float * i1_data, int i1_str_0, int i1_str_1 , const float * i2_data, int i2_str_0, int i2_str_1 , float * o0_data, int o0_str_0, int o0_str_1 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = 
blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos1 = ii % dim1; ii = ii / dim1; ii_i1_data += pos1 * i1_str_1; ii_i2_data += pos1 * i2_str_1; ii_o0_data += pos1 * o0_str_1; int pos0 = ii; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; o0_i = ii_i0_value * ii_i1_data[0] * ii_i2_data[0]; ii_o0_data[0] = o0_i; } } // GpuElemwise{Mul}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True, True)) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous (unsigned int numEls , const float * i0_data , const float * i1_data , const float * i2_data , float * o0_data ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; for (int i = idx; i < numEls; i += numThreads) { npy_float32 o0_i; o0_i = ii_i0_value * i1_data[i] * i2_data[i]; o0_data[i] = o0_i; } } static void can_collapse_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(int nd, const int * dims, const int * strides, int collapse[]) { //can we collapse dims[i] and dims[i-1] for(int i=nd-1;i>0;i--){ if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd collapse[i]=1; }else collapse[i]=0; } } static int callkernel_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(unsigned int numEls, const int d, const int * dims, const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, float * o0_data, const int * o0_str) { numEls = dims[0]*dims[1]*1; int local_dims[2]; int local_str[3][2]; int local_ostr[1][2]; int nd_collapse = 2; 
for(int i=0;i<2;i++){//init new dim local_dims[i]=dims[i]; } for(int i=0;i<2;i++){//init new strides local_str[0][i]=i0_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[1][i]=i1_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[2][i]=i2_str[i]; } for(int i=0;i<2;i++){//init new strides local_ostr[0][i]=o0_str[i]; } for(int id=0;id<nd_collapse;id++){ bool all_broadcast=true; for(int input_id=0;input_id<3;input_id++){ if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } for(int input_id=0;input_id<1;input_id++){ if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } if(all_broadcast){ for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; for(int input_id=0;input_id<3;input_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_str[input_id][j-1]=local_str[input_id][j]; } } for(int output_id=0;output_id<1;output_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_ostr[output_id][j-1]=local_ostr[output_id][j]; } } nd_collapse--; id--; } } int nd_collapse_[2] = {1,1}; int nd_collapse_1[2] = {1,1}; can_collapse_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(nd_collapse, local_dims, local_str[1], nd_collapse_1); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_1[i]==0) nd_collapse_[i]=0; } int nd_collapse_2[2] = {1,1}; can_collapse_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(nd_collapse, local_dims, local_str[2], nd_collapse_2); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_2[i]==0) nd_collapse_[i]=0; } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[0][i-1]=local_str[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[0][j-1]=local_str[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[1][i-1]=local_str[1][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[1][j-1]=local_str[1][j]; } } for(int 
i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[2][i-1]=local_str[2][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[2][j-1]=local_str[2][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_ostr[0][i-1]=local_ostr[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_ostr[0][j-1]=local_ostr[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_dims[i-1]*=local_dims[i];//set new dims for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; } } for(int i=1, end=nd_collapse;i<end;i++){ if(nd_collapse_[i]==1)nd_collapse--; } if(nd_collapse == 1 && local_str[1][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1 ){nd_collapse=0;} if(numEls==0) return 0; switch (nd_collapse==0?0:min(2,nd_collapse)) { case 0: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, i0_data, i1_data, i2_data, o0_data); //std::cerr << "calling callkernel returned\n"; CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_960910ffb5a3dd33ea4a96e9b1d75ab2_0 Mul", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous<<<n_blocks, 
threads_per_block>>>(numEls, i0_data, i1_data, i2_data, o0_data)"); return -1; } return 0; } break; case 1: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_1), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0]); CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_960910ffb5a3dd33ea4a96e9b1d75ab2_0 Mul", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0])"); return -1; } return 0; } break; case 2: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_2), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], 
local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1]); CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_960910ffb5a3dd33ea4a96e9b1d75ab2_0 Mul", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1])"); return -1; } return 0; } break; } return -2; } namespace { struct __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2 { PyObject* __ERROR; PyObject* storage_V3; PyObject* storage_V5; PyObject* storage_V7; PyObject* storage_V1; __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2() { // This is only somewhat safe because we: // 1) Are not a virtual class // 2) Do not use any virtual classes in the members // 3) Deal with mostly POD and pointers // If this changes, we would have to revise this, but for // now I am tired of chasing segfaults because // initialization code had an error and some pointer has // a junk value. 
memset(this, 0, sizeof(*this)); } ~__struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2(void) { cleanup(); } int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V1) { Py_XINCREF(storage_V3); Py_XINCREF(storage_V5); Py_XINCREF(storage_V7); Py_XINCREF(storage_V1); this->storage_V3 = storage_V3; this->storage_V5 = storage_V5; this->storage_V7 = storage_V7; this->storage_V1 = storage_V1; this->__ERROR = __ERROR; return 0; } void cleanup(void) { __label_1: double __DUMMY_1; __label_3: double __DUMMY_3; __label_5: double __DUMMY_5; __label_7: double __DUMMY_7; __label_10: double __DUMMY_10; Py_XDECREF(this->storage_V3); Py_XDECREF(this->storage_V5); Py_XDECREF(this->storage_V7); Py_XDECREF(this->storage_V1); } int run(void) { int __failure = 0; PyObject* py_V1; CudaNdarray * V1; PyObject* py_V3; CudaNdarray * V3; PyObject* py_V5; CudaNdarray * V5; PyObject* py_V7; CudaNdarray * V7; { py_V1 = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} if (py_V1 == Py_None) { V1 = NULL; } else { assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V1)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); V1 = (CudaNdarray*)py_V1; //std::cerr << "c_extract " << V1 << '\n'; if (V1->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V1->nd); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract " << V1 << " nd check passed\n"; assert(V1); Py_INCREF(py_V1); } else if (py_V1 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract done " << V1 << '\n'; } { py_V3 = PyList_GET_ITEM(storage_V3, 0); {Py_XINCREF(py_V3);} assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V3)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); V3 = (CudaNdarray*)py_V3; //std::cerr << "c_extract " << V3 << '\n'; if (V3->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V3->nd); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V3)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V3)[0], 0); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n"; //std::cerr << "c_extract " << V3->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V3)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V3)[0], 0); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "bcast check 0 passed\n"; if (CudaNdarray_HOST_DIMS(V3)[1] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V3)[1], 1); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "dim check 1 passed\n"; //std::cerr << "c_extract " << V3 << "checking bcast 1 <" << V3->str<< ">\n"; //std::cerr << "c_extract " << V3->str[1] << "\n"; if (CudaNdarray_HOST_STRIDES(V3)[1]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V3)[1], 1); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "bcast check 1 passed\n"; assert(V3); Py_INCREF(py_V3); } else if (py_V3 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract done " << V3 << '\n'; { py_V5 = PyList_GET_ITEM(storage_V5, 0); {Py_XINCREF(py_V5);} assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V5)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); V5 = (CudaNdarray*)py_V5; //std::cerr << "c_extract " << V5 << '\n'; if (V5->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V5->nd); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract " << V5 << " nd check passed\n"; assert(V5); Py_INCREF(py_V5); } else if (py_V5 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_6;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract done " << V5 << '\n'; { py_V7 = PyList_GET_ITEM(storage_V7, 0); {Py_XINCREF(py_V7);} assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V7)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); V7 = (CudaNdarray*)py_V7; //std::cerr << "c_extract " << V7 << '\n'; if (V7->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V7->nd); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << " nd check passed\n"; assert(V7); Py_INCREF(py_V7); } else if (py_V7 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract done " << V7 << '\n'; { // Op class GpuElemwise //std::cerr << "C_CODE Mul START\n"; //standard elemwise size checks int dims[2] = {1,1}; int broadcasts_V3[2] = {1, 1}; int broadcasts_V5[2] = {0, 0}; int broadcasts_V7[2] = {0, 0}; //std::cerr << "C_CODE Mul checking input V3\n"; if (2 != V3->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V3->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i]; if ((!(broadcasts_V3[i] && CudaNdarray_HOST_DIMS(V3)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V3)[i])) { //std::cerr << "C_CODE Mul checking input V3 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 0 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V3)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "C_CODE Mul checking input V5\n"; if (2 != V5->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V5->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i]; if ((!(broadcasts_V5[i] && CudaNdarray_HOST_DIMS(V5)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V5)[i])) { //std::cerr << "C_CODE Mul checking input V5 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. 
Input" " 1 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V5)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "C_CODE Mul checking input V7\n"; if (2 != V7->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V7->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i]; if ((!(broadcasts_V7[i] && CudaNdarray_HOST_DIMS(V7)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V7)[i])) { //std::cerr << "C_CODE Mul checking input V7 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 2 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V7)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } Py_XDECREF(V1); V1 = V5; Py_INCREF(V1); for (int i = 0; (i< 2) && (V1); ++i) { if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i]) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Output dimension mis-match. Output" " 0 (indices start at 0), working inplace" " on input 1, has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V1)[i], dims[i] ); Py_DECREF(V1); V1 = NULL; { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n"; //std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n"; { //new block so that failure gotos don't skip over variable initialization //std::cerr << "calling callkernel\n"; if (callkernel_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(1, 0, dims , CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3) , CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5) , CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7) , CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1) )) { // error Py_DECREF(V1); V1 = NULL; { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } else // no error { } } //std::cerr << "C_CODE Mul END\n"; __label_9: double __DUMMY_9; } __label_8: //std::cerr << "cleanup " << py_V7 << " " << V7 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); if (V7) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt)); Py_XDECREF(V7); } //std::cerr << "cleanup done" << py_V7 << "\n"; {Py_XDECREF(py_V7);} double __DUMMY_8; } __label_6: //std::cerr << "cleanup " << py_V5 << " " << V5 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); if (V5) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt)); Py_XDECREF(V5); } //std::cerr << "cleanup done" << py_V5 << "\n"; {Py_XDECREF(py_V5);} double __DUMMY_6; } __label_4: //std::cerr << "cleanup " << py_V3 << " " << V3 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); if (V3) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt)); Py_XDECREF(V3); } //std::cerr << "cleanup done" << py_V3 << "\n"; {Py_XDECREF(py_V3);} double __DUMMY_4; } __label_2: if (!__failure) { 
//std::cerr << "sync\n"; if (NULL == V1) { // failure: sync None to storage Py_XDECREF(py_V1); py_V1 = Py_None; Py_INCREF(py_V1); } else { if (py_V1 != (PyObject*)V1) { Py_XDECREF(py_V1); py_V1 = (PyObject*)V1; Py_INCREF(py_V1); } assert(py_V1->ob_refcnt); } PyObject* old = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} PyList_SET_ITEM(storage_V1, 0, py_V1); {Py_XDECREF(old);} } //std::cerr << "cleanup " << py_V1 << " " << V1 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); if (V1) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt)); Py_XDECREF(V1); } //std::cerr << "cleanup done" << py_V1 << "\n"; {Py_XDECREF(py_V1);} double __DUMMY_2; } if (__failure) { // When there is a failure, this code puts the exception // in __ERROR. PyObject* err_type = NULL; PyObject* err_msg = NULL; PyObject* err_traceback = NULL; PyErr_Fetch(&err_type, &err_msg, &err_traceback); if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);} if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);} if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);} PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0); PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1); PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2); PyList_SET_ITEM(__ERROR, 0, err_type); PyList_SET_ITEM(__ERROR, 1, err_msg); PyList_SET_ITEM(__ERROR, 2, err_traceback); {Py_XDECREF(old_err_type);} {Py_XDECREF(old_err_msg);} {Py_XDECREF(old_err_traceback);} } // The failure code is returned to index what code block failed. 
return __failure; } }; } static int __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2_executor(__struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2* self) { return self->run(); } static void __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2_destructor(void* executor, void* self) { delete ((__struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2*)self); } ////////////////////// //// Functions ////////////////////// static PyObject * instantiate(PyObject * self, PyObject *argtuple) { assert(PyTuple_Check(argtuple)); if (5 != PyTuple_Size(argtuple)){ PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 5, got %i", (int)PyTuple_Size(argtuple)); return NULL; } __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2* struct_ptr = new __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2(); if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4) ) != 0) { delete struct_ptr; return NULL; } PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2_executor), struct_ptr, __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2_destructor); return thunk; } ////////////////////// //// Module init ////////////////////// static PyMethodDef MyMethods[] = { {"instantiate", instantiate, METH_VARARGS, "undocumented"} , {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init960910ffb5a3dd33ea4a96e9b1d75ab2(void){ (void) Py_InitModule("960910ffb5a3dd33ea4a96e9b1d75ab2", MyMethods); }
185b500d4fa7adf5eec331b090db0d32c1ace5ee.cu
#include <Python.h> #include <iostream> #include "theano_mod_helper.h" #include "cuda_ndarray.cuh" ////////////////////// //// Support Code ////////////////////// #define INTDIV_POW2(a, b) (a >> b) #define INTMOD_POW2(a, b) (a & ((1<<b)-1)) // GpuElemwise{Mul}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True, True)) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_1(unsigned int numEls , const int dim0 , const float * i0_data, int i0_str_0 , const float * i1_data, int i1_str_0 , const float * i2_data, int i2_str_0 , float * o0_data, int o0_str_0 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos0 = ii; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; o0_i = ii_i0_value * ii_i1_data[0] * ii_i2_data[0]; ii_o0_data[0] = o0_i; } } // GpuElemwise{Mul}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True, True)) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_2(unsigned int numEls , const int dim0, const int dim1 , const float * i0_data, int i0_str_0, int i0_str_1 , const float * i1_data, int i1_str_0, int i1_str_1 , const float * i2_data, int i2_str_0, int i2_str_1 , float * o0_data, int o0_str_0, int o0_str_1 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; for (int i = idx; i < 
numEls; i += numThreads) { int ii = i; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos1 = ii % dim1; ii = ii / dim1; ii_i1_data += pos1 * i1_str_1; ii_i2_data += pos1 * i2_str_1; ii_o0_data += pos1 * o0_str_1; int pos0 = ii; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; o0_i = ii_i0_value * ii_i1_data[0] * ii_i2_data[0]; ii_o0_data[0] = o0_i; } } // GpuElemwise{Mul}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True, True)) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous (unsigned int numEls , const float * i0_data , const float * i1_data , const float * i2_data , float * o0_data ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; for (int i = idx; i < numEls; i += numThreads) { npy_float32 o0_i; o0_i = ii_i0_value * i1_data[i] * i2_data[i]; o0_data[i] = o0_i; } } static void can_collapse_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(int nd, const int * dims, const int * strides, int collapse[]) { //can we collapse dims[i] and dims[i-1] for(int i=nd-1;i>0;i--){ if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd collapse[i]=1; }else collapse[i]=0; } } static int callkernel_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(unsigned int numEls, const int d, const int * dims, const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, float * o0_data, const int * o0_str) { numEls = dims[0]*dims[1]*1; int local_dims[2]; int local_str[3][2]; int local_ostr[1][2]; int nd_collapse = 2; for(int i=0;i<2;i++){//init new dim local_dims[i]=dims[i]; } for(int 
i=0;i<2;i++){//init new strides local_str[0][i]=i0_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[1][i]=i1_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[2][i]=i2_str[i]; } for(int i=0;i<2;i++){//init new strides local_ostr[0][i]=o0_str[i]; } for(int id=0;id<nd_collapse;id++){ bool all_broadcast=true; for(int input_id=0;input_id<3;input_id++){ if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } for(int input_id=0;input_id<1;input_id++){ if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } if(all_broadcast){ for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; for(int input_id=0;input_id<3;input_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_str[input_id][j-1]=local_str[input_id][j]; } } for(int output_id=0;output_id<1;output_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_ostr[output_id][j-1]=local_ostr[output_id][j]; } } nd_collapse--; id--; } } int nd_collapse_[2] = {1,1}; int nd_collapse_1[2] = {1,1}; can_collapse_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(nd_collapse, local_dims, local_str[1], nd_collapse_1); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_1[i]==0) nd_collapse_[i]=0; } int nd_collapse_2[2] = {1,1}; can_collapse_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(nd_collapse, local_dims, local_str[2], nd_collapse_2); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_2[i]==0) nd_collapse_[i]=0; } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[0][i-1]=local_str[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[0][j-1]=local_str[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[1][i-1]=local_str[1][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[1][j-1]=local_str[1][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ 
local_str[2][i-1]=local_str[2][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[2][j-1]=local_str[2][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_ostr[0][i-1]=local_ostr[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_ostr[0][j-1]=local_ostr[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_dims[i-1]*=local_dims[i];//set new dims for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; } } for(int i=1, end=nd_collapse;i<end;i++){ if(nd_collapse_[i]==1)nd_collapse--; } if(nd_collapse == 1 && local_str[1][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1 ){nd_collapse=0;} if(numEls==0) return 0; switch (nd_collapse==0?0:min(2,nd_collapse)) { case 0: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, o0_data); //std::cerr << "calling callkernel returned\n"; CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_960910ffb5a3dd33ea4a96e9b1d75ab2_0 Mul", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, o0_data)"); return -1; } return 
0; } break; case 1: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_1<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0]); CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_960910ffb5a3dd33ea4a96e9b1d75ab2_0 Mul", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0])"); return -1; } return 0; } break; case 2: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_2<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], 
o0_data, local_ostr[0][0], local_ostr[0][1]); CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_960910ffb5a3dd33ea4a96e9b1d75ab2_0 Mul", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Mul_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1])"); return -1; } return 0; } break; } return -2; } namespace { struct __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2 { PyObject* __ERROR; PyObject* storage_V3; PyObject* storage_V5; PyObject* storage_V7; PyObject* storage_V1; __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2() { // This is only somewhat safe because we: // 1) Are not a virtual class // 2) Do not use any virtual classes in the members // 3) Deal with mostly POD and pointers // If this changes, we would have to revise this, but for // now I am tired of chasing segfaults because // initialization code had an error and some pointer has // a junk value. 
memset(this, 0, sizeof(*this)); } ~__struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2(void) { cleanup(); } int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V1) { Py_XINCREF(storage_V3); Py_XINCREF(storage_V5); Py_XINCREF(storage_V7); Py_XINCREF(storage_V1); this->storage_V3 = storage_V3; this->storage_V5 = storage_V5; this->storage_V7 = storage_V7; this->storage_V1 = storage_V1; this->__ERROR = __ERROR; return 0; } void cleanup(void) { __label_1: double __DUMMY_1; __label_3: double __DUMMY_3; __label_5: double __DUMMY_5; __label_7: double __DUMMY_7; __label_10: double __DUMMY_10; Py_XDECREF(this->storage_V3); Py_XDECREF(this->storage_V5); Py_XDECREF(this->storage_V7); Py_XDECREF(this->storage_V1); } int run(void) { int __failure = 0; PyObject* py_V1; CudaNdarray * V1; PyObject* py_V3; CudaNdarray * V3; PyObject* py_V5; CudaNdarray * V5; PyObject* py_V7; CudaNdarray * V7; { py_V1 = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} if (py_V1 == Py_None) { V1 = NULL; } else { assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V1)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); V1 = (CudaNdarray*)py_V1; //std::cerr << "c_extract " << V1 << '\n'; if (V1->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V1->nd); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract " << V1 << " nd check passed\n"; assert(V1); Py_INCREF(py_V1); } else if (py_V1 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract done " << V1 << '\n'; } { py_V3 = PyList_GET_ITEM(storage_V3, 0); {Py_XINCREF(py_V3);} assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V3)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); V3 = (CudaNdarray*)py_V3; //std::cerr << "c_extract " << V3 << '\n'; if (V3->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V3->nd); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V3)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V3)[0], 0); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n"; //std::cerr << "c_extract " << V3->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V3)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V3)[0], 0); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "bcast check 0 passed\n"; if (CudaNdarray_HOST_DIMS(V3)[1] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V3)[1], 1); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "dim check 1 passed\n"; //std::cerr << "c_extract " << V3 << "checking bcast 1 <" << V3->str<< ">\n"; //std::cerr << "c_extract " << V3->str[1] << "\n"; if (CudaNdarray_HOST_STRIDES(V3)[1]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V3)[1], 1); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "bcast check 1 passed\n"; assert(V3); Py_INCREF(py_V3); } else if (py_V3 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract done " << V3 << '\n'; { py_V5 = PyList_GET_ITEM(storage_V5, 0); {Py_XINCREF(py_V5);} assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V5)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); V5 = (CudaNdarray*)py_V5; //std::cerr << "c_extract " << V5 << '\n'; if (V5->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V5->nd); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract " << V5 << " nd check passed\n"; assert(V5); Py_INCREF(py_V5); } else if (py_V5 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_6;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract done " << V5 << '\n'; { py_V7 = PyList_GET_ITEM(storage_V7, 0); {Py_XINCREF(py_V7);} assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V7)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); V7 = (CudaNdarray*)py_V7; //std::cerr << "c_extract " << V7 << '\n'; if (V7->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V7->nd); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << " nd check passed\n"; assert(V7); Py_INCREF(py_V7); } else if (py_V7 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract done " << V7 << '\n'; { // Op class GpuElemwise //std::cerr << "C_CODE Mul START\n"; //standard elemwise size checks int dims[2] = {1,1}; int broadcasts_V3[2] = {1, 1}; int broadcasts_V5[2] = {0, 0}; int broadcasts_V7[2] = {0, 0}; //std::cerr << "C_CODE Mul checking input V3\n"; if (2 != V3->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V3->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i]; if ((!(broadcasts_V3[i] && CudaNdarray_HOST_DIMS(V3)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V3)[i])) { //std::cerr << "C_CODE Mul checking input V3 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 0 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V3)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "C_CODE Mul checking input V5\n"; if (2 != V5->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V5->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i]; if ((!(broadcasts_V5[i] && CudaNdarray_HOST_DIMS(V5)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V5)[i])) { //std::cerr << "C_CODE Mul checking input V5 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. 
Input" " 1 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V5)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "C_CODE Mul checking input V7\n"; if (2 != V7->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V7->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i]; if ((!(broadcasts_V7[i] && CudaNdarray_HOST_DIMS(V7)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V7)[i])) { //std::cerr << "C_CODE Mul checking input V7 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 2 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V7)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } Py_XDECREF(V1); V1 = V5; Py_INCREF(V1); for (int i = 0; (i< 2) && (V1); ++i) { if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i]) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Output dimension mis-match. Output" " 0 (indices start at 0), working inplace" " on input 1, has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V1)[i], dims[i] ); Py_DECREF(V1); V1 = NULL; { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n"; //std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n"; { //new block so that failure gotos don't skip over variable initialization //std::cerr << "calling callkernel\n"; if (callkernel_node_960910ffb5a3dd33ea4a96e9b1d75ab2_0(1, 0, dims , CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3) , CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5) , CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7) , CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1) )) { // error Py_DECREF(V1); V1 = NULL; { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } else // no error { } } //std::cerr << "C_CODE Mul END\n"; __label_9: double __DUMMY_9; } __label_8: //std::cerr << "cleanup " << py_V7 << " " << V7 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); if (V7) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt)); Py_XDECREF(V7); } //std::cerr << "cleanup done" << py_V7 << "\n"; {Py_XDECREF(py_V7);} double __DUMMY_8; } __label_6: //std::cerr << "cleanup " << py_V5 << " " << V5 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); if (V5) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt)); Py_XDECREF(V5); } //std::cerr << "cleanup done" << py_V5 << "\n"; {Py_XDECREF(py_V5);} double __DUMMY_6; } __label_4: //std::cerr << "cleanup " << py_V3 << " " << V3 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); if (V3) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt)); Py_XDECREF(V3); } //std::cerr << "cleanup done" << py_V3 << "\n"; {Py_XDECREF(py_V3);} double __DUMMY_4; } __label_2: if (!__failure) { 
//std::cerr << "sync\n"; if (NULL == V1) { // failure: sync None to storage Py_XDECREF(py_V1); py_V1 = Py_None; Py_INCREF(py_V1); } else { if (py_V1 != (PyObject*)V1) { Py_XDECREF(py_V1); py_V1 = (PyObject*)V1; Py_INCREF(py_V1); } assert(py_V1->ob_refcnt); } PyObject* old = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} PyList_SET_ITEM(storage_V1, 0, py_V1); {Py_XDECREF(old);} } //std::cerr << "cleanup " << py_V1 << " " << V1 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); if (V1) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt)); Py_XDECREF(V1); } //std::cerr << "cleanup done" << py_V1 << "\n"; {Py_XDECREF(py_V1);} double __DUMMY_2; } if (__failure) { // When there is a failure, this code puts the exception // in __ERROR. PyObject* err_type = NULL; PyObject* err_msg = NULL; PyObject* err_traceback = NULL; PyErr_Fetch(&err_type, &err_msg, &err_traceback); if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);} if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);} if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);} PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0); PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1); PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2); PyList_SET_ITEM(__ERROR, 0, err_type); PyList_SET_ITEM(__ERROR, 1, err_msg); PyList_SET_ITEM(__ERROR, 2, err_traceback); {Py_XDECREF(old_err_type);} {Py_XDECREF(old_err_msg);} {Py_XDECREF(old_err_traceback);} } // The failure code is returned to index what code block failed. 
return __failure; } }; } static int __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2_executor(__struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2* self) { return self->run(); } static void __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2_destructor(void* executor, void* self) { delete ((__struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2*)self); } ////////////////////// //// Functions ////////////////////// static PyObject * instantiate(PyObject * self, PyObject *argtuple) { assert(PyTuple_Check(argtuple)); if (5 != PyTuple_Size(argtuple)){ PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 5, got %i", (int)PyTuple_Size(argtuple)); return NULL; } __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2* struct_ptr = new __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2(); if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4) ) != 0) { delete struct_ptr; return NULL; } PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2_executor), struct_ptr, __struct_compiled_op_960910ffb5a3dd33ea4a96e9b1d75ab2_destructor); return thunk; } ////////////////////// //// Module init ////////////////////// static PyMethodDef MyMethods[] = { {"instantiate", instantiate, METH_VARARGS, "undocumented"} , {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init960910ffb5a3dd33ea4a96e9b1d75ab2(void){ (void) Py_InitModule("960910ffb5a3dd33ea4a96e9b1d75ab2", MyMethods); }
1bbac6edcb8ac9ae9e5009b9672dba912f372a6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/freespace_param_bottomup_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void FindBottomUpEdge(const int count,int num,int channels,int height,int width,int xstage,const Dtype * bottomdata, Dtype * topdata ) { CUDA_KERNEL_LOOP(index, count) { int v = index; int n = v / xstage; int c = 0; int xstep = width / (xstage-1); int current_stage = index % xstage; int w = current_stage * xstep; if(current_stage == xstage -1) { w = width -1; } int samplecount = channels * height * width; int spcount = height * width; int h=0; bool findedge = false; Dtype value = height-1; for(h=height-1;h>0;h-=1) { if(bottomdata[n*samplecount + c * spcount + h * width + w] ==1 && bottomdata[n*samplecount + c * spcount + (h-1) * width + w]==0) { findedge = true; value = h; break; } } topdata[n * xstage + current_stage] = height -1 - value; } } template <typename Dtype> __global__ void FindBottomUpEdge_TowDim(const int count,int num,int channels,int height,int width,int xstage,int ystage,const Dtype * bottomdata, Dtype * topdata ) { CUDA_KERNEL_LOOP(index, count) { int v = index; int n = v / xstage; int c = 0; int xstep = width / (xstage-1); int current_stage = index % xstage; int w = current_stage * xstep; if(current_stage == xstage -1) { w = width -1; } int samplecount = channels * height * width; int spcount = height * width; int h=0; bool findedge = false; Dtype value = height-1; for(h=height-1;h>0;h-=1) { if(bottomdata[n*samplecount + c * spcount + h * width + w] ==1 && bottomdata[n*samplecount + c * spcount + (h-1) * width + w]==0) { findedge = true; value = h; break; } } int current_stage_x = current_stage; int k=0; int src_pos_y = 0; for(k=0;k<ystage;k++) { src_pos_y = float(k) * float(height) / float(ystage -1);; if(k == ystage -1) { src_pos_y = height -1; } topdata[n*1*xstage*ystage + k * xstage + 
current_stage_x] = value - src_pos_y; } } } template <typename Dtype> void FreespaceParamBottomUpLayer<Dtype>::Forward_gpu_onedim(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int xstage = this->layer_param_.freespace_param_bottomup_param().xstage(); const Dtype * bottomdata = bottom[0]->gpu_data(); Dtype * topdata = top[0]->mutable_gpu_data(); const int count = num * xstage; hipLaunchKernelGGL(( FindBottomUpEdge<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, num,channels,height,width,xstage,bottomdata,topdata); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void FreespaceParamBottomUpLayer<Dtype>::Forward_gpu_twodim(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int xstage = this->layer_param_.freespace_param_bottomup_param().xstage(); int ystage = this->layer_param_.freespace_param_bottomup_param().ystage(); const Dtype * bottomdata = bottom[0]->gpu_data(); Dtype * topdata = top[0]->mutable_gpu_data(); const int count = num * xstage; hipLaunchKernelGGL(( FindBottomUpEdge_TowDim<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, num,channels,height,width,xstage,ystage,bottomdata,topdata); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void FreespaceParamBottomUpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { switch (this->layer_param_.freespace_param_bottomup_param().type()) { case FreespaceParamBottomUpParameter_Type_ONEDIM: this->Forward_gpu_onedim(bottom,top); break; case FreespaceParamBottomUpParameter_Type_TWODIM: this->Forward_gpu_twodim(bottom,top); break; default: LOG(FATAL)<<"unknown type"; break; } } template <typename 
Dtype> void FreespaceParamBottomUpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { this->Backward_gpu(top,propagate_down,bottom); } INSTANTIATE_LAYER_GPU_FUNCS(FreespaceParamBottomUpLayer); } // namespace caffe
1bbac6edcb8ac9ae9e5009b9672dba912f372a6f.cu
#include <vector> #include "caffe/layers/freespace_param_bottomup_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void FindBottomUpEdge(const int count,int num,int channels,int height,int width,int xstage,const Dtype * bottomdata, Dtype * topdata ) { CUDA_KERNEL_LOOP(index, count) { int v = index; int n = v / xstage; int c = 0; int xstep = width / (xstage-1); int current_stage = index % xstage; int w = current_stage * xstep; if(current_stage == xstage -1) { w = width -1; } int samplecount = channels * height * width; int spcount = height * width; int h=0; bool findedge = false; Dtype value = height-1; for(h=height-1;h>0;h-=1) { if(bottomdata[n*samplecount + c * spcount + h * width + w] ==1 && bottomdata[n*samplecount + c * spcount + (h-1) * width + w]==0) { findedge = true; value = h; break; } } topdata[n * xstage + current_stage] = height -1 - value; } } template <typename Dtype> __global__ void FindBottomUpEdge_TowDim(const int count,int num,int channels,int height,int width,int xstage,int ystage,const Dtype * bottomdata, Dtype * topdata ) { CUDA_KERNEL_LOOP(index, count) { int v = index; int n = v / xstage; int c = 0; int xstep = width / (xstage-1); int current_stage = index % xstage; int w = current_stage * xstep; if(current_stage == xstage -1) { w = width -1; } int samplecount = channels * height * width; int spcount = height * width; int h=0; bool findedge = false; Dtype value = height-1; for(h=height-1;h>0;h-=1) { if(bottomdata[n*samplecount + c * spcount + h * width + w] ==1 && bottomdata[n*samplecount + c * spcount + (h-1) * width + w]==0) { findedge = true; value = h; break; } } int current_stage_x = current_stage; int k=0; int src_pos_y = 0; for(k=0;k<ystage;k++) { src_pos_y = float(k) * float(height) / float(ystage -1);; if(k == ystage -1) { src_pos_y = height -1; } topdata[n*1*xstage*ystage + k * xstage + current_stage_x] = value - src_pos_y; } } } template <typename Dtype> void 
FreespaceParamBottomUpLayer<Dtype>::Forward_gpu_onedim(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int xstage = this->layer_param_.freespace_param_bottomup_param().xstage(); const Dtype * bottomdata = bottom[0]->gpu_data(); Dtype * topdata = top[0]->mutable_gpu_data(); const int count = num * xstage; FindBottomUpEdge<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, num,channels,height,width,xstage,bottomdata,topdata); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void FreespaceParamBottomUpLayer<Dtype>::Forward_gpu_twodim(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int xstage = this->layer_param_.freespace_param_bottomup_param().xstage(); int ystage = this->layer_param_.freespace_param_bottomup_param().ystage(); const Dtype * bottomdata = bottom[0]->gpu_data(); Dtype * topdata = top[0]->mutable_gpu_data(); const int count = num * xstage; FindBottomUpEdge_TowDim<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, num,channels,height,width,xstage,ystage,bottomdata,topdata); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void FreespaceParamBottomUpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { switch (this->layer_param_.freespace_param_bottomup_param().type()) { case FreespaceParamBottomUpParameter_Type_ONEDIM: this->Forward_gpu_onedim(bottom,top); break; case FreespaceParamBottomUpParameter_Type_TWODIM: this->Forward_gpu_twodim(bottom,top); break; default: LOG(FATAL)<<"unknown type"; break; } } template <typename Dtype> void FreespaceParamBottomUpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const 
vector<Blob<Dtype>*>& bottom) { this->Backward_gpu(top,propagate_down,bottom); } INSTANTIATE_LAYER_GPU_FUNCS(FreespaceParamBottomUpLayer); } // namespace caffe
50df611a694cf8e051b8b6dde727160a5b1e2d6d.hip
// !!! This is a file automatically generated by hipify!!! #define USE_SHARED #include <iostream> #include <fstream> #include <cstdio> #include <hip/hip_runtime.h> #include "util.h" #include "CudaStream.h" #include "CudaEvent.h" // 2D diffusion example // the grid has a fixed width of nx=128 // the use specifies the height, ny, as a power of two // note that nx and ny have 2 added to them to account for halos template <typename T> void fill_gpu(T *v, T value, int n); void write_to_file(int nx, int ny, double* data); // NOTE : i-major ordering, i.e. x[i,j] is indexed at location [i+j*nx] // for(i=1; i<nx-1; ++i) // for(j=1; j<ny-1; ++j) // x1[i,j] = x0[i,j] + dt * (-4.*x0[i,j] // + x0[i,j-1] + x0[i,j+1] // + x0[i-1,j] + x0[i+1,j]); __global__ void diffusion(double *x0, double *x1, int nx, int ny, double dt) { auto i = threadIdx.x + blockIdx.x*blockDim.x + 1; auto j = threadIdx.y + blockIdx.y*blockDim.y + 1; if(i<nx-1 && j<ny-1) { auto pos = i + j*nx; x1[pos] = x0[pos] + dt * (-4.*x0[pos] + x0[pos-nx] + x0[pos+nx] + x0[pos-1] + x0[pos+1]); } } __global__ void diffusion_shared(double *x0, double *x1, int nx, int ny, double dt) { extern __shared__ double buf[]; auto i = threadIdx.x + blockIdx.x*blockDim.x + 1; auto j = threadIdx.y + blockIdx.y*blockDim.y + 1; auto ib = threadIdx.x + 1; auto jb = threadIdx.y + 1; auto nxb = blockDim.x+2; auto nyb = blockDim.y+2; if(i<nx-1 && j<ny-1) { auto pos = i + j*nx; auto bpos = ib + jb*nxb; buf[bpos] = x0[pos]; if(ib==1) buf[bpos-1] = x0[pos-1]; if(ib==nxb-2) buf[bpos+1] = x0[pos+1]; if(jb==1) buf[bpos-nxb] = x0[pos-nx]; if(jb==nyb-2) buf[bpos+nxb] = x0[pos+nx]; __syncthreads(); x1[pos] = buf[bpos] + dt * (-4.*buf[bpos] + buf[bpos-nxb] + buf[bpos+nxb] + buf[bpos-1] + buf[bpos+1]); } } int main(int argc, char** argv) { // set up parameters // first argument is the y dimension = 2^arg size_t pow = read_arg(argc, argv, 1, 8); // second argument is the number of time steps size_t nsteps = read_arg(argc, argv, 2, 100); // set domain 
size size_t nx = 128+2; size_t ny = (1 << pow)+2; double dt = 0.1; std::cout << "\n## " << nx << "x" << ny << " for " << nsteps << " time steps" << " (" << nx*ny << " grid points)" << std::endl; // allocate memory on device and host // note : allocate enough memory for the halo around the boundary auto buffer_size = nx*ny; double *x_host = malloc_host_pinned<double>(buffer_size); double *x0 = malloc_device<double>(buffer_size); double *x1 = malloc_device<double>(buffer_size); dim3 block_dims(64, 8); dim3 grid_dims( ((nx-2)+block_dims.x-1) / block_dims.x, ((ny-2)+block_dims.y-1) / block_dims.y ); // set initial conditions of 0 everywhere fill_gpu(x0, 0., buffer_size); fill_gpu(x1, 0., buffer_size); // set boundary conditions of 1 on south border fill_gpu(x0, 1., nx); fill_gpu(x1, 1., nx); fill_gpu(x0+nx*(ny-1), 1., nx); fill_gpu(x1+nx*(ny-1), 1., nx); CudaStream stream; CudaStream copy_stream(true); auto start_event = stream.enqueue_event(); // time stepping loop for(auto step=0; step<nsteps; ++step) { #ifdef USE_SHARED auto shared_size = sizeof(double) * (block_dims.x+2) * (block_dims.y+2); hipLaunchKernelGGL(( diffusion_shared), dim3(grid_dims), dim3(block_dims), shared_size, 0, x0, x1, nx, ny, dt); #else hipLaunchKernelGGL(( diffusion), dim3(grid_dims), dim3(block_dims), 0, 0, x0, x1, nx, ny, dt); #endif std::swap(x0, x1); } auto stop_event = stream.enqueue_event(); stop_event.wait(); copy_to_host<double>(x0, x_host, buffer_size); double time = stop_event.time_since(start_event); std::cout << "## " << time << "s, " << nsteps*(nx-2)*(ny-2) / time << " points/second" << std::endl << std::endl; std::cout << "writing to output.bin/bov" << std::endl; write_to_file(nx, ny, x_host); return 0; } template <typename T> __global__ void fill(T *v, T value, int n) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if(tid<n) { v[tid] = value; } } template <typename T> void fill_gpu(T *v, T value, int n) { auto block_dim = 192ul; auto grid_dim = n/block_dim + (n%block_dim ? 
1 : 0); hipLaunchKernelGGL(( fill<T>), dim3(grid_dim), dim3(block_dim), 0, 0, v, value, n); } void write_to_file(int nx, int ny, double* data) { { FILE* output = fopen("output.bin", "w"); fwrite(data, sizeof(double), nx * ny, output); fclose(output); } std::ofstream fid("output.bov"); fid << "TIME: 0.0" << std::endl; fid << "DATA_FILE: output.bin" << std::endl; fid << "DATA_SIZE: " << nx << ", " << ny << ", 1" << std::endl;; fid << "DATA_FORMAT: DOUBLE" << std::endl; fid << "VARIABLE: phi" << std::endl; fid << "DATA_ENDIAN: LITTLE" << std::endl; fid << "CENTERING: nodal" << std::endl; fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl; }
50df611a694cf8e051b8b6dde727160a5b1e2d6d.cu
#define USE_SHARED #include <iostream> #include <fstream> #include <cstdio> #include <cuda.h> #include "util.h" #include "CudaStream.h" #include "CudaEvent.h" // 2D diffusion example // the grid has a fixed width of nx=128 // the use specifies the height, ny, as a power of two // note that nx and ny have 2 added to them to account for halos template <typename T> void fill_gpu(T *v, T value, int n); void write_to_file(int nx, int ny, double* data); // NOTE : i-major ordering, i.e. x[i,j] is indexed at location [i+j*nx] // for(i=1; i<nx-1; ++i) // for(j=1; j<ny-1; ++j) // x1[i,j] = x0[i,j] + dt * (-4.*x0[i,j] // + x0[i,j-1] + x0[i,j+1] // + x0[i-1,j] + x0[i+1,j]); __global__ void diffusion(double *x0, double *x1, int nx, int ny, double dt) { auto i = threadIdx.x + blockIdx.x*blockDim.x + 1; auto j = threadIdx.y + blockIdx.y*blockDim.y + 1; if(i<nx-1 && j<ny-1) { auto pos = i + j*nx; x1[pos] = x0[pos] + dt * (-4.*x0[pos] + x0[pos-nx] + x0[pos+nx] + x0[pos-1] + x0[pos+1]); } } __global__ void diffusion_shared(double *x0, double *x1, int nx, int ny, double dt) { extern __shared__ double buf[]; auto i = threadIdx.x + blockIdx.x*blockDim.x + 1; auto j = threadIdx.y + blockIdx.y*blockDim.y + 1; auto ib = threadIdx.x + 1; auto jb = threadIdx.y + 1; auto nxb = blockDim.x+2; auto nyb = blockDim.y+2; if(i<nx-1 && j<ny-1) { auto pos = i + j*nx; auto bpos = ib + jb*nxb; buf[bpos] = x0[pos]; if(ib==1) buf[bpos-1] = x0[pos-1]; if(ib==nxb-2) buf[bpos+1] = x0[pos+1]; if(jb==1) buf[bpos-nxb] = x0[pos-nx]; if(jb==nyb-2) buf[bpos+nxb] = x0[pos+nx]; __syncthreads(); x1[pos] = buf[bpos] + dt * (-4.*buf[bpos] + buf[bpos-nxb] + buf[bpos+nxb] + buf[bpos-1] + buf[bpos+1]); } } int main(int argc, char** argv) { // set up parameters // first argument is the y dimension = 2^arg size_t pow = read_arg(argc, argv, 1, 8); // second argument is the number of time steps size_t nsteps = read_arg(argc, argv, 2, 100); // set domain size size_t nx = 128+2; size_t ny = (1 << pow)+2; double dt = 0.1; 
std::cout << "\n## " << nx << "x" << ny << " for " << nsteps << " time steps" << " (" << nx*ny << " grid points)" << std::endl; // allocate memory on device and host // note : allocate enough memory for the halo around the boundary auto buffer_size = nx*ny; double *x_host = malloc_host_pinned<double>(buffer_size); double *x0 = malloc_device<double>(buffer_size); double *x1 = malloc_device<double>(buffer_size); dim3 block_dims(64, 8); dim3 grid_dims( ((nx-2)+block_dims.x-1) / block_dims.x, ((ny-2)+block_dims.y-1) / block_dims.y ); // set initial conditions of 0 everywhere fill_gpu(x0, 0., buffer_size); fill_gpu(x1, 0., buffer_size); // set boundary conditions of 1 on south border fill_gpu(x0, 1., nx); fill_gpu(x1, 1., nx); fill_gpu(x0+nx*(ny-1), 1., nx); fill_gpu(x1+nx*(ny-1), 1., nx); CudaStream stream; CudaStream copy_stream(true); auto start_event = stream.enqueue_event(); // time stepping loop for(auto step=0; step<nsteps; ++step) { #ifdef USE_SHARED auto shared_size = sizeof(double) * (block_dims.x+2) * (block_dims.y+2); diffusion_shared<<<grid_dims, block_dims, shared_size>>>(x0, x1, nx, ny, dt); #else diffusion<<<grid_dims, block_dims>>>(x0, x1, nx, ny, dt); #endif std::swap(x0, x1); } auto stop_event = stream.enqueue_event(); stop_event.wait(); copy_to_host<double>(x0, x_host, buffer_size); double time = stop_event.time_since(start_event); std::cout << "## " << time << "s, " << nsteps*(nx-2)*(ny-2) / time << " points/second" << std::endl << std::endl; std::cout << "writing to output.bin/bov" << std::endl; write_to_file(nx, ny, x_host); return 0; } template <typename T> __global__ void fill(T *v, T value, int n) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if(tid<n) { v[tid] = value; } } template <typename T> void fill_gpu(T *v, T value, int n) { auto block_dim = 192ul; auto grid_dim = n/block_dim + (n%block_dim ? 
1 : 0); fill<T><<<grid_dim, block_dim>>>(v, value, n); } void write_to_file(int nx, int ny, double* data) { { FILE* output = fopen("output.bin", "w"); fwrite(data, sizeof(double), nx * ny, output); fclose(output); } std::ofstream fid("output.bov"); fid << "TIME: 0.0" << std::endl; fid << "DATA_FILE: output.bin" << std::endl; fid << "DATA_SIZE: " << nx << ", " << ny << ", 1" << std::endl;; fid << "DATA_FORMAT: DOUBLE" << std::endl; fid << "VARIABLE: phi" << std::endl; fid << "DATA_ENDIAN: LITTLE" << std::endl; fid << "CENTERING: nodal" << std::endl; fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl; }
c6b48d7f1f2323fb56b252c98fa2f519c8bfd45b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2017 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/device_helper.h" #include "saiga/cuda/memory.h" #include "saiga/cuda/reduce.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/core/math/math.h" #include <fstream> #include <random> using Saiga::ArrayView; using Saiga::CUDA::ThreadInfo; //#define LECTURE std::ofstream outstrm; HD inline uint32_t simpleRand(uint32_t state) { /* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */ uint32_t x = state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; return x; } template <typename T, unsigned int BLOCK_SIZE, unsigned int K> __global__ static void randomAccessSimple(ArrayView<T> data, ArrayView<T> result) { ThreadInfo<BLOCK_SIZE> ti; if (ti.thread_id >= result.size()) return; uint32_t r = ti.thread_id * 17; T sum = 0; for (int i = 0; i < K; ++i) { r = simpleRand(r); auto index = r % data.size(); // sum += data[index]; sum += Saiga::CUDA::ldg(data.data() + index); } // Reduce the cache impact of the output array sum = Saiga::CUDA::warpReduceSum<T>(sum); if (ti.lane_id == 0) result[ti.warp_id] = sum; } #ifndef LECTURE template <typename T, unsigned int BLOCK_SIZE, unsigned int K> __global__ static void randomAccessConstRestricted(ArrayView<T> vdata, const T* __restrict__ data, ArrayView<T> result) { ThreadInfo<BLOCK_SIZE> ti; if (ti.thread_id >= result.size()) return; uint32_t r = ti.thread_id * 17; T sum = 0; for (int i = 0; i < K; ++i) { r = simpleRand(r); auto index = r % vdata.size(); sum += data[index]; } // Reduce the cache impact of the output array sum = Saiga::CUDA::warpReduceSum<T>(sum); if (ti.lane_id == 0) result[ti.warp_id] = sum; } template <typename T, unsigned int BLOCK_SIZE, unsigned int K> __global__ static void randomAccessLdg(ArrayView<T> data, ArrayView<T> result) { 
ThreadInfo<BLOCK_SIZE> ti; if (ti.thread_id >= result.size()) return; uint32_t r = ti.thread_id * 17; T sum = 0; for (int i = 0; i < K; ++i) { r = simpleRand(r); auto index = r % data.size(); sum += Saiga::CUDA::ldg(data.data() + index); } // Reduce the cache impact of the output array sum = Saiga::CUDA::warpReduceSum<T>(sum); if (ti.lane_id == 0) result[ti.warp_id] = sum; } static texture<int, 1, hipReadModeElementType> dataTexture; template <typename T, unsigned int BLOCK_SIZE, unsigned int K> __global__ static void randomAccessTexture(ArrayView<T> data, ArrayView<T> result) { ThreadInfo<BLOCK_SIZE> ti; if (ti.thread_id >= result.size()) return; uint32_t r = ti.thread_id * 17; T sum = 0; for (int i = 0; i < K; ++i) { r = simpleRand(r); auto index = r % data.size(); sum += tex1Dfetch(dataTexture, index); } // Reduce the cache impact of the output array sum = Saiga::CUDA::warpReduceSum<T>(sum); if (ti.lane_id == 0) result[ti.warp_id] = sum; } #endif template <typename ElementType> void randomAccessTest2(int numIndices, int numElements) { const int K = 16; outstrm << numIndices * sizeof(int) / 1024 << ","; size_t readWrites = numElements * sizeof(ElementType) / 32 + numElements * sizeof(int) * K; Saiga::CUDA::PerformanceTestHelper test("Coalesced processing test. 
numIndices: " + std::to_string(numIndices) + " numElements: " + std::to_string(numElements), readWrites); thrust::host_vector<ElementType> data(numIndices); thrust::host_vector<ElementType> result(numElements, 0); thrust::host_vector<ElementType> ref(numElements); thrust::device_vector<ElementType> d_data(data); thrust::device_vector<ElementType> d_result(result); int its = 50; const int BLOCK_SIZE = 128; const int BLOCKS = Saiga::CUDA::getBlockCount(numElements, BLOCK_SIZE); { d_result = result; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( its, [&]() {hipLaunchKernelGGL(( randomAccessSimple<ElementType, BLOCK_SIZE, K>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); }); test.addMeassurement("randomAccessSimple", st.median); outstrm << test.bandwidth(st.median) << ","; CUDA_SYNC_CHECK_ERROR(); } // SAIGA_ASSERT(ref == d_result); #ifndef LECTURE { d_result = result; // hipFuncSetCacheConfig(randomAccessConstRestricted<ElementType,BLOCK_SIZE,K>,hipFuncCachePreferShared); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { hipLaunchKernelGGL(( randomAccessConstRestricted<ElementType, BLOCK_SIZE, K>) , dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, d_data.data().get(), d_result); }); test.addMeassurement("randomAccessConstRestricted", st.median); outstrm << test.bandwidth(st.median) << ","; CUDA_SYNC_CHECK_ERROR(); } // SAIGA_ASSERT(ref == d_result); { d_result = result; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { hipLaunchKernelGGL(( randomAccessLdg<ElementType, BLOCK_SIZE, K>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); }); test.addMeassurement("randomAccessLdg", st.median); outstrm << test.bandwidth(st.median) << ","; CUDA_SYNC_CHECK_ERROR(); } // SAIGA_ASSERT(ref == d_result); #endif { hipBindTexture(0, dataTexture, d_data.data().get(), d_data.size() * sizeof(ElementType)); d_result = result; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( its, [&]() 
{hipLaunchKernelGGL(( randomAccessTexture<ElementType, BLOCK_SIZE, K>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); }); test.addMeassurement("randomAccessTexture", st.median); outstrm << test.bandwidth(st.median); hipUnbindTexture(dataTexture); CUDA_SYNC_CHECK_ERROR(); } outstrm << std::endl; return; } int main(int argc, char* argv[]) { // hipDeviceSetCacheConfig(hipFuncCachePreferL1); // hipDeviceSetCacheConfig(hipFuncCachePreferShared); outstrm.open("out.csv"); outstrm << "size,simple,cr,ldg,texture" << std::endl; #ifdef LECTURE int start = 8; int end = 9; randomAccessTest2<int>(1 << 12, 1 * 1024 * 1024); #else int start = 8; int end = 24; for (int i = start; i < end; ++i) { randomAccessTest2<int>(1 << i, 1 * 1024 * 1024); if (i > 0) randomAccessTest2<int>((1 << i) + (1 << (i - 1)), 1 * 1024 * 1024); } #endif CUDA_SYNC_CHECK_ERROR(); return 0; }
c6b48d7f1f2323fb56b252c98fa2f519c8bfd45b.cu
/** * Copyright (c) 2017 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/device_helper.h" #include "saiga/cuda/memory.h" #include "saiga/cuda/reduce.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/core/math/math.h" #include <fstream> #include <random> using Saiga::ArrayView; using Saiga::CUDA::ThreadInfo; //#define LECTURE std::ofstream outstrm; HD inline uint32_t simpleRand(uint32_t state) { /* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */ uint32_t x = state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; return x; } template <typename T, unsigned int BLOCK_SIZE, unsigned int K> __global__ static void randomAccessSimple(ArrayView<T> data, ArrayView<T> result) { ThreadInfo<BLOCK_SIZE> ti; if (ti.thread_id >= result.size()) return; uint32_t r = ti.thread_id * 17; T sum = 0; for (int i = 0; i < K; ++i) { r = simpleRand(r); auto index = r % data.size(); // sum += data[index]; sum += Saiga::CUDA::ldg(data.data() + index); } // Reduce the cache impact of the output array sum = Saiga::CUDA::warpReduceSum<T>(sum); if (ti.lane_id == 0) result[ti.warp_id] = sum; } #ifndef LECTURE template <typename T, unsigned int BLOCK_SIZE, unsigned int K> __global__ static void randomAccessConstRestricted(ArrayView<T> vdata, const T* __restrict__ data, ArrayView<T> result) { ThreadInfo<BLOCK_SIZE> ti; if (ti.thread_id >= result.size()) return; uint32_t r = ti.thread_id * 17; T sum = 0; for (int i = 0; i < K; ++i) { r = simpleRand(r); auto index = r % vdata.size(); sum += data[index]; } // Reduce the cache impact of the output array sum = Saiga::CUDA::warpReduceSum<T>(sum); if (ti.lane_id == 0) result[ti.warp_id] = sum; } template <typename T, unsigned int BLOCK_SIZE, unsigned int K> __global__ static void randomAccessLdg(ArrayView<T> data, ArrayView<T> result) { ThreadInfo<BLOCK_SIZE> ti; if (ti.thread_id >= result.size()) return; uint32_t r = ti.thread_id * 17; T sum 
= 0; for (int i = 0; i < K; ++i) { r = simpleRand(r); auto index = r % data.size(); sum += Saiga::CUDA::ldg(data.data() + index); } // Reduce the cache impact of the output array sum = Saiga::CUDA::warpReduceSum<T>(sum); if (ti.lane_id == 0) result[ti.warp_id] = sum; } static texture<int, 1, cudaReadModeElementType> dataTexture; template <typename T, unsigned int BLOCK_SIZE, unsigned int K> __global__ static void randomAccessTexture(ArrayView<T> data, ArrayView<T> result) { ThreadInfo<BLOCK_SIZE> ti; if (ti.thread_id >= result.size()) return; uint32_t r = ti.thread_id * 17; T sum = 0; for (int i = 0; i < K; ++i) { r = simpleRand(r); auto index = r % data.size(); sum += tex1Dfetch(dataTexture, index); } // Reduce the cache impact of the output array sum = Saiga::CUDA::warpReduceSum<T>(sum); if (ti.lane_id == 0) result[ti.warp_id] = sum; } #endif template <typename ElementType> void randomAccessTest2(int numIndices, int numElements) { const int K = 16; outstrm << numIndices * sizeof(int) / 1024 << ","; size_t readWrites = numElements * sizeof(ElementType) / 32 + numElements * sizeof(int) * K; Saiga::CUDA::PerformanceTestHelper test("Coalesced processing test. 
numIndices: " + std::to_string(numIndices) + " numElements: " + std::to_string(numElements), readWrites); thrust::host_vector<ElementType> data(numIndices); thrust::host_vector<ElementType> result(numElements, 0); thrust::host_vector<ElementType> ref(numElements); thrust::device_vector<ElementType> d_data(data); thrust::device_vector<ElementType> d_result(result); int its = 50; const int BLOCK_SIZE = 128; const int BLOCKS = Saiga::CUDA::getBlockCount(numElements, BLOCK_SIZE); { d_result = result; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( its, [&]() { randomAccessSimple<ElementType, BLOCK_SIZE, K><<<BLOCKS, BLOCK_SIZE>>>(d_data, d_result); }); test.addMeassurement("randomAccessSimple", st.median); outstrm << test.bandwidth(st.median) << ","; CUDA_SYNC_CHECK_ERROR(); } // SAIGA_ASSERT(ref == d_result); #ifndef LECTURE { d_result = result; // cudaFuncSetCacheConfig(randomAccessConstRestricted<ElementType,BLOCK_SIZE,K>,cudaFuncCachePreferShared); auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { randomAccessConstRestricted<ElementType, BLOCK_SIZE, K> <<<BLOCKS, BLOCK_SIZE>>>(d_data, d_data.data().get(), d_result); }); test.addMeassurement("randomAccessConstRestricted", st.median); outstrm << test.bandwidth(st.median) << ","; CUDA_SYNC_CHECK_ERROR(); } // SAIGA_ASSERT(ref == d_result); { d_result = result; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { randomAccessLdg<ElementType, BLOCK_SIZE, K><<<BLOCKS, BLOCK_SIZE>>>(d_data, d_result); }); test.addMeassurement("randomAccessLdg", st.median); outstrm << test.bandwidth(st.median) << ","; CUDA_SYNC_CHECK_ERROR(); } // SAIGA_ASSERT(ref == d_result); #endif { cudaBindTexture(0, dataTexture, d_data.data().get(), d_data.size() * sizeof(ElementType)); d_result = result; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>( its, [&]() { randomAccessTexture<ElementType, BLOCK_SIZE, K><<<BLOCKS, BLOCK_SIZE>>>(d_data, d_result); }); 
test.addMeassurement("randomAccessTexture", st.median); outstrm << test.bandwidth(st.median); cudaUnbindTexture(dataTexture); CUDA_SYNC_CHECK_ERROR(); } outstrm << std::endl; return; } int main(int argc, char* argv[]) { // cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); // cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); outstrm.open("out.csv"); outstrm << "size,simple,cr,ldg,texture" << std::endl; #ifdef LECTURE int start = 8; int end = 9; randomAccessTest2<int>(1 << 12, 1 * 1024 * 1024); #else int start = 8; int end = 24; for (int i = start; i < end; ++i) { randomAccessTest2<int>(1 << i, 1 * 1024 * 1024); if (i > 0) randomAccessTest2<int>((1 << i) + (1 << (i - 1)), 1 * 1024 * 1024); } #endif CUDA_SYNC_CHECK_ERROR(); return 0; }
30b5597ae66af8c91e06356381a4bfd3d86036ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" void convLayer_forward(int M, int C, int H, int W, int K, float* X, float* W, float* Y){ int m,c,h,w,p,q; int H_out = H - K + 1; int W_out = W - K + 1; for(m = 0;m<M;m++){//for each output feature maps for(h = 0;j< H_out;h++){//for each output element for(w = 0; w< W_out;w++){ Y[m,h,w] = 0; for(c = 0;c<C;c++){//sum over all input feature maps for(p = 0;p<K;p++){//K*K filter for(q = 0;q<K;q++) Y[m,h,w] += X[c,h + p,w + q] * W[m,c,p,q]; } } } } } } void convLayer_forward(int N, int M, int C, int H, int W, int K, float* X, float* W, float* Y){ int n, m,c,h, w,p,q; int H_out = H - K + 1; int W_out = W - K + 1; for(n = 0; n < N;n++) //for each sample in the mini-batch for(m = 0; m < M;m++) //for each output feature maps for(h = 0; h < H_out; h++) // for each output element for(w = 0; w < W_out; w++){ Y[n,m,h,w] = 0; for(c = 0;c < C;c++)//sum over all input features for(p = 0; p < K;p++) for(q = 0; q < K;q++) Y[n,m,h,w] += X[n,c,h + p, w + q] * W[m,c,p,q]; } } void convLayer_forward(int N, int M, int C, int H, int W, int K, float * X, float* W, float* Y){ int n, m, c,h , w, p,q; int H_out = H - K + 1; int W_out = W - K + 1; parallel_for(n = 0;n < N;n++) parallel_for(m = 0;m < M;m++) parallel_for(h = 0; h < H_out;h++) parallel_for(w = 0; w < W_out; w++){ Y[n,m,h,w] = 0; for(c = 0; c<C;c++) for(p = 0; p< K;p++) for(q = 0; q < K;q++) Y[n,m,h,w] += X[n,c,h + p,w + q] * W[m,c,p,q]; } } __global__ void ConvLayerForward_Kernel(int C, int W_grid, int K, float* X, float* W, float* Y){ int n,m,h,w,c,p,q; n = blockIdx.x; m = blockIdx.y; h = blockIdx.z / W_grid + threadIdx.y; w = blockIdx.z % W_grid + threadIdx.x; float acc = 0; for(c = 0;c < C;c++){//sum over all input channels for(p = 0; p < K;p++) for(q = 0; q < K; q++) acc = acc + X[n,c,h + p, w + q] * W[m,c,p,q]; } Y[n,m,h,w] = acc; } __global__ void ConvLayerForward_Kernel_Shared_Memory(int C,int W_grid, int K, float* X, float* W, float* 
Y){ int n,m,h0,w0,h_base,w_base,h,w; int X_tile_width = TILE_WIDTH + K - 1; extern __shared__ float shmem[]; float* X_shared = &shmem[0]; float* W_shared = &shmem[X_tile_width * X_tile_width]; n = blockIdx.x; m = blockIdx.y; h0 = threadIdx.x; // h0 and w0 used as shorthand for threadIdx.x and threadIdx.y w0 = threadIdx.y; h_base = (blockIdx.z / W_grid) * TILE_SIZE;//vertical base out data index for the block w_base = (blockIdx.z % W_grid) * TILE_SIZE;//horizontal base out data index for the block h = h_base + h0; w = w_base + w0; float acc = 0; int c,i,j,p,q; for(c = 0;c<C;c++){//sum over all input channels if((h0 < K) && (w0 < K)) W_shared[h0,w0] = W[m,c,h0,w0]; //load weights for W[m,c,..], // h0 and w0 used as shorthand for threadIdx.x and threadIdx.y __syncthreads(); for(i = h;i<h_base + X_tile_width; i += TILE_WIDTH){ for(j = w ; j < w_base + X_tile_width; j += TILE_WIDTH) X_shared[i-h_base,j- w_base] = X[n,c,i,j];//load tile from X[n,c,...] into shared memory } __syncthreads(); for(p = 0;p<K;p++){ for(q = 0; q< K;q++) acc = acc + X_shared[h+p,w+q] * W_shared[p,q]; } __syncthreads(); } Y[n,m,h,w] = acc; } #define TILE_WIDTH 16 void void ConvLayerForward_cuda(){ int W_grid = W_out / TILE_WIDTH; int H_grid = H_out / TILE_WIDTH; Z = H_grid * W_grid; dim3 blockDim(TILE_WIDTH, TILE_WIDTH,1); dim3 gridDim(N,M,Z); hipLaunchKernelGGL(( ConvLayerForward_Kernel), dim3(gridDim),dim3(blockDim), 0, 0, ); } void poolingLayer_forward(int M, int H, int W, int K,float * Y, float* S){ int m,x,y,p,q; for(m = 0;m<M;m++){ for(x = 0;x < H/K;x++){ for(y = 0;y < W/K;y++){ S[m,x,y] = 0.; for(p = 0;p<K;p++){ for(q = 0;q < K;q++) S[m,x,y] = S[m,x,y] + Y[m,K*x + p,K*y + q]/(K*K); } } } //add bias and apply non-linear activation S[m,x,y] = sigmoid(S[m,x,y] + b[m]); } } void convLayer_backward_xgrad(int M, int C, int H_in, int W_in, int K, float * dE_dY, float* W, float * dE_dX) { int m,c,h,w,p,q; int H_out = H_in - K + 1; int W_out = W_in - K + 1; for(c = 0;c < C;c++) for(h = 0;h < 
H_in;h++) for(w = 0;w < W_in;w++) dE_dX[c,h,w] = 0; for(m = 0; m < M;m++) for(h = 0; h < H_out;h++) for(w = 0; w < W_out;w++) for(c = 0;c < C;c++) for(p = 0; p < K;p++) for(q = 0; q < K;q++) dE_dX[c,h + p, w + q] += dE_dY[m,h,w] * W[m,c,p,q]; } void convLayer_backward_wgrad(int M, int C, int H,int W,int K, float *dE_dY, float *X, float *dE_dW) { int m, c, h, w, p,q; int H_out = H - K + 1; int W_out = W - K + 1; for(m = 0; m < M; m++) for(c = 0; c< C;c++) for(p = 0; p < K; p++) for(q = 0; q < K; q++) dE_dW[m,c,p,q] = 0; for(m = 0; m < M; m++) for(h = 0; h < H_out; h++) for(w = 0; w < W_out; w++) for(c = 0; c< C; c++) for(p = 0; p < K; p++) for(q = 0; q < K; q++) dE_dW[m,c,p,q] += X[c,h + p,w + q] * dE_dY[m,h,w]; } // cuda implementation //matrix multiplication __global__ void unroll_Kernel(int C, int H, int W, int K, float* X,float* X_unroll){ int c,s,h_out,w_out,h_unroll,w_base,p,q; int t = blockIdx.x * CUDA_MAX_NUM_THREADS + threadIdx.x; int H_out = H - K + 1; int W_out = W - K + 1; int W_unroll = H_out * W_out; if(t < C * W_unroll){ c = t / W_unroll; s = t % W_unroll; h_out = s / W_out; w_out = s % W_out; h_unroll = h_out * W_out + w_out; w_base = c * K * K; for(p = 0;p<K;p++) for(q = 0;q<K;q++){ w_unroll = w_base + p * K + q; X_unroll(h_unroll,w_unroll) = X(c,h_out + p,w_out + q); } } } void unroll_gpu(int C, int H, int W, int K, float* X, float* X_unroll){ int H_out = H - K + 1; int W_out = W - K + 1; int num_threads = C * H_out * W_out; int num_blocks = ceil((C * H_out * W_out)/ CUDA_MAX_NUM_THREADS); hipLaunchKernelGGL(( unroll_Kernel), dim3(num_blocks), dim3(CUDA_MAX_NUM_THREADS), 0, 0, ); } void unroll(int C, int H, int W, int K,float* X,float * X_unroll){ int c,h,w,p,q,w_base,w_unroll,h_unroll; int H_out = H - K + 1; int W_out = W - K + 1; for(c = 0;c<C;c++){ w_base = c * (K * K); for(p = 0; p < K; p++) for(q = 0; q < K; q++){ for(h = 0;h<H_out;h++) for(w = 0; w < W_out;w++){ w_unroll = w_base + p * K + q; h_unroll = h * W_out + w; 
X_unroll(h_unroll,w_unroll) = X(c,h+p,w+q); } } } } void convLayer_forward(int N, int M, int C, int H, int W, int K, float* X, float *W_){ int W_out = W - K + 1; int H_out = H - K + 1; int W_unroll = C * K * K; int H_unroll = H_out * W_out; float* X_unrolled = malloc(W_unroll * H_unroll * sizeof(float)); for(int n = 0;n<N;n++){ unroll(C,H,W,K,n,X,X_unrolled); gemm(H_unroll,M,W_unroll,X_unrolled,W,Y[n]); } }
30b5597ae66af8c91e06356381a4bfd3d86036ca.cu
void convLayer_forward(int M, int C, int H, int W, int K, float* X, float* W, float* Y){ int m,c,h,w,p,q; int H_out = H - K + 1; int W_out = W - K + 1; for(m = 0;m<M;m++){//for each output feature maps for(h = 0;j< H_out;h++){//for each output element for(w = 0; w< W_out;w++){ Y[m,h,w] = 0; for(c = 0;c<C;c++){//sum over all input feature maps for(p = 0;p<K;p++){//K*K filter for(q = 0;q<K;q++) Y[m,h,w] += X[c,h + p,w + q] * W[m,c,p,q]; } } } } } } void convLayer_forward(int N, int M, int C, int H, int W, int K, float* X, float* W, float* Y){ int n, m,c,h, w,p,q; int H_out = H - K + 1; int W_out = W - K + 1; for(n = 0; n < N;n++) //for each sample in the mini-batch for(m = 0; m < M;m++) //for each output feature maps for(h = 0; h < H_out; h++) // for each output element for(w = 0; w < W_out; w++){ Y[n,m,h,w] = 0; for(c = 0;c < C;c++)//sum over all input features for(p = 0; p < K;p++) for(q = 0; q < K;q++) Y[n,m,h,w] += X[n,c,h + p, w + q] * W[m,c,p,q]; } } void convLayer_forward(int N, int M, int C, int H, int W, int K, float * X, float* W, float* Y){ int n, m, c,h , w, p,q; int H_out = H - K + 1; int W_out = W - K + 1; parallel_for(n = 0;n < N;n++) parallel_for(m = 0;m < M;m++) parallel_for(h = 0; h < H_out;h++) parallel_for(w = 0; w < W_out; w++){ Y[n,m,h,w] = 0; for(c = 0; c<C;c++) for(p = 0; p< K;p++) for(q = 0; q < K;q++) Y[n,m,h,w] += X[n,c,h + p,w + q] * W[m,c,p,q]; } } __global__ void ConvLayerForward_Kernel(int C, int W_grid, int K, float* X, float* W, float* Y){ int n,m,h,w,c,p,q; n = blockIdx.x; m = blockIdx.y; h = blockIdx.z / W_grid + threadIdx.y; w = blockIdx.z % W_grid + threadIdx.x; float acc = 0; for(c = 0;c < C;c++){//sum over all input channels for(p = 0; p < K;p++) for(q = 0; q < K; q++) acc = acc + X[n,c,h + p, w + q] * W[m,c,p,q]; } Y[n,m,h,w] = acc; } __global__ void ConvLayerForward_Kernel_Shared_Memory(int C,int W_grid, int K, float* X, float* W, float* Y){ int n,m,h0,w0,h_base,w_base,h,w; int X_tile_width = TILE_WIDTH + K - 1; extern 
__shared__ float shmem[]; float* X_shared = &shmem[0]; float* W_shared = &shmem[X_tile_width * X_tile_width]; n = blockIdx.x; m = blockIdx.y; h0 = threadIdx.x; // h0 and w0 used as shorthand for threadIdx.x and threadIdx.y w0 = threadIdx.y; h_base = (blockIdx.z / W_grid) * TILE_SIZE;//vertical base out data index for the block w_base = (blockIdx.z % W_grid) * TILE_SIZE;//horizontal base out data index for the block h = h_base + h0; w = w_base + w0; float acc = 0; int c,i,j,p,q; for(c = 0;c<C;c++){//sum over all input channels if((h0 < K) && (w0 < K)) W_shared[h0,w0] = W[m,c,h0,w0]; //load weights for W[m,c,..], // h0 and w0 used as shorthand for threadIdx.x and threadIdx.y __syncthreads(); for(i = h;i<h_base + X_tile_width; i += TILE_WIDTH){ for(j = w ; j < w_base + X_tile_width; j += TILE_WIDTH) X_shared[i-h_base,j- w_base] = X[n,c,i,j];//load tile from X[n,c,...] into shared memory } __syncthreads(); for(p = 0;p<K;p++){ for(q = 0; q< K;q++) acc = acc + X_shared[h+p,w+q] * W_shared[p,q]; } __syncthreads(); } Y[n,m,h,w] = acc; } #define TILE_WIDTH 16 void void ConvLayerForward_cuda(){ int W_grid = W_out / TILE_WIDTH; int H_grid = H_out / TILE_WIDTH; Z = H_grid * W_grid; dim3 blockDim(TILE_WIDTH, TILE_WIDTH,1); dim3 gridDim(N,M,Z); ConvLayerForward_Kernel<<<gridDim,blockDim>>>(); } void poolingLayer_forward(int M, int H, int W, int K,float * Y, float* S){ int m,x,y,p,q; for(m = 0;m<M;m++){ for(x = 0;x < H/K;x++){ for(y = 0;y < W/K;y++){ S[m,x,y] = 0.; for(p = 0;p<K;p++){ for(q = 0;q < K;q++) S[m,x,y] = S[m,x,y] + Y[m,K*x + p,K*y + q]/(K*K); } } } //add bias and apply non-linear activation S[m,x,y] = sigmoid(S[m,x,y] + b[m]); } } void convLayer_backward_xgrad(int M, int C, int H_in, int W_in, int K, float * dE_dY, float* W, float * dE_dX) { int m,c,h,w,p,q; int H_out = H_in - K + 1; int W_out = W_in - K + 1; for(c = 0;c < C;c++) for(h = 0;h < H_in;h++) for(w = 0;w < W_in;w++) dE_dX[c,h,w] = 0; for(m = 0; m < M;m++) for(h = 0; h < H_out;h++) for(w = 0; w < W_out;w++) 
for(c = 0;c < C;c++) for(p = 0; p < K;p++) for(q = 0; q < K;q++) dE_dX[c,h + p, w + q] += dE_dY[m,h,w] * W[m,c,p,q]; } void convLayer_backward_wgrad(int M, int C, int H,int W,int K, float *dE_dY, float *X, float *dE_dW) { int m, c, h, w, p,q; int H_out = H - K + 1; int W_out = W - K + 1; for(m = 0; m < M; m++) for(c = 0; c< C;c++) for(p = 0; p < K; p++) for(q = 0; q < K; q++) dE_dW[m,c,p,q] = 0; for(m = 0; m < M; m++) for(h = 0; h < H_out; h++) for(w = 0; w < W_out; w++) for(c = 0; c< C; c++) for(p = 0; p < K; p++) for(q = 0; q < K; q++) dE_dW[m,c,p,q] += X[c,h + p,w + q] * dE_dY[m,h,w]; } // cuda implementation //matrix multiplication __global__ void unroll_Kernel(int C, int H, int W, int K, float* X,float* X_unroll){ int c,s,h_out,w_out,h_unroll,w_base,p,q; int t = blockIdx.x * CUDA_MAX_NUM_THREADS + threadIdx.x; int H_out = H - K + 1; int W_out = W - K + 1; int W_unroll = H_out * W_out; if(t < C * W_unroll){ c = t / W_unroll; s = t % W_unroll; h_out = s / W_out; w_out = s % W_out; h_unroll = h_out * W_out + w_out; w_base = c * K * K; for(p = 0;p<K;p++) for(q = 0;q<K;q++){ w_unroll = w_base + p * K + q; X_unroll(h_unroll,w_unroll) = X(c,h_out + p,w_out + q); } } } void unroll_gpu(int C, int H, int W, int K, float* X, float* X_unroll){ int H_out = H - K + 1; int W_out = W - K + 1; int num_threads = C * H_out * W_out; int num_blocks = ceil((C * H_out * W_out)/ CUDA_MAX_NUM_THREADS); unroll_Kernel<<<num_blocks, CUDA_MAX_NUM_THREADS>>>(); } void unroll(int C, int H, int W, int K,float* X,float * X_unroll){ int c,h,w,p,q,w_base,w_unroll,h_unroll; int H_out = H - K + 1; int W_out = W - K + 1; for(c = 0;c<C;c++){ w_base = c * (K * K); for(p = 0; p < K; p++) for(q = 0; q < K; q++){ for(h = 0;h<H_out;h++) for(w = 0; w < W_out;w++){ w_unroll = w_base + p * K + q; h_unroll = h * W_out + w; X_unroll(h_unroll,w_unroll) = X(c,h+p,w+q); } } } } void convLayer_forward(int N, int M, int C, int H, int W, int K, float* X, float *W_){ int W_out = W - K + 1; int H_out = H - K + 1; 
int W_unroll = C * K * K; int H_unroll = H_out * W_out; float* X_unrolled = malloc(W_unroll * H_unroll * sizeof(float)); for(int n = 0;n<N;n++){ unroll(C,H,W,K,n,X,X_unrolled); gemm(H_unroll,M,W_unroll,X_unrolled,W,Y[n]); } }
2fc77df2bfd1f2dccece19886545dac46a7f8340.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <opencv2/imgcodecs.hpp> #include <opencv2/imgproc.hpp> #include <string> #include <vector> #include "dali/kernels/imgproc/warp_gpu.h" #include "dali/kernels/imgproc/warp/affine.h" #include "dali/test/tensor_test_utils.h" #include "dali/test/dump_diff.h" #include "dali/test/mat2tensor.h" #include "dali/test/test_tensors.h" #include "dali/kernels/scratch.h" #include "dali/kernels/alloc.h" #include "dali/test/dali_test_config.h" #include "dali/core/geom/transform.h" namespace dali { namespace kernels { class WarpPrivateTest { public: template <typename Mapping, int ndim, typename Out, typename In, typename Border> static kernels::warp::WarpSetup<ndim, Out, In> & GetSetup(kernels::WarpGPU<Mapping, ndim, Out, In, Border> &kernel) { return kernel.setup; } }; TEST(WarpGPU, check_kernel) { check_kernel<WarpGPU<AffineMapping2D, 2, float, uint8_t, float>>(); SUCCEED(); } void WarpGPU_Affine_Transpose(bool force_variable) { AffineMapping2D mapping_cpu = mat2x3{{ { 0, 1, 0 }, { 1, 0, 0 } }}; cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/alley.png"); auto cpu_img = view_as_tensor<uint8_t>(cv_img); auto gpu_img = copy<AllocType::GPU>(cpu_img); auto img_tensor = gpu_img.first; TensorListView<StorageGPU, uint8_t, 3> in_list; in_list.resize(1, 3); 
in_list.shape.set_tensor_shape(0, img_tensor.shape); in_list.data[0] = img_tensor.data; WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, BorderClamp> warp; ScratchpadAllocator scratch_alloc; auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, 1); TensorShape<2> out_shape = { img_tensor.shape[1], img_tensor.shape[0] }; KernelContext ctx = {}; auto out_shapes_hw = make_span<1>(&out_shape); auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { 1 }); copy(mappings, make_tensor_cpu<1>(&mapping_cpu, { 1 })); auto interp = DALI_INTERP_NN; KernelRequirements req; if (force_variable) { auto &setup = WarpPrivateTest::GetSetup(warp); setup.SetBlockDim(dim3(32, 8, 1)); auto out_shapes = setup.GetOutputShape(in_list.shape, out_shapes_hw); req = setup.Setup(out_shapes, true); req.scratch_sizes[static_cast<int>(AllocType::GPU)] += sizeof(warp::SampleDesc<2, int, int>); } else { req = warp.Setup(ctx, in_list, mappings, out_shapes_hw, {&interp, 1}); } scratch_alloc.Reserve(req.scratch_sizes); TestTensorList<uint8_t, 3> out; out.reshape(req.output_shapes[0].to_static<3>()); auto scratchpad = scratch_alloc.GetScratchpad(); ctx.scratchpad = &scratchpad; warp.Run(ctx, out.gpu(0), in_list, mappings, out_shapes_hw, {&interp, 1}); auto cpu_out = out.cpu(0)[0]; hipDeviceSynchronize(); ASSERT_EQ(cpu_out.shape[0], img_tensor.shape[1]); ASSERT_EQ(cpu_out.shape[1], img_tensor.shape[0]); ASSERT_EQ(cpu_out.shape[2], 3); int errors = 0; int printed = 0; for (int y = 0; y < cpu_out.shape[0]; y++) { for (int x = 0; x < cpu_out.shape[1]; x++) { for (int c = 0; c < 3; c++) { if (*cpu_out(y, x, c) != *cpu_img(x, y, c)) { if (errors++ < 100) { printed++; EXPECT_EQ(*cpu_out(y, x, c), *cpu_img(x, y, c)) << "@ x = " << x << " y = " << y << " c = " << c; } } } } } if (printed != errors) { FAIL() << (errors - printed) << " more erors."; } } TEST(WarpGPU, Affine_Transpose_ForceVariable) { WarpGPU_Affine_Transpose(true); } TEST(WarpGPU, Affine_Transpose_Single) { 
WarpGPU_Affine_Transpose(false); } /** * @brief Apply correction of pixel centers and convert the mapping to * OpenCV matrix type. */ inline cv::Matx<float, 2, 3> AffineToCV(const AffineMapping2D &mapping) { vec2 translation = mapping({0.5f, 0.5f}) - vec2(0.5f, 0.5f); mat2x3 tmp = mapping.transform; tmp.set_col(2, translation); cv::Matx<float, 2, 3> cv_transform; for (int i = 0; i < 2; i++) for (int j = 0; j < 3; j++) cv_transform(i, j) = tmp(i, j); return cv_transform; } TEST(WarpGPU, Affine_RotateScale_Single) { cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/dots.png"); auto cpu_img = view_as_tensor<uint8_t>(cv_img); auto gpu_img = copy<AllocType::GPU>(cpu_img); auto img_tensor = gpu_img.first; vec2 center(cv_img.cols * 0.5f, cv_img.rows * 0.5f); int scale = 10; auto tr = translation(center) * rotation2D(-M_PI/4) * translation(-center) * scaling(vec2(1.0f/scale, 1.0f/scale)); AffineMapping2D mapping_cpu = sub<2, 3>(tr, 0, 0); TensorListView<StorageGPU, uint8_t, 3> in_list; in_list.resize(1, 3); in_list.shape.set_tensor_shape(0, img_tensor.shape); in_list.data[0] = img_tensor.data; WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, uint8_t> warp; ScratchpadAllocator scratch_alloc; auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, 1); TensorShape<2> out_shape = { img_tensor.shape[0] * scale, img_tensor.shape[1] * scale }; KernelContext ctx = {}; auto out_shapes_hw = make_span<1>(&out_shape); auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { 1 }); copy(mappings, make_tensor_cpu<1>(&mapping_cpu, { 1 })); auto interp = DALI_INTERP_LINEAR; auto &setup = WarpPrivateTest::GetSetup(warp); auto out_shapes = setup.GetOutputShape(in_list.shape, out_shapes_hw); setup.SetBlockDim(dim3(32, 24, 1)); // force non-square block KernelRequirements req = setup.Setup(out_shapes, true); scratch_alloc.Reserve(req.scratch_sizes); TestTensorList<uint8_t, 3> out; out.reshape(req.output_shapes[0].to_static<3>()); auto scratchpad = 
scratch_alloc.GetScratchpad(); ctx.scratchpad = &scratchpad; warp.Run(ctx, out.gpu(0), in_list, mappings, out_shapes_hw, {&interp, 1}, 255); auto cpu_out = out.cpu(0)[0]; hipDeviceSynchronize(); ASSERT_EQ(cpu_out.shape[0], out_shapes_hw[0][0]); ASSERT_EQ(cpu_out.shape[1], out_shapes_hw[0][1]); ASSERT_EQ(cpu_out.shape[2], 3); cv::Mat cv_out(cpu_out.shape[0], cpu_out.shape[1], CV_8UC3, cpu_out.data); cv::Matx<float, 2, 3> cv_transform = AffineToCV(mapping_cpu); cv::Mat cv_ref; cv::warpAffine(cv_img, cv_ref, cv_transform, cv::Size(out_shape[1], out_shape[0]), cv::INTER_LINEAR|cv::WARP_INVERSE_MAP, cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255, 255)); auto ref_img = view_as_tensor<uint8_t>(cv_ref); Check(cpu_out, ref_img, EqualEps(8)); if (HasFailure()) testing::DumpDiff("WarpAffine_RotateScale", cv_out, cv_ref); } TEST(WarpGPU, Affine_RotateScale_Uniform) { cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/dots.png"); auto cpu_img = view_as_tensor<uint8_t>(cv_img); auto gpu_img = copy<AllocType::GPU>(cpu_img); auto img_tensor = gpu_img.first; vec2 center(cv_img.cols * 0.5f, cv_img.rows * 0.5f); const int samples = 10; std::vector<AffineMapping2D> mapping_cpu(samples); int scale = 10; TensorListView<StorageGPU, uint8_t, 3> in_list; in_list.resize(samples, 3); for (int i = 0; i < samples; i++) { in_list.shape.set_tensor_shape(i, img_tensor.shape); in_list.data[i] = img_tensor.data; auto tr = translation(center) * rotation2D(-2*M_PI * i / samples) * translation(-center) * scaling(vec2(1.0f/scale, 1.0f/scale)); mapping_cpu[i] = sub<2, 3>(tr, 0, 0); } WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, uint8_t> warp; ScratchpadAllocator scratch_alloc; auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, samples); TensorShape<2> out_shape = { img_tensor.shape[0] * scale, img_tensor.shape[1] * scale }; KernelContext ctx = {}; std::vector<TensorShape<2>> out_shapes_hw(samples); for (int i = 0; i < samples; i++) out_shapes_hw[i] = out_shape; 
auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { samples }); copy(mappings, make_tensor_cpu<1>(mapping_cpu.data(), { samples })); auto interp = DALI_INTERP_LINEAR; KernelRequirements req = warp.Setup( ctx, in_list, mappings, make_span(out_shapes_hw), {&interp, 1}, 255); scratch_alloc.Reserve(req.scratch_sizes); TestTensorList<uint8_t, 3> out; out.reshape(req.output_shapes[0].to_static<3>()); auto scratchpad = scratch_alloc.GetScratchpad(); ctx.scratchpad = &scratchpad; warp.Run(ctx, out.gpu(0), in_list, mappings, make_span(out_shapes_hw), {&interp, 1}, 255); hipDeviceSynchronize(); for (int i = 0; i < samples; i++) { auto cpu_out = out.cpu(0)[i]; ASSERT_EQ(cpu_out.shape[0], out_shapes_hw[i][0]); ASSERT_EQ(cpu_out.shape[1], out_shapes_hw[i][1]); ASSERT_EQ(cpu_out.shape[2], 3); cv::Mat cv_out(cpu_out.shape[0], cpu_out.shape[1], CV_8UC3, cpu_out.data); cv::Matx<float, 2, 3> cv_transform = AffineToCV(mapping_cpu[i]); cv::Mat cv_ref; cv::warpAffine(cv_img, cv_ref, cv_transform, cv::Size(out_shape[1], out_shape[0]), cv::INTER_LINEAR|cv::WARP_INVERSE_MAP, cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255, 255)); auto ref_img = view_as_tensor<uint8_t>(cv_ref); Check(cpu_out, ref_img, EqualEps(8)); if (HasFailure()) { auto name = "Warp_Affine_RotateScale_" + std::to_string(i); testing::DumpDiff(name, cv_out, cv_ref); } } } } // namespace kernels } // namespace dali
2fc77df2bfd1f2dccece19886545dac46a7f8340.cu
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <opencv2/imgcodecs.hpp> #include <opencv2/imgproc.hpp> #include <string> #include <vector> #include "dali/kernels/imgproc/warp_gpu.h" #include "dali/kernels/imgproc/warp/affine.h" #include "dali/test/tensor_test_utils.h" #include "dali/test/dump_diff.h" #include "dali/test/mat2tensor.h" #include "dali/test/test_tensors.h" #include "dali/kernels/scratch.h" #include "dali/kernels/alloc.h" #include "dali/test/dali_test_config.h" #include "dali/core/geom/transform.h" namespace dali { namespace kernels { class WarpPrivateTest { public: template <typename Mapping, int ndim, typename Out, typename In, typename Border> static kernels::warp::WarpSetup<ndim, Out, In> & GetSetup(kernels::WarpGPU<Mapping, ndim, Out, In, Border> &kernel) { return kernel.setup; } }; TEST(WarpGPU, check_kernel) { check_kernel<WarpGPU<AffineMapping2D, 2, float, uint8_t, float>>(); SUCCEED(); } void WarpGPU_Affine_Transpose(bool force_variable) { AffineMapping2D mapping_cpu = mat2x3{{ { 0, 1, 0 }, { 1, 0, 0 } }}; cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/alley.png"); auto cpu_img = view_as_tensor<uint8_t>(cv_img); auto gpu_img = copy<AllocType::GPU>(cpu_img); auto img_tensor = gpu_img.first; TensorListView<StorageGPU, uint8_t, 3> in_list; in_list.resize(1, 3); in_list.shape.set_tensor_shape(0, img_tensor.shape); in_list.data[0] = 
img_tensor.data; WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, BorderClamp> warp; ScratchpadAllocator scratch_alloc; auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, 1); TensorShape<2> out_shape = { img_tensor.shape[1], img_tensor.shape[0] }; KernelContext ctx = {}; auto out_shapes_hw = make_span<1>(&out_shape); auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { 1 }); copy(mappings, make_tensor_cpu<1>(&mapping_cpu, { 1 })); auto interp = DALI_INTERP_NN; KernelRequirements req; if (force_variable) { auto &setup = WarpPrivateTest::GetSetup(warp); setup.SetBlockDim(dim3(32, 8, 1)); auto out_shapes = setup.GetOutputShape(in_list.shape, out_shapes_hw); req = setup.Setup(out_shapes, true); req.scratch_sizes[static_cast<int>(AllocType::GPU)] += sizeof(warp::SampleDesc<2, int, int>); } else { req = warp.Setup(ctx, in_list, mappings, out_shapes_hw, {&interp, 1}); } scratch_alloc.Reserve(req.scratch_sizes); TestTensorList<uint8_t, 3> out; out.reshape(req.output_shapes[0].to_static<3>()); auto scratchpad = scratch_alloc.GetScratchpad(); ctx.scratchpad = &scratchpad; warp.Run(ctx, out.gpu(0), in_list, mappings, out_shapes_hw, {&interp, 1}); auto cpu_out = out.cpu(0)[0]; cudaDeviceSynchronize(); ASSERT_EQ(cpu_out.shape[0], img_tensor.shape[1]); ASSERT_EQ(cpu_out.shape[1], img_tensor.shape[0]); ASSERT_EQ(cpu_out.shape[2], 3); int errors = 0; int printed = 0; for (int y = 0; y < cpu_out.shape[0]; y++) { for (int x = 0; x < cpu_out.shape[1]; x++) { for (int c = 0; c < 3; c++) { if (*cpu_out(y, x, c) != *cpu_img(x, y, c)) { if (errors++ < 100) { printed++; EXPECT_EQ(*cpu_out(y, x, c), *cpu_img(x, y, c)) << "@ x = " << x << " y = " << y << " c = " << c; } } } } } if (printed != errors) { FAIL() << (errors - printed) << " more erors."; } } TEST(WarpGPU, Affine_Transpose_ForceVariable) { WarpGPU_Affine_Transpose(true); } TEST(WarpGPU, Affine_Transpose_Single) { WarpGPU_Affine_Transpose(false); } /** * @brief Apply correction of pixel centers and convert 
the mapping to * OpenCV matrix type. */ inline cv::Matx<float, 2, 3> AffineToCV(const AffineMapping2D &mapping) { vec2 translation = mapping({0.5f, 0.5f}) - vec2(0.5f, 0.5f); mat2x3 tmp = mapping.transform; tmp.set_col(2, translation); cv::Matx<float, 2, 3> cv_transform; for (int i = 0; i < 2; i++) for (int j = 0; j < 3; j++) cv_transform(i, j) = tmp(i, j); return cv_transform; } TEST(WarpGPU, Affine_RotateScale_Single) { cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/dots.png"); auto cpu_img = view_as_tensor<uint8_t>(cv_img); auto gpu_img = copy<AllocType::GPU>(cpu_img); auto img_tensor = gpu_img.first; vec2 center(cv_img.cols * 0.5f, cv_img.rows * 0.5f); int scale = 10; auto tr = translation(center) * rotation2D(-M_PI/4) * translation(-center) * scaling(vec2(1.0f/scale, 1.0f/scale)); AffineMapping2D mapping_cpu = sub<2, 3>(tr, 0, 0); TensorListView<StorageGPU, uint8_t, 3> in_list; in_list.resize(1, 3); in_list.shape.set_tensor_shape(0, img_tensor.shape); in_list.data[0] = img_tensor.data; WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, uint8_t> warp; ScratchpadAllocator scratch_alloc; auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, 1); TensorShape<2> out_shape = { img_tensor.shape[0] * scale, img_tensor.shape[1] * scale }; KernelContext ctx = {}; auto out_shapes_hw = make_span<1>(&out_shape); auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { 1 }); copy(mappings, make_tensor_cpu<1>(&mapping_cpu, { 1 })); auto interp = DALI_INTERP_LINEAR; auto &setup = WarpPrivateTest::GetSetup(warp); auto out_shapes = setup.GetOutputShape(in_list.shape, out_shapes_hw); setup.SetBlockDim(dim3(32, 24, 1)); // force non-square block KernelRequirements req = setup.Setup(out_shapes, true); scratch_alloc.Reserve(req.scratch_sizes); TestTensorList<uint8_t, 3> out; out.reshape(req.output_shapes[0].to_static<3>()); auto scratchpad = scratch_alloc.GetScratchpad(); ctx.scratchpad = &scratchpad; warp.Run(ctx, out.gpu(0), in_list, mappings, 
out_shapes_hw, {&interp, 1}, 255); auto cpu_out = out.cpu(0)[0]; cudaDeviceSynchronize(); ASSERT_EQ(cpu_out.shape[0], out_shapes_hw[0][0]); ASSERT_EQ(cpu_out.shape[1], out_shapes_hw[0][1]); ASSERT_EQ(cpu_out.shape[2], 3); cv::Mat cv_out(cpu_out.shape[0], cpu_out.shape[1], CV_8UC3, cpu_out.data); cv::Matx<float, 2, 3> cv_transform = AffineToCV(mapping_cpu); cv::Mat cv_ref; cv::warpAffine(cv_img, cv_ref, cv_transform, cv::Size(out_shape[1], out_shape[0]), cv::INTER_LINEAR|cv::WARP_INVERSE_MAP, cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255, 255)); auto ref_img = view_as_tensor<uint8_t>(cv_ref); Check(cpu_out, ref_img, EqualEps(8)); if (HasFailure()) testing::DumpDiff("WarpAffine_RotateScale", cv_out, cv_ref); } TEST(WarpGPU, Affine_RotateScale_Uniform) { cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/dots.png"); auto cpu_img = view_as_tensor<uint8_t>(cv_img); auto gpu_img = copy<AllocType::GPU>(cpu_img); auto img_tensor = gpu_img.first; vec2 center(cv_img.cols * 0.5f, cv_img.rows * 0.5f); const int samples = 10; std::vector<AffineMapping2D> mapping_cpu(samples); int scale = 10; TensorListView<StorageGPU, uint8_t, 3> in_list; in_list.resize(samples, 3); for (int i = 0; i < samples; i++) { in_list.shape.set_tensor_shape(i, img_tensor.shape); in_list.data[i] = img_tensor.data; auto tr = translation(center) * rotation2D(-2*M_PI * i / samples) * translation(-center) * scaling(vec2(1.0f/scale, 1.0f/scale)); mapping_cpu[i] = sub<2, 3>(tr, 0, 0); } WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, uint8_t> warp; ScratchpadAllocator scratch_alloc; auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, samples); TensorShape<2> out_shape = { img_tensor.shape[0] * scale, img_tensor.shape[1] * scale }; KernelContext ctx = {}; std::vector<TensorShape<2>> out_shapes_hw(samples); for (int i = 0; i < samples; i++) out_shapes_hw[i] = out_shape; auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { samples }); copy(mappings, 
make_tensor_cpu<1>(mapping_cpu.data(), { samples })); auto interp = DALI_INTERP_LINEAR; KernelRequirements req = warp.Setup( ctx, in_list, mappings, make_span(out_shapes_hw), {&interp, 1}, 255); scratch_alloc.Reserve(req.scratch_sizes); TestTensorList<uint8_t, 3> out; out.reshape(req.output_shapes[0].to_static<3>()); auto scratchpad = scratch_alloc.GetScratchpad(); ctx.scratchpad = &scratchpad; warp.Run(ctx, out.gpu(0), in_list, mappings, make_span(out_shapes_hw), {&interp, 1}, 255); cudaDeviceSynchronize(); for (int i = 0; i < samples; i++) { auto cpu_out = out.cpu(0)[i]; ASSERT_EQ(cpu_out.shape[0], out_shapes_hw[i][0]); ASSERT_EQ(cpu_out.shape[1], out_shapes_hw[i][1]); ASSERT_EQ(cpu_out.shape[2], 3); cv::Mat cv_out(cpu_out.shape[0], cpu_out.shape[1], CV_8UC3, cpu_out.data); cv::Matx<float, 2, 3> cv_transform = AffineToCV(mapping_cpu[i]); cv::Mat cv_ref; cv::warpAffine(cv_img, cv_ref, cv_transform, cv::Size(out_shape[1], out_shape[0]), cv::INTER_LINEAR|cv::WARP_INVERSE_MAP, cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255, 255)); auto ref_img = view_as_tensor<uint8_t>(cv_ref); Check(cpu_out, ref_img, EqualEps(8)); if (HasFailure()) { auto name = "Warp_Affine_RotateScale_" + std::to_string(i); testing::DumpDiff(name, cv_out, cv_ref); } } } } // namespace kernels } // namespace dali
7d2b27cf19e1d9d3198fac367999acb40462fa0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifdef PADDLE_WITH_HIP #include <hiprand.h> #include <hiprand_kernel.h> #include <hipcub/hipcub.hpp> typedef hiprandState hiprandState_t; namespace cub = hipcub; #else #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hipcub/hipcub.hpp> #endif #include <iterator> #include <random> #include "paddle/fluid/operators/class_center_sample_op.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { #define CUDA_KERNEL_LOOP(i, n) \ for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x, \ step = blockDim.x * gridDim.x; \ i < (n); i += step) using Tensor = framework::Tensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; inline int32_t NumBlocks(const int32_t n) { return ::min((n + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T> __global__ void RandomSampleClassCenter(const int64_t n, int64_t seed, int64_t increment, const int64_t max_val, T* buffer) { const int id = blockIdx.x * blockDim.x + threadIdx.x; hiprandState_t localState; size_t local_seed = (static_cast<size_t>(seed) + 0x9E3779B9U + 
(static_cast<size_t>(id) << 6U) + (static_cast<size_t>(id) >> 2U)); #ifdef PADDLE_WITH_HIP hiprand_init(local_seed, id, increment, &localState); CUDA_KERNEL_LOOP(i, n) { buffer[i] = static_cast<T>(hiprand(&localState) % max_val); } #else hiprand_init(local_seed, id, increment, &localState); CUDA_KERNEL_LOOP(i, n) { buffer[i] = static_cast<T>(hiprand(&localState) % max_val); } #endif } template <typename T> __global__ void Range(const int64_t n, T* out) { CUDA_KERNEL_LOOP(i, n) { out[i] = static_cast<T>(i); } } template <typename T> __global__ void MarkPositiveClassCenter(const int64_t n, const int64_t rank, const T* class_interval_ptr, const int num_classes, const T* labels, T* out) { CUDA_KERNEL_LOOP(i, n) { T label = labels[i] - class_interval_ptr[rank]; if (label >= 0 && label < num_classes) { out[label] = label - num_classes; } } } template <typename T> __device__ void FindIntervalIndex(const T* class_interval_ptr, const int64_t nranks, const T value, int64_t* find_index) { int64_t start = 0; int64_t end = nranks; int64_t mid = ((end - start) >> 1) + start + 1; while (start < end) { if (class_interval_ptr[mid] == value) break; if (class_interval_ptr[mid] > value) end = mid - 1; else start = mid; mid = ((end - start) >> 1) + start + 1; } *find_index = min(mid, end); } template <typename T> __global__ void GetClassCenterBound(const int64_t n, const int64_t nranks, const T* class_interval_ptr, const T* key_ptr, const T* value_ptr, T* bound_index, T* bound_value) { CUDA_KERNEL_LOOP(i, n) { if (i != 0) { int64_t cur_index, pre_index; FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i], &cur_index); FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i - 1], &pre_index); if (cur_index > pre_index) { assert(cur_index < nranks); #pragma unroll for (int32_t j = pre_index + 1; j <= cur_index; ++j) { bound_index[j] = static_cast<T>(i); bound_value[j] = value_ptr[i]; } } } } CUDA_KERNEL_LOOP(i, nranks + 1) { int64_t first_index, last_index; 
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[0], &first_index); FindIntervalIndex(class_interval_ptr, nranks, key_ptr[n - 1], &last_index); if (i <= first_index) { bound_index[i] = 0; bound_value[i] = value_ptr[0]; } else if (i > last_index) { bound_index[i] = n; bound_value[i] = value_ptr[n - 1] + 1; } } } template <typename T> __global__ void GetRemappedLabel(const int64_t n, const int64_t nranks, const T* sampled_class_interval_ptr, const T* bound_index, const T* bound_value, const T* label_map_key, T* label_map_value, T* mapped_label) { CUDA_KERNEL_LOOP(i, n) { #pragma unroll for (int64_t j = 0; j < nranks; j++) { if (i >= bound_index[j] && i < bound_index[j + 1]) { label_map_value[i] = label_map_value[i] - bound_value[j] + sampled_class_interval_ptr[j]; } } mapped_label[label_map_key[i]] = label_map_value[i]; } } // aligned vector generates vectorized load/store on CUDA template <typename T, int Size> struct alignas(sizeof(T) * Size) AlignedVector { T val[Size]; }; template <typename T> inline int VectorizedSize(const T* pointer) { uint64_t address = reinterpret_cast<uint64_t>(pointer); constexpr int vec4 = std::alignment_of<AlignedVector<T, 4>>::value; // NOLINT if (address % vec4 == 0) { return 4; } return 1; } #undef CUDA_KERNEL_LOOP template <typename T> class NotEqualToPreviousAdjacentIterator { public: using self_type = NotEqualToPreviousAdjacentIterator; using value_type = T; using difference_type = std::ptrdiff_t; using pointer = T*; using reference = T; using iterator_category = std::input_iterator_tag; public: __host__ __device__ __forceinline__ NotEqualToPreviousAdjacentIterator(const T* arr, int64_t offset) : arr_(arr), offset_(offset) {} __host__ __device__ __forceinline__ reference operator*() const { return offset_ == 0 ? 0 : (arr_[offset_] == arr_[offset_ - 1] ? 
0 : 1); } template <typename Distance> __host__ __device__ __forceinline__ self_type operator+(Distance n) const { self_type ret(arr_, offset_ + n); return ret; } template <typename Distance> __host__ __device__ __forceinline__ reference operator[](Distance n) const { return *(*this + n); } private: const T* arr_; int64_t offset_; }; template <typename T> struct ActualNumSampledFunctor { __host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const { return max(num_samples, (b - a)); } T num_samples; explicit ActualNumSampledFunctor(const T num) : num_samples(num) {} }; template <typename T> class MemoryBuffer { public: MemoryBuffer(const int num_buffer_ele, const int num_temp_ele, const int nranks, const platform::Place& place) { offset1 = 0; offset2 = offset1 + num_buffer_ele; offset3 = offset2 + num_buffer_ele; offset4 = offset3 + num_buffer_ele; offset5 = offset4 + num_buffer_ele; offset6 = offset5 + (nranks + 1); offset7 = offset6 + (nranks + 1); offset8 = offset7 + (nranks + 1); offset9 = offset8 + num_temp_ele; buffer_ptr = buffer.mutable_data<T>( {4 * num_buffer_ele + 3 * (nranks + 1) + num_temp_ele}, place); } T* cub_sort_keys_ptr() { return buffer_ptr + offset1; } T* cub_sort_keys_out_ptr() { return buffer_ptr + offset2; } T* cub_sort_values_ptr() { return buffer_ptr + offset3; } T* cub_sort_values_out_ptr() { return buffer_ptr + offset4; } T* bound_index_ptr() { return buffer_ptr + offset5; } T* bound_value_ptr() { return buffer_ptr + offset6; } T* class_interval_ptr() { return buffer_ptr + offset7; } void* cub_temp_storage_ptr() { return reinterpret_cast<void*>(buffer_ptr + offset8); } private: Tensor buffer; T* buffer_ptr; int offset1; int offset2; int offset3; int offset4; int offset5; int offset6; int offset7; int offset8; int offset9; }; template <typename DeviceContext, typename T> class ClassCenterSampleCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* 
label = ctx.Input<Tensor>("Label"); auto* remapped_label = ctx.Output<Tensor>("RemappedLabel"); auto* sampled_local_class_center = ctx.Output<Tensor>("SampledLocalClassCenter"); int num_classes = ctx.Attr<int>("num_classes"); int num_samples = ctx.Attr<int>("num_samples"); int rid = ctx.Attr<int>("ring_id"); int nranks = ctx.Attr<int>("nranks"); int rank = ctx.Attr<int>("rank"); int seed = ctx.Attr<int>("seed"); bool fix_seed = ctx.Attr<bool>("fix_seed"); PADDLE_ENFORCE_GT(num_classes, 0, platform::errors::InvalidArgument( "The value 'num_classes' for Op(class_center_sample) " "must be greater than 0, " "but the value given is %d.", num_classes)); PADDLE_ENFORCE_GT(num_samples, 0, platform::errors::InvalidArgument( "The value 'num_samples' for Op(class_center_sample) " "must be greater than 0, " "but the value given is %d.", num_samples)); PADDLE_ENFORCE_LE(num_samples, num_classes, platform::errors::InvalidArgument( "The value 'num_samples' for Op(class_center_sample) " "must be less than or equal to %d, " "but the value given is %d.", num_classes, num_samples)); auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto place = dev_ctx.GetPlace(); int batch_size = label->numel(); // Algorithm: // We first randomly generate a value in [0, num_classes) on each position // in a array(shape[num_classes]). Then, we mark the element as negative // value in the array according input label. Now, we can sort the array // by ascending to ensure that the positive class center always in the // front of the sorted array. So, we can get the sampled class center // index by sorted keys. Finally, we can get the rempped label by remap // the input label according sampled class center. 
// step 1: Calculate num classes per device using nccl all reduce std::vector<T> shard_dim_vec(nranks + 1, 0); shard_dim_vec[rank + 1] = num_classes; Tensor num_classes_per_device; framework::TensorFromVector(shard_dim_vec, ctx.cuda_device_context(), &num_classes_per_device); T* num_classes_per_device_ptr = num_classes_per_device.data<T>(); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { const auto& comm = platform::NCCLCommContext::Instance().Get(rid, ctx.GetPlace()); // use global calculate stream const auto calcu_stream = static_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(ctx.GetPlace())) ->stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), platform::ToNCCLDataType(num_classes_per_device.type()), ncclSum, comm->comm(), calcu_stream)); } #endif // step 2: Determine temporary device storage requirements int num_buffer_ele = ::max(batch_size, num_classes); size_t cub_sort_temp_store_size = 0; PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceRadixSort::SortPairs<T, T>( nullptr, cub_sort_temp_store_size, nullptr, nullptr, nullptr, nullptr, num_buffer_ele, 0, sizeof(T) * 8, ctx.cuda_device_context().stream()))); size_t cub_sum_temp_store_size = 0; NotEqualToPreviousAdjacentIterator<T> unique_counting_iter_temp(nullptr, 0); PADDLE_ENFORCE_GPU_SUCCESS( (hipcub::DeviceScan::InclusiveSum<NotEqualToPreviousAdjacentIterator<T>, T*>( nullptr, cub_sum_temp_store_size, unique_counting_iter_temp, nullptr, batch_size, ctx.cuda_device_context().stream()))); size_t cub_scan_temp_store_size = 0; ActualNumSampledFunctor<T> actual_num_sampled_op_temp(num_samples); PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveScan( nullptr, cub_scan_temp_store_size, num_classes_per_device_ptr, num_classes_per_device_ptr, actual_num_sampled_op_temp, nranks + 1, ctx.cuda_device_context().stream()))); size_t 
cub_temp_storage_bytes = ::max(::max(cub_sort_temp_store_size, cub_scan_temp_store_size), cub_sum_temp_store_size); int num_temp_ele = cub_temp_storage_bytes / sizeof(T) + 1; // step 3: Alloc buffer memory so that we can reuse allocated memory MemoryBuffer<T> memory_buffer = MemoryBuffer<T>(num_buffer_ele, num_temp_ele, nranks, ctx.GetPlace()); T* cub_sort_keys_ptr = memory_buffer.cub_sort_keys_ptr(); T* cub_sort_keys_out_ptr = memory_buffer.cub_sort_keys_out_ptr(); T* cub_sort_values_ptr = memory_buffer.cub_sort_values_ptr(); T* cub_sort_values_out_ptr = memory_buffer.cub_sort_values_out_ptr(); T* bound_index_ptr = memory_buffer.bound_index_ptr(); T* bound_value_ptr = memory_buffer.bound_value_ptr(); T* class_interval_ptr = memory_buffer.class_interval_ptr(); void* cub_temp_storage_ptr = memory_buffer.cub_temp_storage_ptr(); // step 4: Calculate class interval among nranks PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveSum( cub_temp_storage_ptr, cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, ctx.cuda_device_context().stream()))); // step 5: random sample negative class center uint64_t seed_data; uint64_t increment; int vec_size = VectorizedSize<T>(cub_sort_keys_ptr); auto offset = ((num_classes - 1) / (NumBlocks(num_classes) * kNumCUDAThreads * vec_size) + 1) * vec_size; int device_id = ctx.GetPlace().GetDeviceId(); auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id); if (gen_cuda->GetIsInitPy() && (!fix_seed)) { auto seed_offset = gen_cuda->IncrementOffset(offset); seed_data = seed_offset.first; increment = seed_offset.second; } else { std::random_device rnd; seed_data = fix_seed ? 
seed + rank : rnd(); increment = offset; } hipLaunchKernelGGL(( RandomSampleClassCenter<T>), dim3(NumBlocks(num_classes)), dim3(kNumCUDAThreads), 0, ctx.cuda_device_context().stream(), num_classes, seed_data, increment, num_classes, cub_sort_keys_ptr); // step 6: mark positive class center as negative value // fill the sort values to index 0, 1, ..., batch_size-1 hipLaunchKernelGGL(( MarkPositiveClassCenter), dim3(NumBlocks(batch_size)), dim3(kNumCUDAThreads), 0, ctx.cuda_device_context().stream(), batch_size, rank, class_interval_ptr, num_classes, label->data<T>(), cub_sort_keys_ptr); hipLaunchKernelGGL(( Range<T>), dim3(NumBlocks(num_buffer_ele)), dim3(kNumCUDAThreads), 0, ctx.cuda_device_context().stream(), num_buffer_ele, cub_sort_values_ptr); // step 7: sort class center by ascending, so that positive class center // always be sampled. PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceRadixSort::SortPairs<T, T>( cub_temp_storage_ptr, cub_temp_storage_bytes, cub_sort_keys_ptr, cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_values_out_ptr, num_classes, 0, sizeof(T) * 8, ctx.cuda_device_context().stream()))); // step 8: sort input label ascending PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceRadixSort::SortPairs<T, T>( cub_temp_storage_ptr, cub_temp_storage_bytes, label->data<T>(), cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_keys_ptr, batch_size, 0, sizeof(T) * 8, ctx.cuda_device_context().stream()))); // step 9: Calculate new index using InclusiveSum on ascending sorted input // label NotEqualToPreviousAdjacentIterator<T> unique_counting_iter( cub_sort_keys_out_ptr, 0); PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveSum< NotEqualToPreviousAdjacentIterator<T>, T*>( cub_temp_storage_ptr, cub_temp_storage_bytes, unique_counting_iter, cub_sort_values_ptr, batch_size, ctx.cuda_device_context().stream()))); // step 10: Calculate new class center bound among ranks hipLaunchKernelGGL(( GetClassCenterBound<T>), dim3(NumBlocks(batch_size)), 
dim3(kNumCUDAThreads), 0, ctx.cuda_device_context().stream(), batch_size, nranks, class_interval_ptr, cub_sort_keys_out_ptr, cub_sort_values_ptr, bound_index_ptr, bound_value_ptr); // step 11: Calculate actual number of sampled class per device. // Since maybe num_positive_class_center > num_samples, // we need to ensure all positive class center per device are sampled. ActualNumSampledFunctor<T> actual_num_sampled_op(num_samples); PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveScan( cub_temp_storage_ptr, cub_temp_storage_bytes, bound_value_ptr, num_classes_per_device_ptr, actual_num_sampled_op, nranks + 1, ctx.cuda_device_context().stream()))); // step 12: Calculate actual sampled class interval among nranks PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveSum( cub_temp_storage_ptr, cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, ctx.cuda_device_context().stream()))); // step 13: Get remapped label for output hipLaunchKernelGGL(( GetRemappedLabel<T>), dim3(NumBlocks(batch_size)), dim3(kNumCUDAThreads), 0, ctx.cuda_device_context().stream(), batch_size, nranks, class_interval_ptr, bound_index_ptr, bound_value_ptr, cub_sort_keys_ptr, cub_sort_values_ptr, remapped_label->mutable_data<T>(ctx.GetPlace())); // step 14: Get sampled class center for output framework::TensorCopySync(num_classes_per_device, platform::CPUPlace(), &num_classes_per_device); T actual_num_samples = num_classes_per_device.data<T>()[rank + 1]; T* sampled_local_class_center_ptr = sampled_local_class_center->mutable_data<T>({actual_num_samples}, ctx.GetPlace()); memory::Copy(place, sampled_local_class_center_ptr, place, cub_sort_values_out_ptr, actual_num_samples * sizeof(T), nullptr); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( class_center_sample, ops::ClassCenterSampleCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>, 
ops::ClassCenterSampleCUDAKernel<paddle::platform::CUDADeviceContext, int>);
7d2b27cf19e1d9d3198fac367999acb40462fa0d.cu
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifdef PADDLE_WITH_HIP #include <hiprand.h> #include <hiprand_kernel.h> #include <hipcub/hipcub.hpp> typedef hiprandState curandState; namespace cub = hipcub; #else #include <curand.h> #include <curand_kernel.h> #include <cub/cub.cuh> #endif #include <iterator> #include <random> #include "paddle/fluid/operators/class_center_sample_op.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { #define CUDA_KERNEL_LOOP(i, n) \ for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x, \ step = blockDim.x * gridDim.x; \ i < (n); i += step) using Tensor = framework::Tensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; inline int32_t NumBlocks(const int32_t n) { return std::min((n + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T> __global__ void RandomSampleClassCenter(const int64_t n, int64_t seed, int64_t increment, const int64_t max_val, T* buffer) { const int id = blockIdx.x * blockDim.x + threadIdx.x; curandState localState; size_t local_seed = (static_cast<size_t>(seed) + 0x9E3779B9U + (static_cast<size_t>(id) << 6U) + (static_cast<size_t>(id) >> 2U)); #ifdef PADDLE_WITH_HIP hiprand_init(local_seed, id, increment, 
&localState); CUDA_KERNEL_LOOP(i, n) { buffer[i] = static_cast<T>(hiprand(&localState) % max_val); } #else curand_init(local_seed, id, increment, &localState); CUDA_KERNEL_LOOP(i, n) { buffer[i] = static_cast<T>(curand(&localState) % max_val); } #endif } template <typename T> __global__ void Range(const int64_t n, T* out) { CUDA_KERNEL_LOOP(i, n) { out[i] = static_cast<T>(i); } } template <typename T> __global__ void MarkPositiveClassCenter(const int64_t n, const int64_t rank, const T* class_interval_ptr, const int num_classes, const T* labels, T* out) { CUDA_KERNEL_LOOP(i, n) { T label = labels[i] - class_interval_ptr[rank]; if (label >= 0 && label < num_classes) { out[label] = label - num_classes; } } } template <typename T> __device__ void FindIntervalIndex(const T* class_interval_ptr, const int64_t nranks, const T value, int64_t* find_index) { int64_t start = 0; int64_t end = nranks; int64_t mid = ((end - start) >> 1) + start + 1; while (start < end) { if (class_interval_ptr[mid] == value) break; if (class_interval_ptr[mid] > value) end = mid - 1; else start = mid; mid = ((end - start) >> 1) + start + 1; } *find_index = min(mid, end); } template <typename T> __global__ void GetClassCenterBound(const int64_t n, const int64_t nranks, const T* class_interval_ptr, const T* key_ptr, const T* value_ptr, T* bound_index, T* bound_value) { CUDA_KERNEL_LOOP(i, n) { if (i != 0) { int64_t cur_index, pre_index; FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i], &cur_index); FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i - 1], &pre_index); if (cur_index > pre_index) { assert(cur_index < nranks); #pragma unroll for (int32_t j = pre_index + 1; j <= cur_index; ++j) { bound_index[j] = static_cast<T>(i); bound_value[j] = value_ptr[i]; } } } } CUDA_KERNEL_LOOP(i, nranks + 1) { int64_t first_index, last_index; FindIntervalIndex(class_interval_ptr, nranks, key_ptr[0], &first_index); FindIntervalIndex(class_interval_ptr, nranks, key_ptr[n - 1], &last_index); if (i <= 
first_index) { bound_index[i] = 0; bound_value[i] = value_ptr[0]; } else if (i > last_index) { bound_index[i] = n; bound_value[i] = value_ptr[n - 1] + 1; } } } template <typename T> __global__ void GetRemappedLabel(const int64_t n, const int64_t nranks, const T* sampled_class_interval_ptr, const T* bound_index, const T* bound_value, const T* label_map_key, T* label_map_value, T* mapped_label) { CUDA_KERNEL_LOOP(i, n) { #pragma unroll for (int64_t j = 0; j < nranks; j++) { if (i >= bound_index[j] && i < bound_index[j + 1]) { label_map_value[i] = label_map_value[i] - bound_value[j] + sampled_class_interval_ptr[j]; } } mapped_label[label_map_key[i]] = label_map_value[i]; } } // aligned vector generates vectorized load/store on CUDA template <typename T, int Size> struct alignas(sizeof(T) * Size) AlignedVector { T val[Size]; }; template <typename T> inline int VectorizedSize(const T* pointer) { uint64_t address = reinterpret_cast<uint64_t>(pointer); constexpr int vec4 = std::alignment_of<AlignedVector<T, 4>>::value; // NOLINT if (address % vec4 == 0) { return 4; } return 1; } #undef CUDA_KERNEL_LOOP template <typename T> class NotEqualToPreviousAdjacentIterator { public: using self_type = NotEqualToPreviousAdjacentIterator; using value_type = T; using difference_type = std::ptrdiff_t; using pointer = T*; using reference = T; using iterator_category = std::input_iterator_tag; public: __host__ __device__ __forceinline__ NotEqualToPreviousAdjacentIterator(const T* arr, int64_t offset) : arr_(arr), offset_(offset) {} __host__ __device__ __forceinline__ reference operator*() const { return offset_ == 0 ? 0 : (arr_[offset_] == arr_[offset_ - 1] ? 
0 : 1); } template <typename Distance> __host__ __device__ __forceinline__ self_type operator+(Distance n) const { self_type ret(arr_, offset_ + n); return ret; } template <typename Distance> __host__ __device__ __forceinline__ reference operator[](Distance n) const { return *(*this + n); } private: const T* arr_; int64_t offset_; }; template <typename T> struct ActualNumSampledFunctor { __host__ __device__ __forceinline__ T operator()(const T& a, const T& b) const { return max(num_samples, (b - a)); } T num_samples; explicit ActualNumSampledFunctor(const T num) : num_samples(num) {} }; template <typename T> class MemoryBuffer { public: MemoryBuffer(const int num_buffer_ele, const int num_temp_ele, const int nranks, const platform::Place& place) { offset1 = 0; offset2 = offset1 + num_buffer_ele; offset3 = offset2 + num_buffer_ele; offset4 = offset3 + num_buffer_ele; offset5 = offset4 + num_buffer_ele; offset6 = offset5 + (nranks + 1); offset7 = offset6 + (nranks + 1); offset8 = offset7 + (nranks + 1); offset9 = offset8 + num_temp_ele; buffer_ptr = buffer.mutable_data<T>( {4 * num_buffer_ele + 3 * (nranks + 1) + num_temp_ele}, place); } T* cub_sort_keys_ptr() { return buffer_ptr + offset1; } T* cub_sort_keys_out_ptr() { return buffer_ptr + offset2; } T* cub_sort_values_ptr() { return buffer_ptr + offset3; } T* cub_sort_values_out_ptr() { return buffer_ptr + offset4; } T* bound_index_ptr() { return buffer_ptr + offset5; } T* bound_value_ptr() { return buffer_ptr + offset6; } T* class_interval_ptr() { return buffer_ptr + offset7; } void* cub_temp_storage_ptr() { return reinterpret_cast<void*>(buffer_ptr + offset8); } private: Tensor buffer; T* buffer_ptr; int offset1; int offset2; int offset3; int offset4; int offset5; int offset6; int offset7; int offset8; int offset9; }; template <typename DeviceContext, typename T> class ClassCenterSampleCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* 
label = ctx.Input<Tensor>("Label"); auto* remapped_label = ctx.Output<Tensor>("RemappedLabel"); auto* sampled_local_class_center = ctx.Output<Tensor>("SampledLocalClassCenter"); int num_classes = ctx.Attr<int>("num_classes"); int num_samples = ctx.Attr<int>("num_samples"); int rid = ctx.Attr<int>("ring_id"); int nranks = ctx.Attr<int>("nranks"); int rank = ctx.Attr<int>("rank"); int seed = ctx.Attr<int>("seed"); bool fix_seed = ctx.Attr<bool>("fix_seed"); PADDLE_ENFORCE_GT(num_classes, 0, platform::errors::InvalidArgument( "The value 'num_classes' for Op(class_center_sample) " "must be greater than 0, " "but the value given is %d.", num_classes)); PADDLE_ENFORCE_GT(num_samples, 0, platform::errors::InvalidArgument( "The value 'num_samples' for Op(class_center_sample) " "must be greater than 0, " "but the value given is %d.", num_samples)); PADDLE_ENFORCE_LE(num_samples, num_classes, platform::errors::InvalidArgument( "The value 'num_samples' for Op(class_center_sample) " "must be less than or equal to %d, " "but the value given is %d.", num_classes, num_samples)); auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto place = dev_ctx.GetPlace(); int batch_size = label->numel(); // Algorithm: // We first randomly generate a value in [0, num_classes) on each position // in a array(shape[num_classes]). Then, we mark the element as negative // value in the array according input label. Now, we can sort the array // by ascending to ensure that the positive class center always in the // front of the sorted array. So, we can get the sampled class center // index by sorted keys. Finally, we can get the rempped label by remap // the input label according sampled class center. 
// step 1: Calculate num classes per device using nccl all reduce std::vector<T> shard_dim_vec(nranks + 1, 0); shard_dim_vec[rank + 1] = num_classes; Tensor num_classes_per_device; framework::TensorFromVector(shard_dim_vec, ctx.cuda_device_context(), &num_classes_per_device); T* num_classes_per_device_ptr = num_classes_per_device.data<T>(); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { const auto& comm = platform::NCCLCommContext::Instance().Get(rid, ctx.GetPlace()); // use global calculate stream const auto calcu_stream = static_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(ctx.GetPlace())) ->stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), platform::ToNCCLDataType(num_classes_per_device.type()), ncclSum, comm->comm(), calcu_stream)); } #endif // step 2: Determine temporary device storage requirements int num_buffer_ele = std::max(batch_size, num_classes); size_t cub_sort_temp_store_size = 0; PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceRadixSort::SortPairs<T, T>( nullptr, cub_sort_temp_store_size, nullptr, nullptr, nullptr, nullptr, num_buffer_ele, 0, sizeof(T) * 8, ctx.cuda_device_context().stream()))); size_t cub_sum_temp_store_size = 0; NotEqualToPreviousAdjacentIterator<T> unique_counting_iter_temp(nullptr, 0); PADDLE_ENFORCE_GPU_SUCCESS( (cub::DeviceScan::InclusiveSum<NotEqualToPreviousAdjacentIterator<T>, T*>( nullptr, cub_sum_temp_store_size, unique_counting_iter_temp, nullptr, batch_size, ctx.cuda_device_context().stream()))); size_t cub_scan_temp_store_size = 0; ActualNumSampledFunctor<T> actual_num_sampled_op_temp(num_samples); PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveScan( nullptr, cub_scan_temp_store_size, num_classes_per_device_ptr, num_classes_per_device_ptr, actual_num_sampled_op_temp, nranks + 1, ctx.cuda_device_context().stream()))); size_t cub_temp_storage_bytes = 
std::max(std::max(cub_sort_temp_store_size, cub_scan_temp_store_size), cub_sum_temp_store_size); int num_temp_ele = cub_temp_storage_bytes / sizeof(T) + 1; // step 3: Alloc buffer memory so that we can reuse allocated memory MemoryBuffer<T> memory_buffer = MemoryBuffer<T>(num_buffer_ele, num_temp_ele, nranks, ctx.GetPlace()); T* cub_sort_keys_ptr = memory_buffer.cub_sort_keys_ptr(); T* cub_sort_keys_out_ptr = memory_buffer.cub_sort_keys_out_ptr(); T* cub_sort_values_ptr = memory_buffer.cub_sort_values_ptr(); T* cub_sort_values_out_ptr = memory_buffer.cub_sort_values_out_ptr(); T* bound_index_ptr = memory_buffer.bound_index_ptr(); T* bound_value_ptr = memory_buffer.bound_value_ptr(); T* class_interval_ptr = memory_buffer.class_interval_ptr(); void* cub_temp_storage_ptr = memory_buffer.cub_temp_storage_ptr(); // step 4: Calculate class interval among nranks PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveSum( cub_temp_storage_ptr, cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, ctx.cuda_device_context().stream()))); // step 5: random sample negative class center uint64_t seed_data; uint64_t increment; int vec_size = VectorizedSize<T>(cub_sort_keys_ptr); auto offset = ((num_classes - 1) / (NumBlocks(num_classes) * kNumCUDAThreads * vec_size) + 1) * vec_size; int device_id = ctx.GetPlace().GetDeviceId(); auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id); if (gen_cuda->GetIsInitPy() && (!fix_seed)) { auto seed_offset = gen_cuda->IncrementOffset(offset); seed_data = seed_offset.first; increment = seed_offset.second; } else { std::random_device rnd; seed_data = fix_seed ? 
seed + rank : rnd(); increment = offset; } RandomSampleClassCenter<T><<<NumBlocks(num_classes), kNumCUDAThreads, 0, ctx.cuda_device_context().stream()>>>( num_classes, seed_data, increment, num_classes, cub_sort_keys_ptr); // step 6: mark positive class center as negative value // fill the sort values to index 0, 1, ..., batch_size-1 MarkPositiveClassCenter<<<NumBlocks(batch_size), kNumCUDAThreads, 0, ctx.cuda_device_context().stream()>>>( batch_size, rank, class_interval_ptr, num_classes, label->data<T>(), cub_sort_keys_ptr); Range<T><<<NumBlocks(num_buffer_ele), kNumCUDAThreads, 0, ctx.cuda_device_context().stream()>>>(num_buffer_ele, cub_sort_values_ptr); // step 7: sort class center by ascending, so that positive class center // always be sampled. PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceRadixSort::SortPairs<T, T>( cub_temp_storage_ptr, cub_temp_storage_bytes, cub_sort_keys_ptr, cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_values_out_ptr, num_classes, 0, sizeof(T) * 8, ctx.cuda_device_context().stream()))); // step 8: sort input label ascending PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceRadixSort::SortPairs<T, T>( cub_temp_storage_ptr, cub_temp_storage_bytes, label->data<T>(), cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_keys_ptr, batch_size, 0, sizeof(T) * 8, ctx.cuda_device_context().stream()))); // step 9: Calculate new index using InclusiveSum on ascending sorted input // label NotEqualToPreviousAdjacentIterator<T> unique_counting_iter( cub_sort_keys_out_ptr, 0); PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveSum< NotEqualToPreviousAdjacentIterator<T>, T*>( cub_temp_storage_ptr, cub_temp_storage_bytes, unique_counting_iter, cub_sort_values_ptr, batch_size, ctx.cuda_device_context().stream()))); // step 10: Calculate new class center bound among ranks GetClassCenterBound<T><<<NumBlocks(batch_size), kNumCUDAThreads, 0, ctx.cuda_device_context().stream()>>>( batch_size, nranks, class_interval_ptr, cub_sort_keys_out_ptr, cub_sort_values_ptr, 
bound_index_ptr, bound_value_ptr); // step 11: Calculate actual number of sampled class per device. // Since maybe num_positive_class_center > num_samples, // we need to ensure all positive class center per device are sampled. ActualNumSampledFunctor<T> actual_num_sampled_op(num_samples); PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveScan( cub_temp_storage_ptr, cub_temp_storage_bytes, bound_value_ptr, num_classes_per_device_ptr, actual_num_sampled_op, nranks + 1, ctx.cuda_device_context().stream()))); // step 12: Calculate actual sampled class interval among nranks PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveSum( cub_temp_storage_ptr, cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, ctx.cuda_device_context().stream()))); // step 13: Get remapped label for output GetRemappedLabel<T><<<NumBlocks(batch_size), kNumCUDAThreads, 0, ctx.cuda_device_context().stream()>>>( batch_size, nranks, class_interval_ptr, bound_index_ptr, bound_value_ptr, cub_sort_keys_ptr, cub_sort_values_ptr, remapped_label->mutable_data<T>(ctx.GetPlace())); // step 14: Get sampled class center for output framework::TensorCopySync(num_classes_per_device, platform::CPUPlace(), &num_classes_per_device); T actual_num_samples = num_classes_per_device.data<T>()[rank + 1]; T* sampled_local_class_center_ptr = sampled_local_class_center->mutable_data<T>({actual_num_samples}, ctx.GetPlace()); memory::Copy(place, sampled_local_class_center_ptr, place, cub_sort_values_out_ptr, actual_num_samples * sizeof(T), nullptr); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( class_center_sample, ops::ClassCenterSampleCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ClassCenterSampleCUDAKernel<paddle::platform::CUDADeviceContext, int>);
48116e2e3b49f3757c304632d10e0f6c67595170.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> c d s */ #include "hip/hip_runtime.h" #include <stdio.h> #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif #define PRECISION_z // SELLC SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS __global__ void zgesellcmv_kernel( int num_rows, int num_cols, int blocksize, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = d_colind [offset+ blocksize * n + threadIdx.x ]; magmaDoubleComplex val = d_val[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*d_x[col]; } } d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLC/SELLP. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row (=1) @param alpha magmaDoubleComplex scalar multiplier @param d_val magmaDoubleComplex* array containing values of A in SELLC/P @param d_colind magma_int_t* columnindices of A in SELLC/P @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x magmaDoubleComplex* input vector x @param beta magmaDoubleComplex scalar multiplier @param d_y magmaDoubleComplex* input/output vector y @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgesellcmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y ){ // the kernel can only handle up to 65535 slices // (~2M rows for blocksize 32) dim3 grid( slices, 1, 1); hipLaunchKernelGGL(( zgesellcmv_kernel), dim3(grid), dim3(blocksize), 0, magma_stream , m, n, blocksize, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); return MAGMA_SUCCESS; }
48116e2e3b49f3757c304632d10e0f6c67595170.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> c d s */ #include "cuda_runtime.h" #include <stdio.h> #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif #define PRECISION_z // SELLC SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS __global__ void zgesellcmv_kernel( int num_rows, int num_cols, int blocksize, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = d_colind [offset+ blocksize * n + threadIdx.x ]; magmaDoubleComplex val = d_val[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*d_x[col]; } } d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLC/SELLP. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row (=1) @param alpha magmaDoubleComplex scalar multiplier @param d_val magmaDoubleComplex* array containing values of A in SELLC/P @param d_colind magma_int_t* columnindices of A in SELLC/P @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x magmaDoubleComplex* input vector x @param beta magmaDoubleComplex scalar multiplier @param d_y magmaDoubleComplex* input/output vector y @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgesellcmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y ){ // the kernel can only handle up to 65535 slices // (~2M rows for blocksize 32) dim3 grid( slices, 1, 1); zgesellcmv_kernel<<< grid, blocksize, 0, magma_stream >>> ( m, n, blocksize, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); return MAGMA_SUCCESS; }
52a2f627669ca20d2dbd49c681c27778fa41994f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //============================================== //TRABALHO DE PROGRAMAO PARALELA E DISTRIBUDA // Mandelbrot Set // CUDA // Daniela Kuinchtner, 152064 //============================================== #include <iostream> #include <cstdlib> using namespace std; #define THREADSPERBLOCK 1024 __global__ void brot(char *d_A, int max_row, int max_column, int max_n, int n); int main(int argc, char *argv[]){ int max_row, max_column, max_n; max_row = atoi(argv[1]); max_column = atoi(argv[2]); max_n = atoi(argv[3]); int n = max_row * max_column; size_t size = n * sizeof(char); int nBlocks = (n+THREADSPERBLOCK-1) / THREADSPERBLOCK; char *h_A; char *d_A; h_A = (char *)malloc(size); hipSetDevice(0); hipMalloc((void**)&d_A, size); hipMemcpy(d_A, h_A, size ,hipMemcpyHostToDevice); hipLaunchKernelGGL(( brot) , dim3(nBlocks), dim3(THREADSPERBLOCK) , 0, 0, d_A, max_row, max_column, max_n, n); hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost); int i = 0; for(int r = 0; r < max_row; ++r){ for(int c = 0; c < max_column; ++c){ cout << h_A[i++]; } cout << "\n"; } hipFree(d_A); } __global__ void brot(char *d_A, int max_row, int max_column, int max_n, int n){ int k = 0; int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ int r = i / max_column; int c = i % max_column; float x=0, y=0, tmp=0; while((x*x + y*y) < 4 && ++k < max_n) { tmp = x*x - y*y + ((float) c * 2 / max_column - 1.5); y = x*y*2 + ((float) r * 2 / max_row - 1); x = tmp; } d_A[i]=(k == max_n ? '#' : '.'); } }
52a2f627669ca20d2dbd49c681c27778fa41994f.cu
//============================================== //TRABALHO DE PROGRAMAÇÃO PARALELA E DISTRIBUÍDA // Mandelbrot Set // CUDA // Daniela Kuinchtner, 152064 //============================================== #include <iostream> #include <cstdlib> using namespace std; #define THREADSPERBLOCK 1024 __global__ void brot(char *d_A, int max_row, int max_column, int max_n, int n); int main(int argc, char *argv[]){ int max_row, max_column, max_n; max_row = atoi(argv[1]); max_column = atoi(argv[2]); max_n = atoi(argv[3]); int n = max_row * max_column; size_t size = n * sizeof(char); int nBlocks = (n+THREADSPERBLOCK-1) / THREADSPERBLOCK; char *h_A; char *d_A; h_A = (char *)malloc(size); cudaSetDevice(0); cudaMalloc((void**)&d_A, size); cudaMemcpy(d_A, h_A, size ,cudaMemcpyHostToDevice); brot <<< nBlocks, THREADSPERBLOCK >>> (d_A, max_row, max_column, max_n, n); cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost); int i = 0; for(int r = 0; r < max_row; ++r){ for(int c = 0; c < max_column; ++c){ cout << h_A[i++]; } cout << "\n"; } cudaFree(d_A); } __global__ void brot(char *d_A, int max_row, int max_column, int max_n, int n){ int k = 0; int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ int r = i / max_column; int c = i % max_column; float x=0, y=0, tmp=0; while((x*x + y*y) < 4 && ++k < max_n) { tmp = x*x - y*y + ((float) c * 2 / max_column - 1.5); y = x*y*2 + ((float) r * 2 / max_row - 1); x = tmp; } d_A[i]=(k == max_n ? '#' : '.'); } }
405974d7ec3d5821ccb2ecd0ebe81e196f3bc6b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <time.h> #include <math.h> #include <iomanip> using namespace std; void szeregLeibniza(float* tablica, int rozmiar) { for (int i = 0; i < rozmiar; i++) { tablica[i] = pow(-1, i) * (1.0 / (2.0 * i + 1.0)); } } float liczba_pi(float* tablica, int rozmiar) { float* tablica1 = new float[rozmiar]; for (int i = 0; i < rozmiar; i++) { tablica1[i] = tablica[i]; } for (int i = rozmiar / 2; i > 0; i = i / 2) { for (int j = 0; j < i; j++) { tablica1[j] = tablica1[2 * j] + tablica1[2 * j + 1]; } } return tablica1[0]; } void szeregLeibniza1(double* tablica, int rozmiar) { for (int i = 0; i < rozmiar; i++) { tablica[i] = pow(-1, i) * (1.0 / (2.0 * i + 1.0)); } } double liczba_pi1(double* tablica, int rozmiar) { double* tablica1 = new double[rozmiar]; for (int i = 0; i < rozmiar; i++) { tablica1[i] = tablica[i]; } for (int i = rozmiar / 2; i > 0; i = i / 2) { for (int j = 0; j < i; j++) { tablica1[j] = tablica1[2 * j] + tablica1[2 * j + 1]; } } return tablica1[0]; } __global__ void deviceLeibniz(float* tablica, size_t rozmiar) { unsigned long long int i = threadIdx.x + blockIdx.x * blockDim.x; unsigned long long int siatka = blockDim.x * gridDim.x; for (i; i < rozmiar; i += siatka) { if (i % 2 == 0) tablica[i] = (1.0 / (2.0 * i + 1.0)); else tablica[i] = (-1.0) * (1.0 / (2.0 * i + 1.0)); } } __inline__ __device__ float redukcjaWarpow(float value) { for (int off = warpSize/2; off > 0; off /= 2) value += __shfl_down(value, off); return value; } __inline__ __device__ float redukcjaBlokow(float value) { static __shared__ float shared_mem[32]; // Shared mem for 32 partial sums int ll = threadIdx.x % warpSize; int ww = threadIdx.x / warpSize; value = redukcjaWarpow(value); // Each warp performs partial reduction if (ll == 0) shared_mem[ww] = value; // Write reduced value to shared memory __syncthreads(); // Wait 
for all partial reductions //read from shared memory only if that warp existed value = (threadIdx.x < blockDim.x / warpSize) ? shared_mem[ll] : 0; if (ww == 0) value = redukcjaWarpow(value); //Final reduce within first warp return value; } __global__ void deviceAdd(float* tablica, float* wyjscie, size_t rozmiar) { float sumka = 0; //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < rozmiar; i += blockDim.x * gridDim.x) { sumka += tablica[i]; } sumka = redukcjaBlokow(sumka); if (threadIdx.x == 0) wyjscie[blockIdx.x] = sumka; } __global__ void deviceLeibniz1(double* tablica, size_t rozmiar) { unsigned long long int i = threadIdx.x + blockIdx.x * blockDim.x; unsigned long long int siatka = blockDim.x * gridDim.x; for (i; i < rozmiar; i += siatka) { if (i % 2 == 0) tablica[i] = (1.0 / (2.0 * i + 1.0)); else tablica[i] = (-1.0) * (1.0 / (2.0 * i + 1.0)); } } __inline__ __device__ double redukcjaWarpow1(double value) { for (int off = warpSize / 2; off > 0; off /= 2) value += __shfl_down(value, off); return value; } __inline__ __device__ double redukcjaBlokow1(double value) { static __shared__ double shared_mem[32]; // Shared mem for 32 partial sums int ll = threadIdx.x % warpSize; int ww = threadIdx.x / warpSize; value = redukcjaWarpow1(value); // Each warp performs partial reduction if (ll == 0) shared_mem[ww] = value; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed value = (threadIdx.x < blockDim.x / warpSize) ? 
shared_mem[ll] : 0; if (ww == 0) value = redukcjaWarpow1(value); //Final reduce within first warp return value; } __global__ void deviceAdd1(double* tablica, double* wyjscie, size_t rozmiar) { double sumka = 0; //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < rozmiar; i += blockDim.x * gridDim.x) { sumka += tablica[i]; } sumka = redukcjaBlokow1(sumka); if (threadIdx.x == 0) wyjscie[blockIdx.x] = sumka; } int main() { while (true) { int rozmiar; cout << "Podaj rozmiar wektora: "; cin >> rozmiar; cout << endl; float* tablica = new float[rozmiar]; double* tablica1 = new double[rozmiar]; clock_t start; double duration_on_CPU; double duration_on_CPU1; start = clock(); szeregLeibniza(tablica, rozmiar); float wynik = 4.0 * liczba_pi(tablica, rozmiar); duration_on_CPU = 1000 * (clock() - start) / CLOCKS_PER_SEC; start = clock(); szeregLeibniza1(tablica1, rozmiar); double wynik1 = 4.0 * liczba_pi1(tablica1, rozmiar); duration_on_CPU1 = 1000 * (clock() - start) / CLOCKS_PER_SEC; cout << "Obliczona liczba z szeregu Leibniza na CPU wynosi: " << setprecision(9) << wynik << " w prezycji float." << endl; cout << "A czas jej obliczenia wynosi: " << duration_on_CPU << " milisekund." << endl << endl; cout << "Obliczona liczba z szeregu Leibniza na CPU wynosi: " << setprecision(9) << wynik1 << " w precyzji double." << endl; cout << "A czas jej obliczenia wynosi: " << duration_on_CPU1 << " milisekund." 
<< endl << endl; float host_wynik; double host_wynik1; float* dev_tablica; float* dev_wynik; double* dev_tablica1; double* dev_wynik1; int rozmiarBloku = 512; int liczbaBlokow = (rozmiar / rozmiarBloku) + 1; hipMalloc((void**)&dev_tablica, rozmiar * sizeof(float)); hipMalloc((void**)&dev_wynik, liczbaBlokow * sizeof(float)); hipMalloc((void**)&dev_tablica1, rozmiar * sizeof(double)); hipMalloc((void**)&dev_wynik1, liczbaBlokow * sizeof(double)); double duration_on_GPU; double duration_on_GPU1; start = clock(); deviceLeibniz << <liczbaBlokow, rozmiarBloku >> > (dev_tablica, rozmiar); hipDeviceSynchronize(); deviceAdd << <liczbaBlokow, rozmiarBloku >> > (dev_tablica, dev_wynik, rozmiar); hipDeviceSynchronize(); deviceAdd << <1, 1024 >> > (dev_wynik, dev_tablica, liczbaBlokow); hipDeviceSynchronize(); duration_on_GPU = 1000 * (clock() - start) / CLOCKS_PER_SEC; hipMemcpy(&host_wynik, dev_tablica, sizeof(float), hipMemcpyDeviceToHost); start = clock(); deviceLeibniz1 << <liczbaBlokow, rozmiarBloku >> > (dev_tablica1, rozmiar); hipDeviceSynchronize(); deviceAdd1 << <liczbaBlokow, rozmiarBloku >> > (dev_tablica1, dev_wynik1, rozmiar); hipDeviceSynchronize(); deviceAdd1 << <1, 1024 >> > (dev_wynik1, dev_tablica1, liczbaBlokow); hipDeviceSynchronize(); duration_on_GPU1 = 1000 * (clock() - start) / CLOCKS_PER_SEC; hipMemcpy(&host_wynik1, dev_tablica1, sizeof(double), hipMemcpyDeviceToHost); cout << "Obliczona liczba z szeregu Leibniza na GPU wynosi: " << setprecision(9) << 4.0 * host_wynik << " w prezycji float." << endl; cout << "A czas jej obliczenia wynosi: " << duration_on_GPU << " milisekund." << endl << endl; cout << "Obliczona liczba z szeregu Leibniza na GPU wynosi: " << setprecision(9) << 4.0 * host_wynik1 << " w precyzji double." << endl; cout << "A czas jej obliczenia wynosi: " << duration_on_GPU1 << " milisekund." 
<< endl << endl; cout << endl << endl; cout << endl << endl; double stosunek; if (duration_on_GPU == 0) cout << "Czas obliczen w precyzji float na GPU jest zerowy i niemozliwe jest wyliczenie stosunku." << endl; else { stosunek = duration_on_CPU / duration_on_GPU; cout << "Obliczenia w precyzji float na GPU sa " << stosunek << " razy szybsze." << endl; } if (duration_on_GPU1 == 0) cout << "Czas obliczen w precyzji double na GPU jest zerowy i niemozliwe jest wyliczenie stosunku." << endl; else { stosunek = duration_on_CPU1 / duration_on_GPU1; cout << "Obliczenia w precyzji double na GPU sa " << stosunek << " razy szybsze." << endl; } cout << endl; hipFree(dev_tablica); hipFree(dev_wynik); hipFree(dev_tablica1); hipFree(dev_wynik1); delete[]tablica; delete[]tablica1; } return 0; }
405974d7ec3d5821ccb2ecd0ebe81e196f3bc6b3.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <time.h> #include <math.h> #include <iomanip> using namespace std; void szeregLeibniza(float* tablica, int rozmiar) { for (int i = 0; i < rozmiar; i++) { tablica[i] = pow(-1, i) * (1.0 / (2.0 * i + 1.0)); } } float liczba_pi(float* tablica, int rozmiar) { float* tablica1 = new float[rozmiar]; for (int i = 0; i < rozmiar; i++) { tablica1[i] = tablica[i]; } for (int i = rozmiar / 2; i > 0; i = i / 2) { for (int j = 0; j < i; j++) { tablica1[j] = tablica1[2 * j] + tablica1[2 * j + 1]; } } return tablica1[0]; } void szeregLeibniza1(double* tablica, int rozmiar) { for (int i = 0; i < rozmiar; i++) { tablica[i] = pow(-1, i) * (1.0 / (2.0 * i + 1.0)); } } double liczba_pi1(double* tablica, int rozmiar) { double* tablica1 = new double[rozmiar]; for (int i = 0; i < rozmiar; i++) { tablica1[i] = tablica[i]; } for (int i = rozmiar / 2; i > 0; i = i / 2) { for (int j = 0; j < i; j++) { tablica1[j] = tablica1[2 * j] + tablica1[2 * j + 1]; } } return tablica1[0]; } __global__ void deviceLeibniz(float* tablica, size_t rozmiar) { unsigned long long int i = threadIdx.x + blockIdx.x * blockDim.x; unsigned long long int siatka = blockDim.x * gridDim.x; for (i; i < rozmiar; i += siatka) { if (i % 2 == 0) tablica[i] = (1.0 / (2.0 * i + 1.0)); else tablica[i] = (-1.0) * (1.0 / (2.0 * i + 1.0)); } } __inline__ __device__ float redukcjaWarpow(float value) { for (int off = warpSize/2; off > 0; off /= 2) value += __shfl_down(value, off); return value; } __inline__ __device__ float redukcjaBlokow(float value) { static __shared__ float shared_mem[32]; // Shared mem for 32 partial sums int ll = threadIdx.x % warpSize; int ww = threadIdx.x / warpSize; value = redukcjaWarpow(value); // Each warp performs partial reduction if (ll == 0) shared_mem[ww] = value; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if 
that warp existed value = (threadIdx.x < blockDim.x / warpSize) ? shared_mem[ll] : 0; if (ww == 0) value = redukcjaWarpow(value); //Final reduce within first warp return value; } __global__ void deviceAdd(float* tablica, float* wyjscie, size_t rozmiar) { float sumka = 0; //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < rozmiar; i += blockDim.x * gridDim.x) { sumka += tablica[i]; } sumka = redukcjaBlokow(sumka); if (threadIdx.x == 0) wyjscie[blockIdx.x] = sumka; } __global__ void deviceLeibniz1(double* tablica, size_t rozmiar) { unsigned long long int i = threadIdx.x + blockIdx.x * blockDim.x; unsigned long long int siatka = blockDim.x * gridDim.x; for (i; i < rozmiar; i += siatka) { if (i % 2 == 0) tablica[i] = (1.0 / (2.0 * i + 1.0)); else tablica[i] = (-1.0) * (1.0 / (2.0 * i + 1.0)); } } __inline__ __device__ double redukcjaWarpow1(double value) { for (int off = warpSize / 2; off > 0; off /= 2) value += __shfl_down(value, off); return value; } __inline__ __device__ double redukcjaBlokow1(double value) { static __shared__ double shared_mem[32]; // Shared mem for 32 partial sums int ll = threadIdx.x % warpSize; int ww = threadIdx.x / warpSize; value = redukcjaWarpow1(value); // Each warp performs partial reduction if (ll == 0) shared_mem[ww] = value; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed value = (threadIdx.x < blockDim.x / warpSize) ? 
shared_mem[ll] : 0; if (ww == 0) value = redukcjaWarpow1(value); //Final reduce within first warp return value; } __global__ void deviceAdd1(double* tablica, double* wyjscie, size_t rozmiar) { double sumka = 0; //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < rozmiar; i += blockDim.x * gridDim.x) { sumka += tablica[i]; } sumka = redukcjaBlokow1(sumka); if (threadIdx.x == 0) wyjscie[blockIdx.x] = sumka; } int main() { while (true) { int rozmiar; cout << "Podaj rozmiar wektora: "; cin >> rozmiar; cout << endl; float* tablica = new float[rozmiar]; double* tablica1 = new double[rozmiar]; clock_t start; double duration_on_CPU; double duration_on_CPU1; start = clock(); szeregLeibniza(tablica, rozmiar); float wynik = 4.0 * liczba_pi(tablica, rozmiar); duration_on_CPU = 1000 * (clock() - start) / CLOCKS_PER_SEC; start = clock(); szeregLeibniza1(tablica1, rozmiar); double wynik1 = 4.0 * liczba_pi1(tablica1, rozmiar); duration_on_CPU1 = 1000 * (clock() - start) / CLOCKS_PER_SEC; cout << "Obliczona liczba z szeregu Leibniza na CPU wynosi: " << setprecision(9) << wynik << " w prezycji float." << endl; cout << "A czas jej obliczenia wynosi: " << duration_on_CPU << " milisekund." << endl << endl; cout << "Obliczona liczba z szeregu Leibniza na CPU wynosi: " << setprecision(9) << wynik1 << " w precyzji double." << endl; cout << "A czas jej obliczenia wynosi: " << duration_on_CPU1 << " milisekund." 
<< endl << endl; float host_wynik; double host_wynik1; float* dev_tablica; float* dev_wynik; double* dev_tablica1; double* dev_wynik1; int rozmiarBloku = 512; int liczbaBlokow = (rozmiar / rozmiarBloku) + 1; cudaMalloc((void**)&dev_tablica, rozmiar * sizeof(float)); cudaMalloc((void**)&dev_wynik, liczbaBlokow * sizeof(float)); cudaMalloc((void**)&dev_tablica1, rozmiar * sizeof(double)); cudaMalloc((void**)&dev_wynik1, liczbaBlokow * sizeof(double)); double duration_on_GPU; double duration_on_GPU1; start = clock(); deviceLeibniz << <liczbaBlokow, rozmiarBloku >> > (dev_tablica, rozmiar); cudaDeviceSynchronize(); deviceAdd << <liczbaBlokow, rozmiarBloku >> > (dev_tablica, dev_wynik, rozmiar); cudaDeviceSynchronize(); deviceAdd << <1, 1024 >> > (dev_wynik, dev_tablica, liczbaBlokow); cudaDeviceSynchronize(); duration_on_GPU = 1000 * (clock() - start) / CLOCKS_PER_SEC; cudaMemcpy(&host_wynik, dev_tablica, sizeof(float), cudaMemcpyDeviceToHost); start = clock(); deviceLeibniz1 << <liczbaBlokow, rozmiarBloku >> > (dev_tablica1, rozmiar); cudaDeviceSynchronize(); deviceAdd1 << <liczbaBlokow, rozmiarBloku >> > (dev_tablica1, dev_wynik1, rozmiar); cudaDeviceSynchronize(); deviceAdd1 << <1, 1024 >> > (dev_wynik1, dev_tablica1, liczbaBlokow); cudaDeviceSynchronize(); duration_on_GPU1 = 1000 * (clock() - start) / CLOCKS_PER_SEC; cudaMemcpy(&host_wynik1, dev_tablica1, sizeof(double), cudaMemcpyDeviceToHost); cout << "Obliczona liczba z szeregu Leibniza na GPU wynosi: " << setprecision(9) << 4.0 * host_wynik << " w prezycji float." << endl; cout << "A czas jej obliczenia wynosi: " << duration_on_GPU << " milisekund." << endl << endl; cout << "Obliczona liczba z szeregu Leibniza na GPU wynosi: " << setprecision(9) << 4.0 * host_wynik1 << " w precyzji double." << endl; cout << "A czas jej obliczenia wynosi: " << duration_on_GPU1 << " milisekund." 
<< endl << endl; cout << endl << endl; cout << endl << endl; double stosunek; if (duration_on_GPU == 0) cout << "Czas obliczen w precyzji float na GPU jest zerowy i niemozliwe jest wyliczenie stosunku." << endl; else { stosunek = duration_on_CPU / duration_on_GPU; cout << "Obliczenia w precyzji float na GPU sa " << stosunek << " razy szybsze." << endl; } if (duration_on_GPU1 == 0) cout << "Czas obliczen w precyzji double na GPU jest zerowy i niemozliwe jest wyliczenie stosunku." << endl; else { stosunek = duration_on_CPU1 / duration_on_GPU1; cout << "Obliczenia w precyzji double na GPU sa " << stosunek << " razy szybsze." << endl; } cout << endl; cudaFree(dev_tablica); cudaFree(dev_wynik); cudaFree(dev_tablica1); cudaFree(dev_wynik1); delete[]tablica; delete[]tablica1; } return 0; }
bb9206500d28386d67017a11229252721c28f946.hip
// !!! This is a file automatically generated by hipify!!! /* * Example of how to use the mxGPUArray API in a MEX file. This example shows * how to write a MEX function that takes a gpuArray input and returns a * gpuArray output, e.g. B=mexFunction(A). * * Copyright 2012 The MathWorks, Inc. */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #include <stdint.h> #include "mex.h" #include "gpu/mxGPUArray.h" #include <cstdlib> #include <algorithm> #include <iostream> using namespace std; const int nt0 = 61, Nthreads = 1024, lockout = nt0-1, nblock = 32; ////////////////////////////////////////////////////////////////////////////////////////// __global__ void crossFilter(const double *Params, const float *W1, const float *W2, const float *UtU, float *WtW){ __shared__ float shW1[nblock*nt0], shW2[nblock*nt0]; float x; int tidx, tidy , bidx, bidy, i, NT, Nfilt, t; tidx = threadIdx.x; tidy = threadIdx.y; bidx = blockIdx.x; bidy = blockIdx.y; Nfilt = (int) Params[1]; while(tidx<nt0){ shW1[tidx + tidy * nt0] = W1[tidx + (tidy+bidx*nblock) * nt0]; shW2[tidx + tidy * nt0] = W2[tidx + (tidy+bidy*nblock) * nt0]; tidx+= nblock; } tidx = threadIdx.x; __syncthreads(); for(i=0;i<2*nt0-1;i++){ x = 0.0f; if(i<nt0) for(t=0;t<i+1;t++) x += shW1[t + nt0 * tidx] * shW2[t + (nt0-i-1) + nt0 * tidy]; else for(t=i-nt0+1;t<nt0;t++) x += shW1[t + nt0 * tidx] * shW2[t + (nt0-i-1) + nt0 * tidy]; WtW[tidx+bidx*nblock + (tidy + bidy*nblock)*Nfilt + i*Nfilt*Nfilt] = x * UtU[tidx+bidx*nblock + (tidy + bidy*nblock)*Nfilt]; } } ////////////////////////////////////////////////////////////////////////////////////////// /* * Host code */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { /* Declare input variables*/ double *Params, *d_Params; int Nfilt, NT; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); /* read Params and copy to GPU */ Params = (double*) mxGetData(prhs[0]); NT = (int) Params[0]; Nfilt = (int) Params[1]; hipMalloc(&d_Params, sizeof(double)*mxGetNumberOfElements(prhs[0])); hipMemcpy(d_Params,Params,sizeof(double)*mxGetNumberOfElements(prhs[0]),hipMemcpyHostToDevice); /* collect input GPU variables*/ mxGPUArray const *W1, *W2, *UtU; const float *d_W1,*d_W2, *d_UtU; W1 = mxGPUCreateFromMxArray(prhs[1]); d_W1 = (float const *)(mxGPUGetDataReadOnly(W1)); W2 = mxGPUCreateFromMxArray(prhs[2]); d_W2 = (float const *)(mxGPUGetDataReadOnly(W2)); UtU = mxGPUCreateFromMxArray(prhs[3]); d_UtU = (float const *)(mxGPUGetDataReadOnly(UtU)); mxGPUArray *WtW; float *d_WtW; const mwSize dimsu[] = {Nfilt, Nfilt, 2*nt0-1}; WtW = mxGPUCreateGPUArray(3, dimsu, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_WtW = (float *)(mxGPUGetData(WtW)); dim3 grid(Nfilt/nblock, Nfilt/nblock); dim3 block(nblock, nblock); hipLaunchKernelGGL(( crossFilter), dim3(grid), dim3(block), 0, 0, d_Params, d_W1, d_W2, d_UtU, d_WtW); plhs[0] = mxGPUCreateMxArrayOnGPU(WtW); hipFree(d_Params); mxGPUDestroyGPUArray(WtW); mxGPUDestroyGPUArray(W1); mxGPUDestroyGPUArray(W2); mxGPUDestroyGPUArray(UtU); }
bb9206500d28386d67017a11229252721c28f946.cu
/* * Example of how to use the mxGPUArray API in a MEX file. This example shows * how to write a MEX function that takes a gpuArray input and returns a * gpuArray output, e.g. B=mexFunction(A). * * Copyright 2012 The MathWorks, Inc. */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> #include <stdint.h> #include "mex.h" #include "gpu/mxGPUArray.h" #include <cstdlib> #include <algorithm> #include <iostream> using namespace std; const int nt0 = 61, Nthreads = 1024, lockout = nt0-1, nblock = 32; ////////////////////////////////////////////////////////////////////////////////////////// __global__ void crossFilter(const double *Params, const float *W1, const float *W2, const float *UtU, float *WtW){ __shared__ float shW1[nblock*nt0], shW2[nblock*nt0]; float x; int tidx, tidy , bidx, bidy, i, NT, Nfilt, t; tidx = threadIdx.x; tidy = threadIdx.y; bidx = blockIdx.x; bidy = blockIdx.y; Nfilt = (int) Params[1]; while(tidx<nt0){ shW1[tidx + tidy * nt0] = W1[tidx + (tidy+bidx*nblock) * nt0]; shW2[tidx + tidy * nt0] = W2[tidx + (tidy+bidy*nblock) * nt0]; tidx+= nblock; } tidx = threadIdx.x; __syncthreads(); for(i=0;i<2*nt0-1;i++){ x = 0.0f; if(i<nt0) for(t=0;t<i+1;t++) x += shW1[t + nt0 * tidx] * shW2[t + (nt0-i-1) + nt0 * tidy]; else for(t=i-nt0+1;t<nt0;t++) x += shW1[t + nt0 * tidx] * shW2[t + (nt0-i-1) + nt0 * tidy]; WtW[tidx+bidx*nblock + (tidy + bidy*nblock)*Nfilt + i*Nfilt*Nfilt] = x * UtU[tidx+bidx*nblock + (tidy + bidy*nblock)*Nfilt]; } } ////////////////////////////////////////////////////////////////////////////////////////// /* * Host code */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { /* Declare input variables*/ double *Params, *d_Params; int Nfilt, NT; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); /* read Params and copy to GPU */ Params = (double*) mxGetData(prhs[0]); NT = (int) Params[0]; Nfilt = (int) Params[1]; cudaMalloc(&d_Params, sizeof(double)*mxGetNumberOfElements(prhs[0])); cudaMemcpy(d_Params,Params,sizeof(double)*mxGetNumberOfElements(prhs[0]),cudaMemcpyHostToDevice); /* collect input GPU variables*/ mxGPUArray const *W1, *W2, *UtU; const float *d_W1,*d_W2, *d_UtU; W1 = mxGPUCreateFromMxArray(prhs[1]); d_W1 = (float const *)(mxGPUGetDataReadOnly(W1)); W2 = mxGPUCreateFromMxArray(prhs[2]); d_W2 = (float const *)(mxGPUGetDataReadOnly(W2)); UtU = mxGPUCreateFromMxArray(prhs[3]); d_UtU = (float const *)(mxGPUGetDataReadOnly(UtU)); mxGPUArray *WtW; float *d_WtW; const mwSize dimsu[] = {Nfilt, Nfilt, 2*nt0-1}; WtW = mxGPUCreateGPUArray(3, dimsu, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); d_WtW = (float *)(mxGPUGetData(WtW)); dim3 grid(Nfilt/nblock, Nfilt/nblock); dim3 block(nblock, nblock); crossFilter<<<grid, block>>>(d_Params, d_W1, d_W2, d_UtU, d_WtW); plhs[0] = mxGPUCreateMxArrayOnGPU(WtW); cudaFree(d_Params); mxGPUDestroyGPUArray(WtW); mxGPUDestroyGPUArray(W1); mxGPUDestroyGPUArray(W2); mxGPUDestroyGPUArray(UtU); }
f8e9120326b299da9da64f0827d8464dbd3ebab7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * ---------------------------------------------------------------------------* * @brief Core Number implementation * * @file core_number.cu * --------------------------------------------------------------------------*/ #include <cugraph.h> #include "utilities/error_utils.h" #include <Hornet.hpp> #include <Static/CoreNumber/CoreNumber.cuh> #include <rmm_utils.h> #include <nvgraph_gdf.h> namespace cugraph { namespace detail { void core_number_impl(Graph *graph, int *core_number) { using HornetGraph = hornet::gpu::HornetStatic<int>; using HornetInit = hornet::HornetInit<int>; using CoreNumber = hornets_nest::CoreNumberStatic; HornetInit init(graph->numberOfVertices, graph->adjList->indices->size, static_cast<int*>(graph->adjList->offsets->data), static_cast<int*>(graph->adjList->indices->data)); HornetGraph hnt(init, hornet::DeviceType::DEVICE); CoreNumber cn(hnt, core_number); cn.run(); } struct FilterEdges { int k; int* core_number; FilterEdges(int _k, thrust::device_ptr<int> core_num) : k(_k), core_number(core_num.get()) {} template <typename T> __host__ __device__ bool operator()(T t) { int src = thrust::get<0>(t); int dst = thrust::get<1>(t); return (core_number[src] >= k) && (core_number[dst] >= k); } }; template <typename WT> void extract_edges( Graph *i_graph, Graph *o_graph, thrust::device_ptr<int> c_ptr, int k, int 
filteredEdgeCount) { hipStream_t stream{nullptr}; //Allocate output columns o_graph->edgeList = new gdf_edge_list; o_graph->edgeList->src_indices = new gdf_column; o_graph->edgeList->dest_indices = new gdf_column; o_graph->edgeList->ownership = 2; bool hasData = (i_graph->edgeList->edge_data != nullptr); //Allocate underlying memory for output columns int *o_src, *o_dst, *o_wgt; ALLOC_TRY((void**)&o_src, sizeof(int) * filteredEdgeCount, stream); ALLOC_TRY((void**)&o_dst, sizeof(int) * filteredEdgeCount, stream); int *i_src = static_cast<int*>(i_graph->edgeList->src_indices->data); int *i_dst = static_cast<int*>(i_graph->edgeList->dest_indices->data); WT *i_wgt = nullptr; gdf_column_view(o_graph->edgeList->src_indices, o_src, nullptr, filteredEdgeCount, GDF_INT32); gdf_column_view(o_graph->edgeList->dest_indices, o_dst, nullptr, filteredEdgeCount, GDF_INT32); //Set pointers and allocate memory/columns in case input graph has edge_data if (hasData) { o_graph->edgeList->edge_data = new gdf_column; ALLOC_TRY((void**)&o_wgt, sizeof(WT) * filteredEdgeCount, stream); i_wgt = static_cast<WT*>(i_graph->edgeList->edge_data->data); gdf_column_view(o_graph->edgeList->edge_data, o_wgt, nullptr, filteredEdgeCount, i_graph->edgeList->edge_data->dtype); } gdf_size_type nE = i_graph->edgeList->src_indices->size; //If an edge satisfies k-core conditions i.e. 
core_num[src] and core_num[dst] //are both greater than or equal to k, copy it to the output graph if (hasData) { auto inEdge = thrust::make_zip_iterator(thrust::make_tuple( thrust::device_pointer_cast(i_src), thrust::device_pointer_cast(i_dst), thrust::device_pointer_cast(i_wgt))); auto outEdge = thrust::make_zip_iterator(thrust::make_tuple( thrust::device_pointer_cast(o_src), thrust::device_pointer_cast(o_dst), thrust::device_pointer_cast(o_wgt))); auto ptr = thrust::copy_if(rmm::exec_policy(stream)->on(stream), inEdge, inEdge + nE, outEdge, FilterEdges(k, c_ptr)); if ((ptr - outEdge) != filteredEdgeCount) { CUGRAPH_FAIL("Edge extraction failed"); } } else { auto inEdge = thrust::make_zip_iterator(thrust::make_tuple( thrust::device_pointer_cast(i_src), thrust::device_pointer_cast(i_dst))); auto outEdge = thrust::make_zip_iterator(thrust::make_tuple( thrust::device_pointer_cast(o_src), thrust::device_pointer_cast(o_dst))); auto ptr = thrust::copy_if(rmm::exec_policy(stream)->on(stream), inEdge, inEdge + nE, outEdge, FilterEdges(k, c_ptr)); if ((ptr - outEdge) != filteredEdgeCount) { CUGRAPH_FAIL("Edge extraction failed"); } } } } //namespace //Extract a subgraph from in_graph (with or without weights) //to out_graph based on whether edges in in_graph satisfy kcore //conditions. //i.e. All edges (s,d,w) in in_graph are copied over to out_graph //if core_num[s] and core_num[d] are greater than or equal to k. void extract_subgraph(Graph *in_graph, Graph *out_graph, int * vid, int * core_num, int k, gdf_size_type len, gdf_size_type nV) { hipStream_t stream{nullptr}; rmm::device_vector<int> c; thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(core_num); //We cannot assume that the user provided core numbers per vertex will be in //order. Therefore, they need to be reordered by the vertex ids in a temporary //array. 
c.resize(nV, 0); thrust::device_ptr<int> v_ptr = thrust::device_pointer_cast(vid); thrust::scatter(rmm::exec_policy(stream)->on(stream), c_ptr, c_ptr + len, v_ptr, c.begin()); c_ptr = thrust::device_pointer_cast(c.data().get()); cugraph::add_edge_list(in_graph); thrust::device_ptr<int> src = thrust::device_pointer_cast(static_cast<int*>(in_graph->edgeList->src_indices->data)); thrust::device_ptr<int> dst = thrust::device_pointer_cast(static_cast<int*>(in_graph->edgeList->dest_indices->data)); //Count number of edges in the input graph that satisfy kcore conditions //i.e. core_num[src] and core_num[dst] are both greater than or equal to k gdf_size_type nE = in_graph->edgeList->src_indices->size; auto edge = thrust::make_zip_iterator(thrust::make_tuple(src, dst)); int filteredEdgeCount = thrust::count_if(rmm::exec_policy(stream)->on(stream), edge, edge + nE, detail::FilterEdges(k, c_ptr)); //Extract the relevant edges that have satisfied k-core conditions and put them in the output graph if (in_graph->edgeList->edge_data != nullptr) { switch (in_graph->edgeList->edge_data->dtype) { case GDF_FLOAT32: return detail::extract_edges<float> (in_graph, out_graph, c_ptr, k, filteredEdgeCount); case GDF_FLOAT64: return detail::extract_edges<double>(in_graph, out_graph, c_ptr, k, filteredEdgeCount); default: CUGRAPH_FAIL("Unsupported data type: edge data needs to be float32 or float64"); } } else { return detail::extract_edges<float> (in_graph, out_graph, c_ptr, k, filteredEdgeCount); } } void core_number(Graph *graph, gdf_column *core_number) { CHECK_GRAPH(graph) CUGRAPH_EXPECTS(graph->adjList->offsets->dtype == GDF_INT32, "Unsupported data type: graph needs to be int32"); CUGRAPH_EXPECTS(graph->adjList->indices->dtype == GDF_INT32, "Unsupported data type: graph needs to be int32"); CUGRAPH_EXPECTS(core_number->dtype == GDF_INT32, "Unsupported data type: core number needs to be int32"); CUGRAPH_EXPECTS(core_number->size == graph->numberOfVertices, "Column size mismatch"); 
return detail::core_number_impl(graph, static_cast<int*>(core_number->data)); } void k_core(Graph *in_graph, int k, gdf_column *vertex_id, gdf_column *core_number, Graph *out_graph) { CUGRAPH_EXPECTS(out_graph != nullptr, "Invalid API parameter: out_graph is NULL"); CUGRAPH_EXPECTS(in_graph != nullptr, "Invalid API parameter: in_graph is NULL"); gdf_size_type nV = in_graph->numberOfVertices; CUGRAPH_EXPECTS(in_graph->adjList->offsets->dtype == GDF_INT32, "Unsupported data type: graph needs to be int32"); CUGRAPH_EXPECTS(in_graph->adjList->indices->dtype == GDF_INT32, "Unsupported data type: graph needs to be int32"); CUGRAPH_EXPECTS((vertex_id != nullptr) && (core_number != nullptr), "Invalid API parameter"); CUGRAPH_EXPECTS(vertex_id->dtype == GDF_INT32, "Unsupported data type"); CUGRAPH_EXPECTS(core_number->dtype == GDF_INT32, "Unsupported data type"); CUGRAPH_EXPECTS(core_number->size == vertex_id->size, "Invalid API parameter"); CUGRAPH_EXPECTS(core_number->size == nV, "Invalid API parameter"); CUGRAPH_EXPECTS(k >= 0, "Invalid API parameter"); int * vertex_identifier_ptr = static_cast<int*>(vertex_id->data); int * core_number_ptr = static_cast<int*>(core_number->data); gdf_size_type vLen = vertex_id->size; extract_subgraph(in_graph, out_graph, vertex_identifier_ptr, core_number_ptr, k, vLen, nV); } } //namespace cugraph
f8e9120326b299da9da64f0827d8464dbd3ebab7.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * ---------------------------------------------------------------------------* * @brief Core Number implementation * * @file core_number.cu * --------------------------------------------------------------------------*/ #include <cugraph.h> #include "utilities/error_utils.h" #include <Hornet.hpp> #include <Static/CoreNumber/CoreNumber.cuh> #include <rmm_utils.h> #include <nvgraph_gdf.h> namespace cugraph { namespace detail { void core_number_impl(Graph *graph, int *core_number) { using HornetGraph = hornet::gpu::HornetStatic<int>; using HornetInit = hornet::HornetInit<int>; using CoreNumber = hornets_nest::CoreNumberStatic; HornetInit init(graph->numberOfVertices, graph->adjList->indices->size, static_cast<int*>(graph->adjList->offsets->data), static_cast<int*>(graph->adjList->indices->data)); HornetGraph hnt(init, hornet::DeviceType::DEVICE); CoreNumber cn(hnt, core_number); cn.run(); } struct FilterEdges { int k; int* core_number; FilterEdges(int _k, thrust::device_ptr<int> core_num) : k(_k), core_number(core_num.get()) {} template <typename T> __host__ __device__ bool operator()(T t) { int src = thrust::get<0>(t); int dst = thrust::get<1>(t); return (core_number[src] >= k) && (core_number[dst] >= k); } }; template <typename WT> void extract_edges( Graph *i_graph, Graph *o_graph, thrust::device_ptr<int> c_ptr, int k, int filteredEdgeCount) { cudaStream_t stream{nullptr}; //Allocate 
output columns o_graph->edgeList = new gdf_edge_list; o_graph->edgeList->src_indices = new gdf_column; o_graph->edgeList->dest_indices = new gdf_column; o_graph->edgeList->ownership = 2; bool hasData = (i_graph->edgeList->edge_data != nullptr); //Allocate underlying memory for output columns int *o_src, *o_dst, *o_wgt; ALLOC_TRY((void**)&o_src, sizeof(int) * filteredEdgeCount, stream); ALLOC_TRY((void**)&o_dst, sizeof(int) * filteredEdgeCount, stream); int *i_src = static_cast<int*>(i_graph->edgeList->src_indices->data); int *i_dst = static_cast<int*>(i_graph->edgeList->dest_indices->data); WT *i_wgt = nullptr; gdf_column_view(o_graph->edgeList->src_indices, o_src, nullptr, filteredEdgeCount, GDF_INT32); gdf_column_view(o_graph->edgeList->dest_indices, o_dst, nullptr, filteredEdgeCount, GDF_INT32); //Set pointers and allocate memory/columns in case input graph has edge_data if (hasData) { o_graph->edgeList->edge_data = new gdf_column; ALLOC_TRY((void**)&o_wgt, sizeof(WT) * filteredEdgeCount, stream); i_wgt = static_cast<WT*>(i_graph->edgeList->edge_data->data); gdf_column_view(o_graph->edgeList->edge_data, o_wgt, nullptr, filteredEdgeCount, i_graph->edgeList->edge_data->dtype); } gdf_size_type nE = i_graph->edgeList->src_indices->size; //If an edge satisfies k-core conditions i.e. 
core_num[src] and core_num[dst] //are both greater than or equal to k, copy it to the output graph if (hasData) { auto inEdge = thrust::make_zip_iterator(thrust::make_tuple( thrust::device_pointer_cast(i_src), thrust::device_pointer_cast(i_dst), thrust::device_pointer_cast(i_wgt))); auto outEdge = thrust::make_zip_iterator(thrust::make_tuple( thrust::device_pointer_cast(o_src), thrust::device_pointer_cast(o_dst), thrust::device_pointer_cast(o_wgt))); auto ptr = thrust::copy_if(rmm::exec_policy(stream)->on(stream), inEdge, inEdge + nE, outEdge, FilterEdges(k, c_ptr)); if ((ptr - outEdge) != filteredEdgeCount) { CUGRAPH_FAIL("Edge extraction failed"); } } else { auto inEdge = thrust::make_zip_iterator(thrust::make_tuple( thrust::device_pointer_cast(i_src), thrust::device_pointer_cast(i_dst))); auto outEdge = thrust::make_zip_iterator(thrust::make_tuple( thrust::device_pointer_cast(o_src), thrust::device_pointer_cast(o_dst))); auto ptr = thrust::copy_if(rmm::exec_policy(stream)->on(stream), inEdge, inEdge + nE, outEdge, FilterEdges(k, c_ptr)); if ((ptr - outEdge) != filteredEdgeCount) { CUGRAPH_FAIL("Edge extraction failed"); } } } } //namespace //Extract a subgraph from in_graph (with or without weights) //to out_graph based on whether edges in in_graph satisfy kcore //conditions. //i.e. All edges (s,d,w) in in_graph are copied over to out_graph //if core_num[s] and core_num[d] are greater than or equal to k. void extract_subgraph(Graph *in_graph, Graph *out_graph, int * vid, int * core_num, int k, gdf_size_type len, gdf_size_type nV) { cudaStream_t stream{nullptr}; rmm::device_vector<int> c; thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(core_num); //We cannot assume that the user provided core numbers per vertex will be in //order. Therefore, they need to be reordered by the vertex ids in a temporary //array. 
c.resize(nV, 0); thrust::device_ptr<int> v_ptr = thrust::device_pointer_cast(vid); thrust::scatter(rmm::exec_policy(stream)->on(stream), c_ptr, c_ptr + len, v_ptr, c.begin()); c_ptr = thrust::device_pointer_cast(c.data().get()); cugraph::add_edge_list(in_graph); thrust::device_ptr<int> src = thrust::device_pointer_cast(static_cast<int*>(in_graph->edgeList->src_indices->data)); thrust::device_ptr<int> dst = thrust::device_pointer_cast(static_cast<int*>(in_graph->edgeList->dest_indices->data)); //Count number of edges in the input graph that satisfy kcore conditions //i.e. core_num[src] and core_num[dst] are both greater than or equal to k gdf_size_type nE = in_graph->edgeList->src_indices->size; auto edge = thrust::make_zip_iterator(thrust::make_tuple(src, dst)); int filteredEdgeCount = thrust::count_if(rmm::exec_policy(stream)->on(stream), edge, edge + nE, detail::FilterEdges(k, c_ptr)); //Extract the relevant edges that have satisfied k-core conditions and put them in the output graph if (in_graph->edgeList->edge_data != nullptr) { switch (in_graph->edgeList->edge_data->dtype) { case GDF_FLOAT32: return detail::extract_edges<float> (in_graph, out_graph, c_ptr, k, filteredEdgeCount); case GDF_FLOAT64: return detail::extract_edges<double>(in_graph, out_graph, c_ptr, k, filteredEdgeCount); default: CUGRAPH_FAIL("Unsupported data type: edge data needs to be float32 or float64"); } } else { return detail::extract_edges<float> (in_graph, out_graph, c_ptr, k, filteredEdgeCount); } } void core_number(Graph *graph, gdf_column *core_number) { CHECK_GRAPH(graph) CUGRAPH_EXPECTS(graph->adjList->offsets->dtype == GDF_INT32, "Unsupported data type: graph needs to be int32"); CUGRAPH_EXPECTS(graph->adjList->indices->dtype == GDF_INT32, "Unsupported data type: graph needs to be int32"); CUGRAPH_EXPECTS(core_number->dtype == GDF_INT32, "Unsupported data type: core number needs to be int32"); CUGRAPH_EXPECTS(core_number->size == graph->numberOfVertices, "Column size mismatch"); 
return detail::core_number_impl(graph, static_cast<int*>(core_number->data)); } void k_core(Graph *in_graph, int k, gdf_column *vertex_id, gdf_column *core_number, Graph *out_graph) { CUGRAPH_EXPECTS(out_graph != nullptr, "Invalid API parameter: out_graph is NULL"); CUGRAPH_EXPECTS(in_graph != nullptr, "Invalid API parameter: in_graph is NULL"); gdf_size_type nV = in_graph->numberOfVertices; CUGRAPH_EXPECTS(in_graph->adjList->offsets->dtype == GDF_INT32, "Unsupported data type: graph needs to be int32"); CUGRAPH_EXPECTS(in_graph->adjList->indices->dtype == GDF_INT32, "Unsupported data type: graph needs to be int32"); CUGRAPH_EXPECTS((vertex_id != nullptr) && (core_number != nullptr), "Invalid API parameter"); CUGRAPH_EXPECTS(vertex_id->dtype == GDF_INT32, "Unsupported data type"); CUGRAPH_EXPECTS(core_number->dtype == GDF_INT32, "Unsupported data type"); CUGRAPH_EXPECTS(core_number->size == vertex_id->size, "Invalid API parameter"); CUGRAPH_EXPECTS(core_number->size == nV, "Invalid API parameter"); CUGRAPH_EXPECTS(k >= 0, "Invalid API parameter"); int * vertex_identifier_ptr = static_cast<int*>(vertex_id->data); int * core_number_ptr = static_cast<int*>(core_number->data); gdf_size_type vLen = vertex_id->size; extract_subgraph(in_graph, out_graph, vertex_identifier_ptr, core_number_ptr, k, vLen, nV); } } //namespace cugraph
cd9fd11df1e968eb38b8dc3af25c34896ec270bc.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of ComputeStuff copyright (C) 2020 Christopher Dyken. // Released under the MIT license, please see LICENSE file for details. #define USE_NVTOOLS_EXT #ifdef USE_NVTOOLS_EXT #include <roctracer/roctx.h> #endif #include <hip/hip_runtime_api.h> #include <glad/gl.h> #include <GLFW/glfw3.h> #include <cuda_gl_interop.h> #include <cmath> #include <cassert> #include <vector> #include <string> #include <iostream> #include <fstream> #include <chrono> #include <MC.h> using namespace ComputeStuff::MC; namespace { enum struct FieldFormat : uint32_t { UInt8, UInt16, Float }; FieldFormat format = FieldFormat::Float; uint3 field_size = make_uint3(256, 256, 256); bool wireframe = false; bool recreate_context = true; bool indexed = true; enum LogLevels { ALWAYS = 0, ERROR = 1, WARNING = 2, INFO = 3, DEBUG = 4, TRACE = 5 }; uint32_t loglevel = 4; #define LOG_ALWAYS(msg, ...) do { fputs("[A] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr); } while (0) #define LOG_ERROR(msg, ...) do { if(ERROR <= loglevel) { fputs("[E] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) #define LOG_WARNING(msg, ...) do { if(WARNING <= loglevel) { fputs("[W] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) #define LOG_INFO(msg, ...) do { if(INFO <= loglevel) { fputs("[I] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) #define LOG_DEBUG(msg, ...) do { if(DEBUG <= loglevel) { fputs("[D] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) #define LOG_TRACE(msg, ...) 
do { if(TRACE <= loglevel) { fputs("[T] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) float threshold = 0.f; std::vector<char> scalarField_host; void onGLFWError(int error, const char* what) { LOG_ERROR("GLFW Error: %s", what); } void onKey(GLFWwindow* window, int key, int scancode, int action, int mods) { bool print_threshold = false; if (action == GLFW_PRESS) { if (key == GLFW_KEY_W) { wireframe = !wireframe; LOG_INFO("Wireframe: %s", wireframe ? "on" : "off"); } else if (key == GLFW_KEY_UP) { threshold += 10.f; print_threshold = true; } else if (key == GLFW_KEY_DOWN) { threshold -= 10.f; print_threshold = true; } else if (key == GLFW_KEY_RIGHT) { threshold += 0.01f; print_threshold = true; } else if (key == GLFW_KEY_LEFT) { threshold -= 0.01f; print_threshold = true; } else if (key == GLFW_KEY_BACKSPACE) { threshold = 0.f; print_threshold = true; } else if (key == GLFW_KEY_I) { indexed = !indexed; recreate_context = true; LOG_INFO("Mode is %s", indexed ? "indexed" : "non-indexed"); } if (print_threshold) { LOG_INFO("Iso-value: %f", threshold); } } } const std::string simpleVS_src = R"(#version 430 in layout(location=0) vec3 inPosition; in layout(location=1) vec3 inNormal; out vec3 normal; uniform layout(location=0) mat4 MV; uniform layout(location=1) mat4 MVP; void main() { normal = mat3(MV)*inNormal; gl_Position = MVP * vec4(inPosition, 1); } )"; const std::string simpleFS_src = R"(#version 430 in vec3 normal; out layout(location=0) vec4 outColor; uniform layout(location=2) vec4 color; void main() { float d = max(0.0, dot(vec3(0,0,1), normalize(gl_FrontFacing ? 
-normal : normal))); if(gl_FrontFacing) outColor = d * color.rgba; else outColor = color.bgra; } )"; const std::string solidVS_src = R"(#version 430 in layout(location=0) vec3 inPosition; uniform layout(location=0) mat4 MV; uniform layout(location=1) mat4 MVP; void main() { gl_Position = MVP * vec4(inPosition, 1); } )"; const std::string solidFS_src = R"(#version 430 out layout(location=0) vec4 outColor; uniform layout(location=2) vec4 color; void main() { outColor = color.rgba; } )"; [[noreturn]] void handleOpenGLError(GLenum error, const std::string file, int line) { do { switch (error) { case GL_INVALID_ENUM: LOG_ERROR("GL_INVALID_ENUM"); break; case GL_INVALID_VALUE: LOG_ERROR("GL_INVALID_VALUE"); break; case GL_INVALID_OPERATION: LOG_ERROR("GL_INVALID_OPERATION"); break; case GL_INVALID_FRAMEBUFFER_OPERATION: LOG_ERROR("GL_INVALID_FRAMEBUFFER_OPERATION"); break; case GL_OUT_OF_MEMORY: LOG_ERROR("GL_OUT_OF_MEMORY"); break; case GL_STACK_OVERFLOW: LOG_ERROR("GL_STACK_OVERFLOW"); break; case GL_STACK_UNDERFLOW: LOG_ERROR("GL_STACK_UNDERFLOW"); break; default: LOG_ERROR("Unknown error"); break; } error = glGetError(); } while (error != GL_NO_ERROR); exit(EXIT_FAILURE); } #define CHECK_GL do { GLenum error = glGetError(); if(error != GL_NO_ERROR) handleOpenGLError(error, __FILE__, __LINE__); } while(0) [[noreturn]] void handleCudaError(hipError_t error, const std::string file, int line) { LOG_ERROR("%s@%d: CUDA: %s", file.c_str(), line, hipGetErrorString(error)); exit(EXIT_FAILURE); } #define CHECK_CUDA do { hipError_t error = hipGetLastError(); if(error != hipSuccess) handleCudaError(error, __FILE__, __LINE__); } while(0) #define CHECKED_CUDA(a) do { hipError_t error = (a); if(error != hipSuccess) handleCudaError(error, __FILE__, __LINE__); } while(0) GLuint createShader(const std::string& src, GLenum shader_type) { GLuint shader = glCreateShader(shader_type); const char* src_array[] = { src.c_str() }; glShaderSource(shader, 1, src_array, nullptr); 
glCompileShader(shader); GLsizei bufSize; glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &bufSize); if (bufSize) { LOG_WARNING("Source:\n%s", src.c_str()); std::vector<char> log(bufSize + 1); glGetShaderInfoLog(shader, bufSize + 1, nullptr, log.data()); LOG_WARNING("Compilator output:\n%s", log.data()); } GLint status; glGetShaderiv(shader, GL_COMPILE_STATUS, &status); if (status != GL_TRUE) { glDeleteShader(shader); return 0; } return shader; } GLuint createProgram(GLuint VS, GLuint FS) { GLuint program = glCreateProgram(); glAttachShader(program, VS); glAttachShader(program, FS); glLinkProgram(program); GLsizei bufSize; glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufSize); if (bufSize) { std::vector<char> log(bufSize + 1); glGetProgramInfoLog(program, bufSize + 1, nullptr, log.data()); LOG_WARNING("Linker output:\n%s", log.data()); } GLint status; glGetProgramiv(program, GL_LINK_STATUS, &status); if (status != GL_TRUE) { glDeleteProgram(program); return 0; } return program; } GLuint createBuffer(GLenum target, GLenum usage, size_t size, const void* data) { GLuint buffer = 0; glGenBuffers(1, &buffer); glBindBuffer(target, buffer); glBufferData(target, size, data, usage); CHECK_GL; return buffer; } void rotMatrixX(float* dst, const float angle) { const auto c = std::cos(angle); const auto s = std::sin(angle); dst[4 * 0 + 0] = 1.f; dst[4 * 0 + 1] = 0.f; dst[4 * 0 + 2] = 0.f; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = 0.f; dst[4 * 1 + 1] = c; dst[4 * 1 + 2] = s; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = 0.f; dst[4 * 2 + 1] = -s; dst[4 * 2 + 2] = c; dst[4 * 2 + 3] = 0.f; dst[4 * 3 + 0] = 0.f; dst[4 * 3 + 1] = 0.f; dst[4 * 3 + 2] = 0.f; dst[4 * 3 + 3] = 1.f; } void rotMatrixY(float* dst, const float angle) { const auto c = std::cos(angle); const auto s = std::sin(angle); dst[4 * 0 + 0] = c; dst[4 * 0 + 1] = 0.f; dst[4 * 0 + 2] = -s; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = 0.f; dst[4 * 1 + 1] = 1.f; dst[4 * 1 + 2] = 0.f; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = s; dst[4 * 2 + 1] = 
0.f; dst[4 * 2 + 2] = c; dst[4 * 2 + 3] = 0.f; dst[4 * 3 + 0] = 0.f; dst[4 * 3 + 1] = 0.f; dst[4 * 3 + 2] = 0.f; dst[4 * 3 + 3] = 1.f; } void rotMatrixZ(float* dst, const float angle) { const auto c = std::cos(angle); const auto s = std::sin(angle); dst[4 * 0 + 0] = c; dst[4 * 0 + 1] = s; dst[4 * 0 + 2] = 0.f; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = -s; dst[4 * 1 + 1] = c; dst[4 * 1 + 2] = 0.f; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = 0.f; dst[4 * 2 + 1] = 0.f; dst[4 * 2 + 2] = 1.f; dst[4 * 2 + 3] = 0.f; dst[4 * 3 + 0] = 0.f; dst[4 * 3 + 1] = 0.f; dst[4 * 3 + 2] = 0.f; dst[4 * 3 + 3] = 1.f; } void translateMatrix(float* dst, const float x, const float y, const float z) { dst[4 * 0 + 0] = 1.f; dst[4 * 0 + 1] = 0.f; dst[4 * 0 + 2] = 0.f; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = 0.f; dst[4 * 1 + 1] = 1.f; dst[4 * 1 + 2] = 0.f; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = 0.f; dst[4 * 2 + 1] = 0.f; dst[4 * 2 + 2] = 1.f; dst[4 * 2 + 3] = 0.f; dst[4 * 3 + 0] = x; dst[4 * 3 + 1] = y; dst[4 * 3 + 2] = z; dst[4 * 3 + 3] = 1.f; } void frustumMatrix(float* dst, const float w, const float h, const float n, const float f) { auto a = 2.f * n / w; auto b = 2.f * n / h; auto c = -(f + n) / (f - n); auto d = -2.f * f * n / (f - n); dst[4 * 0 + 0] = a; dst[4 * 0 + 1] = 0.f; dst[4 * 0 + 2] = 0.f; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = 0.f; dst[4 * 1 + 1] = b; dst[4 * 1 + 2] = 0.f; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = 0.f; dst[4 * 2 + 1] = 0.f; dst[4 * 2 + 2] = c; dst[4 * 2 + 3] = -1.f; dst[4 * 3 + 0] = 0.f; dst[4 * 3 + 1] = 0; dst[4 * 3 + 2] = d; dst[4 * 3 + 3] = 0.f; } void matrixMul4(float* D, const float* A, const float* B) { for (unsigned i = 0; i < 4; i++) { for (unsigned j = 0; j < 4; j++) { float sum = 0.f; for (unsigned k = 0; k < 4; k++) { sum += A[4 * k + j] * B[4 * i + k]; } D[4 * i + j] = sum; } } } void buildTransforms(float* normal_matrix, float* modelview_projection, const int width, const int height, double seconds) { float center[16]; translateMatrix(center, -0.5f, -0.5f, 
-0.5f); float rx[16]; rotMatrixX(rx, static_cast<float>(0.3 * seconds)); float ry[16]; rotMatrixY(ry, static_cast<float>(0.7 * seconds)); float rz[16]; rotMatrixZ(rz, static_cast<float>(0.5 * seconds)); float shift[16]; translateMatrix(shift, 0.f, 0.f, -2.0f); float frustum[16]; frustumMatrix(frustum, float(width) / float(height), 1.f, 1.f, 8.f); float rx_center[16]; matrixMul4(rx_center, rx, center); float ry_rx[16]; matrixMul4(ry_rx, ry, rx_center); matrixMul4(normal_matrix, rz, ry_rx); float shift_rz_ry_rx[16]; matrixMul4(shift_rz_ry_rx, shift, normal_matrix); matrixMul4(modelview_projection, frustum, shift_rz_ry_rx); } constexpr float cayley(unsigned i, unsigned j, unsigned k, uint3 field_size) { float x = (2.f * i) / (field_size.x - 1.f) - 1.f; float y = (2.f * j) / (field_size.y - 1.f) - 1.f; float z = (2.f * k) / (field_size.z - 1.f) - 1.f; float v = 1.f - 16.f * x * y * z - 4.f * (x * x + y * y + z * z); return v; } GLfloat wireBoxVertexData[] = { 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 1.f, 0.f, 0.f, 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 1.f, 1.f, 0.f, 0.f, 1.f, 1.f, 0.f, 1.f, 0.f, 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 0.f, 0.f, 1.f, 1.f, 1.f, 0.f, 0.f, 1.f, 0.f, 1.f, 1.f, 1.f, 0.f, 1.f, 1.f, 1.f }; void buildCayleyField() { const size_t N = static_cast<size_t>(field_size.x) * field_size.y * field_size.z; switch (format) { case FieldFormat::UInt8: { scalarField_host.resize(N); auto* dst = reinterpret_cast<uint8_t*>(scalarField_host.data()); for (unsigned k = 0; k < field_size.z; k++) { for (unsigned j = 0; j < field_size.y; j++) { for (unsigned i = 0; i < field_size.x; i++) { float v = cayley(i, j, k, field_size); v = 0.5f * 255.f * (v + 1.f); if (v < 0.f) v = 0.f; if (255.f < v) v = 255.f; *dst++ = static_cast<uint8_t>(v); } } } break; } case FieldFormat::UInt16: { scalarField_host.resize(sizeof(uint16_t) * N); auto* dst = 
reinterpret_cast<uint16_t*>(scalarField_host.data()); for (unsigned k = 0; k < field_size.z; k++) { for (unsigned j = 0; j < field_size.y; j++) { for (unsigned i = 0; i < field_size.x; i++) { float v = cayley(i, j, k, field_size); v = 0.5f * 65535.f * (v + 1.f); if (v < 0.f) v = 0.f; if (65535.f < v) v = 65535.f; *dst++ = static_cast<uint16_t>(v); } } } break; } case FieldFormat::Float: { scalarField_host.resize(sizeof(float) * N); auto* dst = reinterpret_cast<float*>(scalarField_host.data()); for (unsigned k = 0; k < field_size.z; k++) { for (unsigned j = 0; j < field_size.y; j++) { for (unsigned i = 0; i < field_size.x; i++) { *dst++ = cayley(i, j, k, field_size); } } } break; } default: assert(false && "Unhandled case"); break; } } bool readFile(const char* path) { assert(path); LOG_INFO("Reading %s...", path); FILE* fp = fopen(path, "rb"); if (!fp) { LOG_ERROR("Error opening file \"%s\" for reading.", path); return false; } if (fseek(fp, 0L, SEEK_END) == 0) { uint8_t header[6]; long size = ftell(fp); if (sizeof(header) <= size) { if (fseek(fp, 0L, SEEK_SET) == 0) { if (fread(header, sizeof(header), 1, fp) == 1) { field_size.x = header[0] | header[1] << 8; field_size.y = header[2] | header[3] << 8; field_size.z = header[4] | header[5] << 8; size_t N = static_cast<size_t>(field_size.x) * field_size.y * field_size.z; if ((N + 3) * 2 != size) { LOG_ERROR("Unexpected file size."); } else { std::vector<uint8_t> tmp(2 * N); if (fread(tmp.data(), 2, N, fp) == N) { switch (format) { case FieldFormat::UInt8: { scalarField_host.resize(N); auto* dst = reinterpret_cast<uint8_t*>(scalarField_host.data()); for (size_t i = 0; i < N; i++) { const uint32_t v = tmp[2 * i + 0] | tmp[2 * i + 1] << 8; dst[i] = v >> 4; // 12 bits are in use. 
} break; } case FieldFormat::UInt16: { scalarField_host.resize(sizeof(uint16_t) * N); auto* dst = reinterpret_cast<uint16_t*>(scalarField_host.data()); for (size_t i = 0; i < N; i++) { const uint32_t v = tmp[2 * i + 0] | tmp[2 * i + 1] << 8; dst[i] = v; } break; } case FieldFormat::Float: { scalarField_host.resize(sizeof(float) * N); auto* dst = reinterpret_cast<float*>(scalarField_host.data()); for (size_t i = 0; i < N; i++) { const uint32_t v = tmp[2 * i + 0] | tmp[2 * i + 1] << 8; dst[i] = static_cast<float>(v); } break; } default: assert(false && "Unhandled case"); } LOG_INFO("Successfully loaded %s", path); fclose(fp); return true; } } } } } } LOG_ERROR("Error loading \"%s\"", path); fclose(fp); return false; } void setupScalarField(float*& scalar_field_d, const char* path, const uint3& field_size, hipStream_t stream) { // Set up scalar field if (!path) { LOG_ERROR("No input file specified."); exit(EXIT_FAILURE); } else if (strcmp("cayley", path) == 0) { buildCayleyField(); } else if (!readFile(path)) { exit(EXIT_FAILURE); } assert(static_cast<size_t>(field_size.x) * field_size.y * field_size.z * 4 == scalarField_host.size()); LOG_INFO("Scalar field is [%d x %d x %d] (%d cells total)", field_size.x, field_size.y, field_size.z, field_size.x * field_size.y * field_size.z); CHECKED_CUDA(hipMalloc(&scalar_field_d, scalarField_host.size())); CHECKED_CUDA(hipMemcpyAsync(scalar_field_d, scalarField_host.data(), scalarField_host.size(), hipMemcpyHostToDevice, stream)); } void initWindowAndGL(GLFWwindow*& win, GLuint& shadedProg, GLuint& solidProg) { glfwSetErrorCallback(onGLFWError); if (!glfwInit()) { LOG_ERROR("GLFW failed to initialize."); exit(EXIT_FAILURE); } glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); win = glfwCreateWindow(1280, 720, "Marching cubes test application", nullptr, nullptr); glfwSetKeyCallback(win, onKey); glfwMakeContextCurrent(win); 
gladLoadGL(glfwGetProcAddress); GLuint simpleVS = createShader(simpleVS_src, GL_VERTEX_SHADER); assert(simpleVS != 0); GLuint simpleFS = createShader(simpleFS_src, GL_FRAGMENT_SHADER); assert(simpleFS != 0); shadedProg = createProgram(simpleVS, simpleFS); assert(shadedProg != 0); GLuint solidVS = createShader(solidVS_src, GL_VERTEX_SHADER); assert(solidVS != 0); GLuint solidFS = createShader(solidFS_src, GL_FRAGMENT_SHADER); assert(solidFS != 0); solidProg = createProgram(solidVS, solidFS); assert(solidProg != 0); } } int main(int argc, char** argv) { hipStream_t stream; const char* path = nullptr; int deviceIndex = 0; bool benchmark = false; for (int i = 1; i < argc; i++) { if (i + 1 < argc && (strcmp(argv[i], "-d") == 0 || strcmp(argv[i], "--device") == 0)) { deviceIndex = std::atoi(argv[i + 1]); i++; } else if (i + 1 < argc && strcmp(argv[i], "-nx") == 0) { field_size.x = uint32_t(std::atoi(argv[i + 1])); i++; } else if (i + 1 < argc && strcmp(argv[i], "-ny") == 0) { field_size.y = uint32_t(std::atoi(argv[i + 1])); i++; } else if (i + 1 < argc && strcmp(argv[i], "-nz") == 0) { field_size.z = uint32_t(std::atoi(argv[i + 1])); i++; } else if (i + 1 < argc && strcmp(argv[i], "-n") == 0) { field_size.x = uint32_t(std::atoi(argv[i + 1])); field_size.y = field_size.x; field_size.z = field_size.x; i++; } else if (i + 1 < argc && strcmp(argv[i], "-i") == 0) { threshold = static_cast<float>(std::atof(argv[i + 1])); i++; } else if (i + 1 < argc && strcmp(argv[i], "-l") == 0) { loglevel = uint32_t(std::atoi(argv[i + 1])); i++; } #if 0 // Currently only float is supported else if (i + 1 < argc && strcmp(argv[i], "-f") == 0) { if (strcmp(argv[i + 1], "uint8") == 0) { format = FieldFormat::UInt8; } else if (strcmp(argv[i + 1], "uint16") == 0) { format = FieldFormat::UInt16; } else if (strcmp(argv[i + 1], "float") == 0) { format = FieldFormat::Float; } else { fprintf(stderr, "Unknown format '%s'", argv[i + 1]); return EXIT_FAILURE; } i++; } #endif else if ((strcmp(argv[i], 
"-b") == 0) || (strcmp(argv[i], "--benchmark") == 0)) { benchmark = true; } else if ((strcmp(argv[i], "-h") == 0) || (strcmp(argv[i], "--help") == 0)) { fprintf(stderr, "HP5 Marching Cubes test application.\n"); fprintf(stderr, "Copyright (C) 2020 Christopher Dyken. Released under the MIT license\n\n"); fprintf(stderr, "Usage: %s [options] [dataset]\n\n", argv[0]); fprintf(stderr, "Options:\n"); fprintf(stderr, " -d int Choose CUDA device.\n"); fprintf(stderr, " -nx int Set number of samples in x direction.\n"); fprintf(stderr, " -nx int Set number of samples in y direction.\n"); fprintf(stderr, " -nx int Set number of samples in z direction.\n"); fprintf(stderr, " -n int Set uniform number of samples in x,y,z directions.\n"); fprintf(stderr, " -i float Set iso-value to extract surface for.\n"); fprintf(stderr, " -l int Log-level, higher is more verbose.\n"); fprintf(stderr, " -b Enable benchmark mode without OpenGL interop.\n"); fprintf(stderr, "\nDataset:\n"); fprintf(stderr, " cayley Built-in algebraic surface.\n"); fprintf(stderr, " file.dat Raw binary uint16_t data with three binary uint16_t in front with x,y,z size.\n"); fprintf(stderr, "\nKey bindings:\n"); fprintf(stderr, " right/left Increase/decrease threshold by 100.\n"); fprintf(stderr, " up/down Increase/decrease threshold by 0.1.\n"); fprintf(stderr, " w Enable/disable wireframe.\n"); return 0; } else { if (path) { LOG_ERROR("%s: input already specified", argv[i]); return EXIT_FAILURE; } path = argv[i]; } } if (benchmark) { int deviceCount = 0; CHECKED_CUDA(hipGetDeviceCount(&deviceCount)); bool found = false; for (int i = 0; i < deviceCount; i++) { hipDeviceProp_t dev_prop; hipGetDeviceProperties(&dev_prop, i); LOG_INFO("%c[%i] %s cap=%d.%d", i == deviceIndex ? 
'*' : ' ', i, dev_prop.name, dev_prop.major, dev_prop.minor); if (i == deviceIndex) { found = true; } } if (!found) { LOG_ERROR("Illegal CUDA device index %d", deviceIndex); return EXIT_FAILURE; } hipSetDevice(deviceIndex); CHECKED_CUDA(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); // Create events for timing static const unsigned eventNum = 32; hipEvent_t events[2 * eventNum]; for (size_t i = 0; i < 2 * eventNum; i++) { CHECKED_CUDA(hipEventCreate(&events[i])); CHECKED_CUDA(hipEventRecord(events[i], stream)); } size_t free, total; CHECKED_CUDA(hipMemGetInfo(&free, &total)); LOG_INFO("CUDA memory free=%zumb total=%zumb", (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); float* scalar_field_d = nullptr; setupScalarField(scalar_field_d, path, field_size, stream); LOG_INFO("Built scalar field"); CHECKED_CUDA(hipMemGetInfo(&free, &total)); LOG_INFO("CUDA memory free=%zumb total=%zumb", (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); auto* tables = createTables(stream); struct { const char* name; bool indexed; bool sync; } benchmark_cases[] = { {"ix sync", true, true}, {"noix sync", false, true}, {"ix nosync", true, false}, {"noix nosync", false, false} }; float min_time = 0.5; for (auto& bc : benchmark_cases) { #ifdef USE_NVTOOLS_EXT roctxRangePush(bc.name); #endif auto* ctx = createContext(tables, field_size, true, stream); LOG_INFO("%12s: Created context.", bc.name); LOG_INFO("Grid size [%u x %u x %u]", ctx->grid_size.x, ctx->grid_size.y, ctx->grid_size.z); LOG_INFO("Chunks [%u x %u x %u] (= %u) cover=[%u x %u x %u]", ctx->chunks.x, ctx->chunks.y, ctx->chunks.z, ctx->chunk_total, 31 * ctx->chunks.x, 5 * ctx->chunks.y, 5 * ctx->chunks.z); LOG_INFO("Level vec4-offset vec4-size ( size)"); for (unsigned l = 0; l < ctx->levels; l++) { LOG_INFO("[%2d] %12d %10d (%8d)", l, ctx->level_offsets[l], ctx->level_sizes[l], 4 * ctx->level_sizes[l]); } LOG_INFO("Total %d, levels %d", 
ctx->total_size, ctx->levels); // Run with no output buffers to get size of output. ComputeStuff::MC::buildPN(ctx, nullptr, nullptr, 0, 0, field_size.x, field_size.x* field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, true, true); uint32_t vertex_count = 0; uint32_t index_count = 0; ComputeStuff::MC::getCounts(ctx, &vertex_count, &index_count, stream); float* vertex_data_d = nullptr; CHECKED_CUDA(hipMalloc(&vertex_data_d, 6 * sizeof(float) * vertex_count)); uint32_t* index_data_d = nullptr; CHECKED_CUDA(hipMalloc(&index_data_d, sizeof(uint32_t)* index_count)); LOG_INFO("%12s: Allocated output buffers.", bc.name); LOG_INFO("%12s: Warming up", bc.name); for (unsigned i = 0; i < 100; i++) { ComputeStuff::MC::buildPN(ctx, vertex_data_d, index_data_d, 6 * sizeof(float) * vertex_count, sizeof(uint32_t) * index_count, field_size.x, field_size.x * field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, true, true); if (bc.sync) { ComputeStuff::MC::getCounts(ctx, &vertex_count, &index_count, stream); } } LOG_INFO("%12s: Benchmarking", bc.name); auto start = std::chrono::high_resolution_clock::now(); double elapsed = 0.f; float cuda_ms = 0.f; unsigned iterations = 0; unsigned cuda_ms_n = 0; #ifdef USE_NVTOOLS_EXT roctxRangePush("Benchmark runs"); #endif while (iterations < 100 || elapsed < min_time) { CHECKED_CUDA(hipEventRecord(events[2 * (iterations % eventNum) + 0], stream)); ComputeStuff::MC::buildPN(ctx, vertex_data_d, index_data_d, 6 * sizeof(float) * vertex_count, sizeof(uint32_t) * index_count, field_size.x, field_size.x * field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, true, true); if (bc.sync) { ComputeStuff::MC::getCounts(ctx, &vertex_count, &index_count, stream); } CHECKED_CUDA(hipEventRecord(events[2 * (iterations % eventNum) + 1], stream)); if (eventNum <= iterations) { float ms = 0; if (!bc.sync) { CHECKED_CUDA(hipEventSynchronize(events[2 * ((iterations + 1) % eventNum) 
+ 1])); } CHECKED_CUDA(hipEventElapsedTime(&ms, events[2 * ((iterations + 1) % eventNum) + 0], events[2 * ((iterations + 1) % eventNum) + 1])); cuda_ms += ms; cuda_ms_n++; } std::chrono::duration<double> span = std::chrono::high_resolution_clock::now() - start; elapsed = span.count(); iterations++; } #ifdef USE_NVTOOLS_EXT roctxRangePop(); #endif CHECKED_CUDA(hipMemGetInfo(&free, &total)); LOG_ALWAYS("%12s: %.2f FPS (%.0fMVPS) cuda: %.2fms (%.0f MVPS) %ux%ux%u Nv=%u Ni=%u memfree=%zumb/%zumb", bc.name, iterations / elapsed, (float(iterations) * field_size.x * field_size.y * field_size.z) / (1000000.f * elapsed), cuda_ms / cuda_ms_n, (float(cuda_ms_n) * field_size.x * field_size.y * field_size.z) / (1000.f * cuda_ms), field_size.x, field_size.y, field_size.z, vertex_count, index_count, (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); freeContext(ctx, stream); CHECKED_CUDA(hipStreamSynchronize(stream)); CHECKED_CUDA(hipFree(vertex_data_d)); CHECKED_CUDA(hipFree(index_data_d)); CHECKED_CUDA(hipMemGetInfo(&free, &total)); LOG_INFO("%12s: Released resources free=%zumb total=%zumb", bc.name, (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); #ifdef USE_NVTOOLS_EXT roctxRangePop(); #endif } LOG_ALWAYS("Exiting..."); CHECKED_CUDA(hipMemGetInfo(&free, &total)); LOG_INFO("CUDA memory free=%zumb total=%zumb", (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); return 0; } GLFWwindow* win = nullptr; GLuint shadedProg = 0; GLuint solidProg = 0; initWindowAndGL(win, shadedProg, solidProg); unsigned int deviceCount; CHECKED_CUDA(hipGLGetDevices(&deviceCount, nullptr, 0, HIP_GL_DEVICE_LIST_ALL)); if (deviceCount == 0) { LOG_ERROR("No CUDA-enabled devices available."); return EXIT_FAILURE; } std::vector<int> devices(deviceCount); CHECKED_CUDA(hipGLGetDevices(&deviceCount, devices.data(), deviceCount, HIP_GL_DEVICE_LIST_ALL)); bool found = false; for (unsigned k = 0; k < 
deviceCount; k++) { int i = devices[k]; hipDeviceProp_t dev_prop; hipGetDeviceProperties(&dev_prop, i); LOG_INFO("%c[%i] %s cap=%d.%d", i == deviceIndex ? '*' : ' ', i, dev_prop.name, dev_prop.major, dev_prop.minor); if (i == deviceIndex) { found = true; } } if (!found) { LOG_ERROR("Illegal CUDA device index %d", deviceIndex); return EXIT_FAILURE; } hipSetDevice(deviceIndex); CHECKED_CUDA(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); // Set up scalar field float* scalar_field_d = nullptr; setupScalarField(scalar_field_d, path, field_size, stream); auto* tables = createTables(stream); GLuint wireBoxVertexBuffer = createBuffer(GL_ARRAY_BUFFER, GL_STATIC_DRAW, sizeof(wireBoxVertexData), wireBoxVertexData); uint32_t wireBoxVertexCount = sizeof(wireBoxVertexData) / (3 * sizeof(float)); GLuint wireBoxVbo = 0; glGenVertexArrays(1, &wireBoxVbo); glBindVertexArray(wireBoxVbo); glBindBuffer(GL_ARRAY_BUFFER, wireBoxVertexBuffer); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 3, nullptr); glEnableVertexAttribArray(0); unsigned eventCounter = 0; hipEvent_t events[2 * 4]; for (size_t i = 0; i < 2 * 4; i++) { CHECKED_CUDA(hipEventCreate(&events[i])); CHECKED_CUDA(hipEventRecord(events[i], stream)); } GLuint cudaVertexBuf = createBuffer(GL_ARRAY_BUFFER, GL_STREAM_DRAW, 3 * sizeof(float), nullptr); cudaGraphicsResource* vertexBufferResource = nullptr; CHECKED_CUDA(hipGraphicsGLRegisterBuffer(&vertexBufferResource, cudaVertexBuf, hipGraphicsRegisterFlagsWriteDiscard)); GLuint cudaIndexBuf = createBuffer(GL_ELEMENT_ARRAY_BUFFER, GL_STREAM_DRAW, 3 * sizeof(uint32_t), nullptr); cudaGraphicsResource* indexBufferResource = nullptr; CHECKED_CUDA(hipGraphicsGLRegisterBuffer(&indexBufferResource, cudaIndexBuf, hipGraphicsRegisterFlagsWriteDiscard)); GLuint cudaVbo = 0; glGenVertexArrays(1, &cudaVbo); glBindVertexArray(cudaVbo); glBindBuffer(GL_ARRAY_BUFFER, cudaVertexBuf); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 6, nullptr); 
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 6, (void*)(sizeof(float) * 3)); glEnableVertexAttribArray(0); glEnableVertexAttribArray(1); auto start = std::chrono::system_clock::now(); auto timer = std::chrono::high_resolution_clock::now(); float cuda_ms = 0.f; unsigned frames = 0u; ComputeStuff::MC::Context* ctx = nullptr; while (!glfwWindowShouldClose(win)) { int width, height; glfwGetWindowSize(win, &width, &height); uint32_t vertex_count = 0; uint32_t index_count = 0; { if (ctx == nullptr || recreate_context) { freeContext(ctx, stream); ctx = createContext(tables, field_size, indexed, stream); recreate_context = false; } float* cudaVertexBuf_d = nullptr; size_t cudaVertexBuf_size = 0; uint32_t* cudaIndexBuf_d = nullptr; size_t cudaIndexBuf_size = 0; CHECKED_CUDA(hipGraphicsMapResources(1, &vertexBufferResource, stream)); CHECKED_CUDA(hipGraphicsResourceGetMappedPointer((void**)&cudaVertexBuf_d, &cudaVertexBuf_size, vertexBufferResource)); if (indexed) { CHECKED_CUDA(hipGraphicsMapResources(1, &indexBufferResource, stream)); CHECKED_CUDA(hipGraphicsResourceGetMappedPointer((void**)&cudaIndexBuf_d, &cudaIndexBuf_size, indexBufferResource)); } CHECKED_CUDA(hipEventRecord(events[2 * eventCounter + 0], stream)); ComputeStuff::MC::buildPN(ctx, cudaVertexBuf_d, cudaIndexBuf_d, cudaVertexBuf_size, cudaIndexBuf_size, field_size.x, field_size.x* field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, true, true); CHECKED_CUDA(hipEventRecord(events[2 * eventCounter + 1], stream)); CHECKED_CUDA(hipGraphicsUnmapResources(1, &vertexBufferResource, stream)); if (indexed) { CHECKED_CUDA(hipGraphicsUnmapResources(1, &indexBufferResource, stream)); } ComputeStuff::MC::getCounts(ctx, &vertex_count, &index_count, stream); eventCounter = (eventCounter + 1) & 3; float ms = 0; CHECKED_CUDA(hipEventElapsedTime(&ms, events[2 * eventCounter + 0], events[2 * eventCounter + 1])); cuda_ms += ms; bool vertexBufTooSmall = cudaVertexBuf_size < 6 * 
sizeof(float) * vertex_count; bool indexBufTooSmall = cudaIndexBuf_size < sizeof(uint32_t)* index_count; if (vertexBufTooSmall || indexBufTooSmall) { CHECKED_CUDA(hipGraphicsUnregisterResource(vertexBufferResource)); CHECKED_CUDA(hipGraphicsUnregisterResource(indexBufferResource)); if (vertexBufTooSmall) { size_t newVertexBufSize = 6 * sizeof(float) * (static_cast<size_t>(vertex_count) + vertex_count / 16); glBindBuffer(GL_ARRAY_BUFFER, cudaVertexBuf); glBufferData(GL_ARRAY_BUFFER, newVertexBufSize, nullptr, GL_STREAM_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); LOG_INFO("Resizing: vbuf=%zub", newVertexBufSize); } if (indexBufTooSmall) { size_t newIndexBufSize = sizeof(uint32_t) * (index_count + index_count / 16); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cudaIndexBuf); glBufferData(GL_ELEMENT_ARRAY_BUFFER, newIndexBufSize, nullptr, GL_STREAM_DRAW); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); LOG_INFO("Resizing: ibuf=%zub", newIndexBufSize); } CHECKED_CUDA(hipGraphicsGLRegisterBuffer(&vertexBufferResource, cudaVertexBuf, hipGraphicsRegisterFlagsWriteDiscard)); CHECKED_CUDA(hipGraphicsGLRegisterBuffer(&indexBufferResource, cudaIndexBuf, hipGraphicsRegisterFlagsWriteDiscard)); CHECKED_CUDA(hipGraphicsMapResources(1, &vertexBufferResource, stream)); CHECKED_CUDA(hipGraphicsResourceGetMappedPointer((void**)&cudaVertexBuf_d, &cudaVertexBuf_size, vertexBufferResource)); if (indexed) { CHECKED_CUDA(hipGraphicsMapResources(1, &indexBufferResource, stream)); CHECKED_CUDA(hipGraphicsResourceGetMappedPointer((void**)&cudaIndexBuf_d, &cudaIndexBuf_size, indexBufferResource)); } ComputeStuff::MC::buildPN(ctx, cudaVertexBuf_d, cudaIndexBuf_d, cudaVertexBuf_size, cudaIndexBuf_size, field_size.x, field_size.x* field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, false, indexed); CHECKED_CUDA(hipGraphicsUnmapResources(1, &vertexBufferResource, stream)); if (indexed) { CHECKED_CUDA(hipGraphicsUnmapResources(1, &indexBufferResource, stream)); } } } glViewport(0, 0, 
width, height); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); std::chrono::duration<double> elapsed = std::chrono::system_clock::now() - start; float normal_matrix[16]; float modelview_projection[16]; buildTransforms(normal_matrix, modelview_projection, width, height, elapsed.count()); glEnable(GL_DEPTH_TEST); glPolygonOffset(0.f, 1.f); if (wireframe) { glEnable(GL_POLYGON_OFFSET_FILL); } glBindVertexArray(cudaVbo); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); glUseProgram(shadedProg); glUniformMatrix4fv(0, 1, GL_FALSE, normal_matrix); glUniformMatrix4fv(1, 1, GL_FALSE, modelview_projection); glUniform4f(2, 0.6f, 0.6f, 0.8f, 1.f); if (indexed) { glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cudaIndexBuf); glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, nullptr); } else { glDrawArrays(GL_TRIANGLES, 0, vertex_count); } glDisable(GL_POLYGON_OFFSET_FILL); if (wireframe) { glUseProgram(solidProg); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); glUniformMatrix4fv(0, 1, GL_FALSE, normal_matrix); glUniformMatrix4fv(1, 1, GL_FALSE, modelview_projection); glUniform4f(2, 1.f, 1.f, 1.f, 1.f); if (indexed) { glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cudaIndexBuf); glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, nullptr); } else { glDrawArrays(GL_TRIANGLES, 0, vertex_count); } glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); } glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); glBindVertexArray(wireBoxVbo); glUseProgram(solidProg); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); glUniformMatrix4fv(0, 1, GL_FALSE, normal_matrix); glUniformMatrix4fv(1, 1, GL_FALSE, modelview_projection); glUniform4f(2, 1.f, 1.f, 1.f, 1.f); glDrawArrays(GL_LINES, 0, wireBoxVertexCount); glfwSwapBuffers(win); glfwPollEvents(); { frames++; auto now = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = now - timer; auto s = elapsed.count(); if (10 < frames && 3.0 < s) { size_t free, total; CHECKED_CUDA(hipMemGetInfo(&free, &total)); LOG_INFO("%.2f FPS (%.2f MVPS) cuda avg: %.2fms 
(%.2f MVPS) %ux%ux%u Nv=%u Ni=%u ix=%s memfree=%zumb/%zumb", frames / s, (float(frames)* field_size.x* field_size.y* field_size.z) / (1000000.f * s), cuda_ms / frames, (float(frames)* field_size.x* field_size.y* field_size.z) / (1000.f * cuda_ms), field_size.x, field_size.y, field_size.z, vertex_count, index_count, indexed ? "y" : "n", (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); timer = now; frames = 0; cuda_ms = 0.f; } } } glfwDestroyWindow(win); glfwTerminate(); return EXIT_SUCCESS; }
cd9fd11df1e968eb38b8dc3af25c34896ec270bc.cu
// This file is part of ComputeStuff copyright (C) 2020 Christopher Dyken. // Released under the MIT license, please see LICENSE file for details. #define USE_NVTOOLS_EXT #ifdef USE_NVTOOLS_EXT #include <nvToolsExt.h> #endif #include <cuda_runtime_api.h> #include <glad/gl.h> #include <GLFW/glfw3.h> #include <cuda_gl_interop.h> #include <cmath> #include <cassert> #include <vector> #include <string> #include <iostream> #include <fstream> #include <chrono> #include <MC.h> using namespace ComputeStuff::MC; namespace { enum struct FieldFormat : uint32_t { UInt8, UInt16, Float }; FieldFormat format = FieldFormat::Float; uint3 field_size = make_uint3(256, 256, 256); bool wireframe = false; bool recreate_context = true; bool indexed = true; enum LogLevels { ALWAYS = 0, ERROR = 1, WARNING = 2, INFO = 3, DEBUG = 4, TRACE = 5 }; uint32_t loglevel = 4; #define LOG_ALWAYS(msg, ...) do { fputs("[A] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr); } while (0) #define LOG_ERROR(msg, ...) do { if(ERROR <= loglevel) { fputs("[E] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) #define LOG_WARNING(msg, ...) do { if(WARNING <= loglevel) { fputs("[W] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) #define LOG_INFO(msg, ...) do { if(INFO <= loglevel) { fputs("[I] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) #define LOG_DEBUG(msg, ...) do { if(DEBUG <= loglevel) { fputs("[D] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) #define LOG_TRACE(msg, ...) 
do { if(TRACE <= loglevel) { fputs("[T] ", stderr); fprintf(stderr, msg, ##__VA_ARGS__); fputc('\n', stderr);} } while (0) float threshold = 0.f; std::vector<char> scalarField_host; void onGLFWError(int error, const char* what) { LOG_ERROR("GLFW Error: %s", what); } void onKey(GLFWwindow* window, int key, int scancode, int action, int mods) { bool print_threshold = false; if (action == GLFW_PRESS) { if (key == GLFW_KEY_W) { wireframe = !wireframe; LOG_INFO("Wireframe: %s", wireframe ? "on" : "off"); } else if (key == GLFW_KEY_UP) { threshold += 10.f; print_threshold = true; } else if (key == GLFW_KEY_DOWN) { threshold -= 10.f; print_threshold = true; } else if (key == GLFW_KEY_RIGHT) { threshold += 0.01f; print_threshold = true; } else if (key == GLFW_KEY_LEFT) { threshold -= 0.01f; print_threshold = true; } else if (key == GLFW_KEY_BACKSPACE) { threshold = 0.f; print_threshold = true; } else if (key == GLFW_KEY_I) { indexed = !indexed; recreate_context = true; LOG_INFO("Mode is %s", indexed ? "indexed" : "non-indexed"); } if (print_threshold) { LOG_INFO("Iso-value: %f", threshold); } } } const std::string simpleVS_src = R"(#version 430 in layout(location=0) vec3 inPosition; in layout(location=1) vec3 inNormal; out vec3 normal; uniform layout(location=0) mat4 MV; uniform layout(location=1) mat4 MVP; void main() { normal = mat3(MV)*inNormal; gl_Position = MVP * vec4(inPosition, 1); } )"; const std::string simpleFS_src = R"(#version 430 in vec3 normal; out layout(location=0) vec4 outColor; uniform layout(location=2) vec4 color; void main() { float d = max(0.0, dot(vec3(0,0,1), normalize(gl_FrontFacing ? 
-normal : normal))); if(gl_FrontFacing) outColor = d * color.rgba; else outColor = color.bgra; } )"; const std::string solidVS_src = R"(#version 430 in layout(location=0) vec3 inPosition; uniform layout(location=0) mat4 MV; uniform layout(location=1) mat4 MVP; void main() { gl_Position = MVP * vec4(inPosition, 1); } )"; const std::string solidFS_src = R"(#version 430 out layout(location=0) vec4 outColor; uniform layout(location=2) vec4 color; void main() { outColor = color.rgba; } )"; [[noreturn]] void handleOpenGLError(GLenum error, const std::string file, int line) { do { switch (error) { case GL_INVALID_ENUM: LOG_ERROR("GL_INVALID_ENUM"); break; case GL_INVALID_VALUE: LOG_ERROR("GL_INVALID_VALUE"); break; case GL_INVALID_OPERATION: LOG_ERROR("GL_INVALID_OPERATION"); break; case GL_INVALID_FRAMEBUFFER_OPERATION: LOG_ERROR("GL_INVALID_FRAMEBUFFER_OPERATION"); break; case GL_OUT_OF_MEMORY: LOG_ERROR("GL_OUT_OF_MEMORY"); break; case GL_STACK_OVERFLOW: LOG_ERROR("GL_STACK_OVERFLOW"); break; case GL_STACK_UNDERFLOW: LOG_ERROR("GL_STACK_UNDERFLOW"); break; default: LOG_ERROR("Unknown error"); break; } error = glGetError(); } while (error != GL_NO_ERROR); exit(EXIT_FAILURE); } #define CHECK_GL do { GLenum error = glGetError(); if(error != GL_NO_ERROR) handleOpenGLError(error, __FILE__, __LINE__); } while(0) [[noreturn]] void handleCudaError(cudaError_t error, const std::string file, int line) { LOG_ERROR("%s@%d: CUDA: %s", file.c_str(), line, cudaGetErrorString(error)); exit(EXIT_FAILURE); } #define CHECK_CUDA do { cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) handleCudaError(error, __FILE__, __LINE__); } while(0) #define CHECKED_CUDA(a) do { cudaError_t error = (a); if(error != cudaSuccess) handleCudaError(error, __FILE__, __LINE__); } while(0) GLuint createShader(const std::string& src, GLenum shader_type) { GLuint shader = glCreateShader(shader_type); const char* src_array[] = { src.c_str() }; glShaderSource(shader, 1, src_array, nullptr); 
glCompileShader(shader); GLsizei bufSize; glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &bufSize); if (bufSize) { LOG_WARNING("Source:\n%s", src.c_str()); std::vector<char> log(bufSize + 1); glGetShaderInfoLog(shader, bufSize + 1, nullptr, log.data()); LOG_WARNING("Compilator output:\n%s", log.data()); } GLint status; glGetShaderiv(shader, GL_COMPILE_STATUS, &status); if (status != GL_TRUE) { glDeleteShader(shader); return 0; } return shader; } GLuint createProgram(GLuint VS, GLuint FS) { GLuint program = glCreateProgram(); glAttachShader(program, VS); glAttachShader(program, FS); glLinkProgram(program); GLsizei bufSize; glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufSize); if (bufSize) { std::vector<char> log(bufSize + 1); glGetProgramInfoLog(program, bufSize + 1, nullptr, log.data()); LOG_WARNING("Linker output:\n%s", log.data()); } GLint status; glGetProgramiv(program, GL_LINK_STATUS, &status); if (status != GL_TRUE) { glDeleteProgram(program); return 0; } return program; } GLuint createBuffer(GLenum target, GLenum usage, size_t size, const void* data) { GLuint buffer = 0; glGenBuffers(1, &buffer); glBindBuffer(target, buffer); glBufferData(target, size, data, usage); CHECK_GL; return buffer; } void rotMatrixX(float* dst, const float angle) { const auto c = std::cos(angle); const auto s = std::sin(angle); dst[4 * 0 + 0] = 1.f; dst[4 * 0 + 1] = 0.f; dst[4 * 0 + 2] = 0.f; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = 0.f; dst[4 * 1 + 1] = c; dst[4 * 1 + 2] = s; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = 0.f; dst[4 * 2 + 1] = -s; dst[4 * 2 + 2] = c; dst[4 * 2 + 3] = 0.f; dst[4 * 3 + 0] = 0.f; dst[4 * 3 + 1] = 0.f; dst[4 * 3 + 2] = 0.f; dst[4 * 3 + 3] = 1.f; } void rotMatrixY(float* dst, const float angle) { const auto c = std::cos(angle); const auto s = std::sin(angle); dst[4 * 0 + 0] = c; dst[4 * 0 + 1] = 0.f; dst[4 * 0 + 2] = -s; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = 0.f; dst[4 * 1 + 1] = 1.f; dst[4 * 1 + 2] = 0.f; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = s; dst[4 * 2 + 1] = 
0.f; dst[4 * 2 + 2] = c; dst[4 * 2 + 3] = 0.f; dst[4 * 3 + 0] = 0.f; dst[4 * 3 + 1] = 0.f; dst[4 * 3 + 2] = 0.f; dst[4 * 3 + 3] = 1.f; } void rotMatrixZ(float* dst, const float angle) { const auto c = std::cos(angle); const auto s = std::sin(angle); dst[4 * 0 + 0] = c; dst[4 * 0 + 1] = s; dst[4 * 0 + 2] = 0.f; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = -s; dst[4 * 1 + 1] = c; dst[4 * 1 + 2] = 0.f; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = 0.f; dst[4 * 2 + 1] = 0.f; dst[4 * 2 + 2] = 1.f; dst[4 * 2 + 3] = 0.f; dst[4 * 3 + 0] = 0.f; dst[4 * 3 + 1] = 0.f; dst[4 * 3 + 2] = 0.f; dst[4 * 3 + 3] = 1.f; } void translateMatrix(float* dst, const float x, const float y, const float z) { dst[4 * 0 + 0] = 1.f; dst[4 * 0 + 1] = 0.f; dst[4 * 0 + 2] = 0.f; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = 0.f; dst[4 * 1 + 1] = 1.f; dst[4 * 1 + 2] = 0.f; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = 0.f; dst[4 * 2 + 1] = 0.f; dst[4 * 2 + 2] = 1.f; dst[4 * 2 + 3] = 0.f; dst[4 * 3 + 0] = x; dst[4 * 3 + 1] = y; dst[4 * 3 + 2] = z; dst[4 * 3 + 3] = 1.f; } void frustumMatrix(float* dst, const float w, const float h, const float n, const float f) { auto a = 2.f * n / w; auto b = 2.f * n / h; auto c = -(f + n) / (f - n); auto d = -2.f * f * n / (f - n); dst[4 * 0 + 0] = a; dst[4 * 0 + 1] = 0.f; dst[4 * 0 + 2] = 0.f; dst[4 * 0 + 3] = 0.f; dst[4 * 1 + 0] = 0.f; dst[4 * 1 + 1] = b; dst[4 * 1 + 2] = 0.f; dst[4 * 1 + 3] = 0.f; dst[4 * 2 + 0] = 0.f; dst[4 * 2 + 1] = 0.f; dst[4 * 2 + 2] = c; dst[4 * 2 + 3] = -1.f; dst[4 * 3 + 0] = 0.f; dst[4 * 3 + 1] = 0; dst[4 * 3 + 2] = d; dst[4 * 3 + 3] = 0.f; } void matrixMul4(float* D, const float* A, const float* B) { for (unsigned i = 0; i < 4; i++) { for (unsigned j = 0; j < 4; j++) { float sum = 0.f; for (unsigned k = 0; k < 4; k++) { sum += A[4 * k + j] * B[4 * i + k]; } D[4 * i + j] = sum; } } } void buildTransforms(float* normal_matrix, float* modelview_projection, const int width, const int height, double seconds) { float center[16]; translateMatrix(center, -0.5f, -0.5f, 
-0.5f); float rx[16]; rotMatrixX(rx, static_cast<float>(0.3 * seconds)); float ry[16]; rotMatrixY(ry, static_cast<float>(0.7 * seconds)); float rz[16]; rotMatrixZ(rz, static_cast<float>(0.5 * seconds)); float shift[16]; translateMatrix(shift, 0.f, 0.f, -2.0f); float frustum[16]; frustumMatrix(frustum, float(width) / float(height), 1.f, 1.f, 8.f); float rx_center[16]; matrixMul4(rx_center, rx, center); float ry_rx[16]; matrixMul4(ry_rx, ry, rx_center); matrixMul4(normal_matrix, rz, ry_rx); float shift_rz_ry_rx[16]; matrixMul4(shift_rz_ry_rx, shift, normal_matrix); matrixMul4(modelview_projection, frustum, shift_rz_ry_rx); } constexpr float cayley(unsigned i, unsigned j, unsigned k, uint3 field_size) { float x = (2.f * i) / (field_size.x - 1.f) - 1.f; float y = (2.f * j) / (field_size.y - 1.f) - 1.f; float z = (2.f * k) / (field_size.z - 1.f) - 1.f; float v = 1.f - 16.f * x * y * z - 4.f * (x * x + y * y + z * z); return v; } GLfloat wireBoxVertexData[] = { 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 1.f, 0.f, 0.f, 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 1.f, 1.f, 0.f, 0.f, 1.f, 1.f, 0.f, 1.f, 0.f, 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 1.f, 0.f, 0.f, 1.f, 1.f, 1.f, 0.f, 0.f, 1.f, 0.f, 1.f, 1.f, 1.f, 0.f, 1.f, 1.f, 1.f }; void buildCayleyField() { const size_t N = static_cast<size_t>(field_size.x) * field_size.y * field_size.z; switch (format) { case FieldFormat::UInt8: { scalarField_host.resize(N); auto* dst = reinterpret_cast<uint8_t*>(scalarField_host.data()); for (unsigned k = 0; k < field_size.z; k++) { for (unsigned j = 0; j < field_size.y; j++) { for (unsigned i = 0; i < field_size.x; i++) { float v = cayley(i, j, k, field_size); v = 0.5f * 255.f * (v + 1.f); if (v < 0.f) v = 0.f; if (255.f < v) v = 255.f; *dst++ = static_cast<uint8_t>(v); } } } break; } case FieldFormat::UInt16: { scalarField_host.resize(sizeof(uint16_t) * N); auto* dst = 
reinterpret_cast<uint16_t*>(scalarField_host.data()); for (unsigned k = 0; k < field_size.z; k++) { for (unsigned j = 0; j < field_size.y; j++) { for (unsigned i = 0; i < field_size.x; i++) { float v = cayley(i, j, k, field_size); v = 0.5f * 65535.f * (v + 1.f); if (v < 0.f) v = 0.f; if (65535.f < v) v = 65535.f; *dst++ = static_cast<uint16_t>(v); } } } break; } case FieldFormat::Float: { scalarField_host.resize(sizeof(float) * N); auto* dst = reinterpret_cast<float*>(scalarField_host.data()); for (unsigned k = 0; k < field_size.z; k++) { for (unsigned j = 0; j < field_size.y; j++) { for (unsigned i = 0; i < field_size.x; i++) { *dst++ = cayley(i, j, k, field_size); } } } break; } default: assert(false && "Unhandled case"); break; } } bool readFile(const char* path) { assert(path); LOG_INFO("Reading %s...", path); FILE* fp = fopen(path, "rb"); if (!fp) { LOG_ERROR("Error opening file \"%s\" for reading.", path); return false; } if (fseek(fp, 0L, SEEK_END) == 0) { uint8_t header[6]; long size = ftell(fp); if (sizeof(header) <= size) { if (fseek(fp, 0L, SEEK_SET) == 0) { if (fread(header, sizeof(header), 1, fp) == 1) { field_size.x = header[0] | header[1] << 8; field_size.y = header[2] | header[3] << 8; field_size.z = header[4] | header[5] << 8; size_t N = static_cast<size_t>(field_size.x) * field_size.y * field_size.z; if ((N + 3) * 2 != size) { LOG_ERROR("Unexpected file size."); } else { std::vector<uint8_t> tmp(2 * N); if (fread(tmp.data(), 2, N, fp) == N) { switch (format) { case FieldFormat::UInt8: { scalarField_host.resize(N); auto* dst = reinterpret_cast<uint8_t*>(scalarField_host.data()); for (size_t i = 0; i < N; i++) { const uint32_t v = tmp[2 * i + 0] | tmp[2 * i + 1] << 8; dst[i] = v >> 4; // 12 bits are in use. 
} break; } case FieldFormat::UInt16: { scalarField_host.resize(sizeof(uint16_t) * N); auto* dst = reinterpret_cast<uint16_t*>(scalarField_host.data()); for (size_t i = 0; i < N; i++) { const uint32_t v = tmp[2 * i + 0] | tmp[2 * i + 1] << 8; dst[i] = v; } break; } case FieldFormat::Float: { scalarField_host.resize(sizeof(float) * N); auto* dst = reinterpret_cast<float*>(scalarField_host.data()); for (size_t i = 0; i < N; i++) { const uint32_t v = tmp[2 * i + 0] | tmp[2 * i + 1] << 8; dst[i] = static_cast<float>(v); } break; } default: assert(false && "Unhandled case"); } LOG_INFO("Successfully loaded %s", path); fclose(fp); return true; } } } } } } LOG_ERROR("Error loading \"%s\"", path); fclose(fp); return false; } void setupScalarField(float*& scalar_field_d, const char* path, const uint3& field_size, cudaStream_t stream) { // Set up scalar field if (!path) { LOG_ERROR("No input file specified."); exit(EXIT_FAILURE); } else if (strcmp("cayley", path) == 0) { buildCayleyField(); } else if (!readFile(path)) { exit(EXIT_FAILURE); } assert(static_cast<size_t>(field_size.x) * field_size.y * field_size.z * 4 == scalarField_host.size()); LOG_INFO("Scalar field is [%d x %d x %d] (%d cells total)", field_size.x, field_size.y, field_size.z, field_size.x * field_size.y * field_size.z); CHECKED_CUDA(cudaMalloc(&scalar_field_d, scalarField_host.size())); CHECKED_CUDA(cudaMemcpyAsync(scalar_field_d, scalarField_host.data(), scalarField_host.size(), cudaMemcpyHostToDevice, stream)); } void initWindowAndGL(GLFWwindow*& win, GLuint& shadedProg, GLuint& solidProg) { glfwSetErrorCallback(onGLFWError); if (!glfwInit()) { LOG_ERROR("GLFW failed to initialize."); exit(EXIT_FAILURE); } glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); win = glfwCreateWindow(1280, 720, "Marching cubes test application", nullptr, nullptr); glfwSetKeyCallback(win, onKey); glfwMakeContextCurrent(win); 
gladLoadGL(glfwGetProcAddress); GLuint simpleVS = createShader(simpleVS_src, GL_VERTEX_SHADER); assert(simpleVS != 0); GLuint simpleFS = createShader(simpleFS_src, GL_FRAGMENT_SHADER); assert(simpleFS != 0); shadedProg = createProgram(simpleVS, simpleFS); assert(shadedProg != 0); GLuint solidVS = createShader(solidVS_src, GL_VERTEX_SHADER); assert(solidVS != 0); GLuint solidFS = createShader(solidFS_src, GL_FRAGMENT_SHADER); assert(solidFS != 0); solidProg = createProgram(solidVS, solidFS); assert(solidProg != 0); } } int main(int argc, char** argv) { cudaStream_t stream; const char* path = nullptr; int deviceIndex = 0; bool benchmark = false; for (int i = 1; i < argc; i++) { if (i + 1 < argc && (strcmp(argv[i], "-d") == 0 || strcmp(argv[i], "--device") == 0)) { deviceIndex = std::atoi(argv[i + 1]); i++; } else if (i + 1 < argc && strcmp(argv[i], "-nx") == 0) { field_size.x = uint32_t(std::atoi(argv[i + 1])); i++; } else if (i + 1 < argc && strcmp(argv[i], "-ny") == 0) { field_size.y = uint32_t(std::atoi(argv[i + 1])); i++; } else if (i + 1 < argc && strcmp(argv[i], "-nz") == 0) { field_size.z = uint32_t(std::atoi(argv[i + 1])); i++; } else if (i + 1 < argc && strcmp(argv[i], "-n") == 0) { field_size.x = uint32_t(std::atoi(argv[i + 1])); field_size.y = field_size.x; field_size.z = field_size.x; i++; } else if (i + 1 < argc && strcmp(argv[i], "-i") == 0) { threshold = static_cast<float>(std::atof(argv[i + 1])); i++; } else if (i + 1 < argc && strcmp(argv[i], "-l") == 0) { loglevel = uint32_t(std::atoi(argv[i + 1])); i++; } #if 0 // Currently only float is supported else if (i + 1 < argc && strcmp(argv[i], "-f") == 0) { if (strcmp(argv[i + 1], "uint8") == 0) { format = FieldFormat::UInt8; } else if (strcmp(argv[i + 1], "uint16") == 0) { format = FieldFormat::UInt16; } else if (strcmp(argv[i + 1], "float") == 0) { format = FieldFormat::Float; } else { fprintf(stderr, "Unknown format '%s'", argv[i + 1]); return EXIT_FAILURE; } i++; } #endif else if ((strcmp(argv[i], 
"-b") == 0) || (strcmp(argv[i], "--benchmark") == 0)) { benchmark = true; } else if ((strcmp(argv[i], "-h") == 0) || (strcmp(argv[i], "--help") == 0)) { fprintf(stderr, "HP5 Marching Cubes test application.\n"); fprintf(stderr, "Copyright (C) 2020 Christopher Dyken. Released under the MIT license\n\n"); fprintf(stderr, "Usage: %s [options] [dataset]\n\n", argv[0]); fprintf(stderr, "Options:\n"); fprintf(stderr, " -d int Choose CUDA device.\n"); fprintf(stderr, " -nx int Set number of samples in x direction.\n"); fprintf(stderr, " -nx int Set number of samples in y direction.\n"); fprintf(stderr, " -nx int Set number of samples in z direction.\n"); fprintf(stderr, " -n int Set uniform number of samples in x,y,z directions.\n"); fprintf(stderr, " -i float Set iso-value to extract surface for.\n"); fprintf(stderr, " -l int Log-level, higher is more verbose.\n"); fprintf(stderr, " -b Enable benchmark mode without OpenGL interop.\n"); fprintf(stderr, "\nDataset:\n"); fprintf(stderr, " cayley Built-in algebraic surface.\n"); fprintf(stderr, " file.dat Raw binary uint16_t data with three binary uint16_t in front with x,y,z size.\n"); fprintf(stderr, "\nKey bindings:\n"); fprintf(stderr, " right/left Increase/decrease threshold by 100.\n"); fprintf(stderr, " up/down Increase/decrease threshold by 0.1.\n"); fprintf(stderr, " w Enable/disable wireframe.\n"); return 0; } else { if (path) { LOG_ERROR("%s: input already specified", argv[i]); return EXIT_FAILURE; } path = argv[i]; } } if (benchmark) { int deviceCount = 0; CHECKED_CUDA(cudaGetDeviceCount(&deviceCount)); bool found = false; for (int i = 0; i < deviceCount; i++) { cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop, i); LOG_INFO("%c[%i] %s cap=%d.%d", i == deviceIndex ? 
'*' : ' ', i, dev_prop.name, dev_prop.major, dev_prop.minor); if (i == deviceIndex) { found = true; } } if (!found) { LOG_ERROR("Illegal CUDA device index %d", deviceIndex); return EXIT_FAILURE; } cudaSetDevice(deviceIndex); CHECKED_CUDA(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); // Create events for timing static const unsigned eventNum = 32; cudaEvent_t events[2 * eventNum]; for (size_t i = 0; i < 2 * eventNum; i++) { CHECKED_CUDA(cudaEventCreate(&events[i])); CHECKED_CUDA(cudaEventRecord(events[i], stream)); } size_t free, total; CHECKED_CUDA(cudaMemGetInfo(&free, &total)); LOG_INFO("CUDA memory free=%zumb total=%zumb", (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); float* scalar_field_d = nullptr; setupScalarField(scalar_field_d, path, field_size, stream); LOG_INFO("Built scalar field"); CHECKED_CUDA(cudaMemGetInfo(&free, &total)); LOG_INFO("CUDA memory free=%zumb total=%zumb", (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); auto* tables = createTables(stream); struct { const char* name; bool indexed; bool sync; } benchmark_cases[] = { {"ix sync", true, true}, {"noix sync", false, true}, {"ix nosync", true, false}, {"noix nosync", false, false} }; float min_time = 0.5; for (auto& bc : benchmark_cases) { #ifdef USE_NVTOOLS_EXT nvtxRangePush(bc.name); #endif auto* ctx = createContext(tables, field_size, true, stream); LOG_INFO("%12s: Created context.", bc.name); LOG_INFO("Grid size [%u x %u x %u]", ctx->grid_size.x, ctx->grid_size.y, ctx->grid_size.z); LOG_INFO("Chunks [%u x %u x %u] (= %u) cover=[%u x %u x %u]", ctx->chunks.x, ctx->chunks.y, ctx->chunks.z, ctx->chunk_total, 31 * ctx->chunks.x, 5 * ctx->chunks.y, 5 * ctx->chunks.z); LOG_INFO("Level vec4-offset vec4-size ( size)"); for (unsigned l = 0; l < ctx->levels; l++) { LOG_INFO("[%2d] %12d %10d (%8d)", l, ctx->level_offsets[l], ctx->level_sizes[l], 4 * ctx->level_sizes[l]); } LOG_INFO("Total %d, levels %d", 
ctx->total_size, ctx->levels); // Run with no output buffers to get size of output. ComputeStuff::MC::buildPN(ctx, nullptr, nullptr, 0, 0, field_size.x, field_size.x* field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, true, true); uint32_t vertex_count = 0; uint32_t index_count = 0; ComputeStuff::MC::getCounts(ctx, &vertex_count, &index_count, stream); float* vertex_data_d = nullptr; CHECKED_CUDA(cudaMalloc(&vertex_data_d, 6 * sizeof(float) * vertex_count)); uint32_t* index_data_d = nullptr; CHECKED_CUDA(cudaMalloc(&index_data_d, sizeof(uint32_t)* index_count)); LOG_INFO("%12s: Allocated output buffers.", bc.name); LOG_INFO("%12s: Warming up", bc.name); for (unsigned i = 0; i < 100; i++) { ComputeStuff::MC::buildPN(ctx, vertex_data_d, index_data_d, 6 * sizeof(float) * vertex_count, sizeof(uint32_t) * index_count, field_size.x, field_size.x * field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, true, true); if (bc.sync) { ComputeStuff::MC::getCounts(ctx, &vertex_count, &index_count, stream); } } LOG_INFO("%12s: Benchmarking", bc.name); auto start = std::chrono::high_resolution_clock::now(); double elapsed = 0.f; float cuda_ms = 0.f; unsigned iterations = 0; unsigned cuda_ms_n = 0; #ifdef USE_NVTOOLS_EXT nvtxRangePush("Benchmark runs"); #endif while (iterations < 100 || elapsed < min_time) { CHECKED_CUDA(cudaEventRecord(events[2 * (iterations % eventNum) + 0], stream)); ComputeStuff::MC::buildPN(ctx, vertex_data_d, index_data_d, 6 * sizeof(float) * vertex_count, sizeof(uint32_t) * index_count, field_size.x, field_size.x * field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, true, true); if (bc.sync) { ComputeStuff::MC::getCounts(ctx, &vertex_count, &index_count, stream); } CHECKED_CUDA(cudaEventRecord(events[2 * (iterations % eventNum) + 1], stream)); if (eventNum <= iterations) { float ms = 0; if (!bc.sync) { CHECKED_CUDA(cudaEventSynchronize(events[2 * ((iterations + 1) % 
eventNum) + 1])); } CHECKED_CUDA(cudaEventElapsedTime(&ms, events[2 * ((iterations + 1) % eventNum) + 0], events[2 * ((iterations + 1) % eventNum) + 1])); cuda_ms += ms; cuda_ms_n++; } std::chrono::duration<double> span = std::chrono::high_resolution_clock::now() - start; elapsed = span.count(); iterations++; } #ifdef USE_NVTOOLS_EXT nvtxRangePop(); #endif CHECKED_CUDA(cudaMemGetInfo(&free, &total)); LOG_ALWAYS("%12s: %.2f FPS (%.0fMVPS) cuda: %.2fms (%.0f MVPS) %ux%ux%u Nv=%u Ni=%u memfree=%zumb/%zumb", bc.name, iterations / elapsed, (float(iterations) * field_size.x * field_size.y * field_size.z) / (1000000.f * elapsed), cuda_ms / cuda_ms_n, (float(cuda_ms_n) * field_size.x * field_size.y * field_size.z) / (1000.f * cuda_ms), field_size.x, field_size.y, field_size.z, vertex_count, index_count, (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); freeContext(ctx, stream); CHECKED_CUDA(cudaStreamSynchronize(stream)); CHECKED_CUDA(cudaFree(vertex_data_d)); CHECKED_CUDA(cudaFree(index_data_d)); CHECKED_CUDA(cudaMemGetInfo(&free, &total)); LOG_INFO("%12s: Released resources free=%zumb total=%zumb", bc.name, (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); #ifdef USE_NVTOOLS_EXT nvtxRangePop(); #endif } LOG_ALWAYS("Exiting..."); CHECKED_CUDA(cudaMemGetInfo(&free, &total)); LOG_INFO("CUDA memory free=%zumb total=%zumb", (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); return 0; } GLFWwindow* win = nullptr; GLuint shadedProg = 0; GLuint solidProg = 0; initWindowAndGL(win, shadedProg, solidProg); unsigned int deviceCount; CHECKED_CUDA(cudaGLGetDevices(&deviceCount, nullptr, 0, cudaGLDeviceListAll)); if (deviceCount == 0) { LOG_ERROR("No CUDA-enabled devices available."); return EXIT_FAILURE; } std::vector<int> devices(deviceCount); CHECKED_CUDA(cudaGLGetDevices(&deviceCount, devices.data(), deviceCount, cudaGLDeviceListAll)); bool found = false; for (unsigned k = 0; 
k < deviceCount; k++) { int i = devices[k]; cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop, i); LOG_INFO("%c[%i] %s cap=%d.%d", i == deviceIndex ? '*' : ' ', i, dev_prop.name, dev_prop.major, dev_prop.minor); if (i == deviceIndex) { found = true; } } if (!found) { LOG_ERROR("Illegal CUDA device index %d", deviceIndex); return EXIT_FAILURE; } cudaSetDevice(deviceIndex); CHECKED_CUDA(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); // Set up scalar field float* scalar_field_d = nullptr; setupScalarField(scalar_field_d, path, field_size, stream); auto* tables = createTables(stream); GLuint wireBoxVertexBuffer = createBuffer(GL_ARRAY_BUFFER, GL_STATIC_DRAW, sizeof(wireBoxVertexData), wireBoxVertexData); uint32_t wireBoxVertexCount = sizeof(wireBoxVertexData) / (3 * sizeof(float)); GLuint wireBoxVbo = 0; glGenVertexArrays(1, &wireBoxVbo); glBindVertexArray(wireBoxVbo); glBindBuffer(GL_ARRAY_BUFFER, wireBoxVertexBuffer); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 3, nullptr); glEnableVertexAttribArray(0); unsigned eventCounter = 0; cudaEvent_t events[2 * 4]; for (size_t i = 0; i < 2 * 4; i++) { CHECKED_CUDA(cudaEventCreate(&events[i])); CHECKED_CUDA(cudaEventRecord(events[i], stream)); } GLuint cudaVertexBuf = createBuffer(GL_ARRAY_BUFFER, GL_STREAM_DRAW, 3 * sizeof(float), nullptr); cudaGraphicsResource* vertexBufferResource = nullptr; CHECKED_CUDA(cudaGraphicsGLRegisterBuffer(&vertexBufferResource, cudaVertexBuf, cudaGraphicsRegisterFlagsWriteDiscard)); GLuint cudaIndexBuf = createBuffer(GL_ELEMENT_ARRAY_BUFFER, GL_STREAM_DRAW, 3 * sizeof(uint32_t), nullptr); cudaGraphicsResource* indexBufferResource = nullptr; CHECKED_CUDA(cudaGraphicsGLRegisterBuffer(&indexBufferResource, cudaIndexBuf, cudaGraphicsRegisterFlagsWriteDiscard)); GLuint cudaVbo = 0; glGenVertexArrays(1, &cudaVbo); glBindVertexArray(cudaVbo); glBindBuffer(GL_ARRAY_BUFFER, cudaVertexBuf); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 6, 
nullptr); glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 6, (void*)(sizeof(float) * 3)); glEnableVertexAttribArray(0); glEnableVertexAttribArray(1); auto start = std::chrono::system_clock::now(); auto timer = std::chrono::high_resolution_clock::now(); float cuda_ms = 0.f; unsigned frames = 0u; ComputeStuff::MC::Context* ctx = nullptr; while (!glfwWindowShouldClose(win)) { int width, height; glfwGetWindowSize(win, &width, &height); uint32_t vertex_count = 0; uint32_t index_count = 0; { if (ctx == nullptr || recreate_context) { freeContext(ctx, stream); ctx = createContext(tables, field_size, indexed, stream); recreate_context = false; } float* cudaVertexBuf_d = nullptr; size_t cudaVertexBuf_size = 0; uint32_t* cudaIndexBuf_d = nullptr; size_t cudaIndexBuf_size = 0; CHECKED_CUDA(cudaGraphicsMapResources(1, &vertexBufferResource, stream)); CHECKED_CUDA(cudaGraphicsResourceGetMappedPointer((void**)&cudaVertexBuf_d, &cudaVertexBuf_size, vertexBufferResource)); if (indexed) { CHECKED_CUDA(cudaGraphicsMapResources(1, &indexBufferResource, stream)); CHECKED_CUDA(cudaGraphicsResourceGetMappedPointer((void**)&cudaIndexBuf_d, &cudaIndexBuf_size, indexBufferResource)); } CHECKED_CUDA(cudaEventRecord(events[2 * eventCounter + 0], stream)); ComputeStuff::MC::buildPN(ctx, cudaVertexBuf_d, cudaIndexBuf_d, cudaVertexBuf_size, cudaIndexBuf_size, field_size.x, field_size.x* field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, true, true); CHECKED_CUDA(cudaEventRecord(events[2 * eventCounter + 1], stream)); CHECKED_CUDA(cudaGraphicsUnmapResources(1, &vertexBufferResource, stream)); if (indexed) { CHECKED_CUDA(cudaGraphicsUnmapResources(1, &indexBufferResource, stream)); } ComputeStuff::MC::getCounts(ctx, &vertex_count, &index_count, stream); eventCounter = (eventCounter + 1) & 3; float ms = 0; CHECKED_CUDA(cudaEventElapsedTime(&ms, events[2 * eventCounter + 0], events[2 * eventCounter + 1])); cuda_ms += ms; bool vertexBufTooSmall = 
cudaVertexBuf_size < 6 * sizeof(float) * vertex_count; bool indexBufTooSmall = cudaIndexBuf_size < sizeof(uint32_t)* index_count; if (vertexBufTooSmall || indexBufTooSmall) { CHECKED_CUDA(cudaGraphicsUnregisterResource(vertexBufferResource)); CHECKED_CUDA(cudaGraphicsUnregisterResource(indexBufferResource)); if (vertexBufTooSmall) { size_t newVertexBufSize = 6 * sizeof(float) * (static_cast<size_t>(vertex_count) + vertex_count / 16); glBindBuffer(GL_ARRAY_BUFFER, cudaVertexBuf); glBufferData(GL_ARRAY_BUFFER, newVertexBufSize, nullptr, GL_STREAM_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); LOG_INFO("Resizing: vbuf=%zub", newVertexBufSize); } if (indexBufTooSmall) { size_t newIndexBufSize = sizeof(uint32_t) * (index_count + index_count / 16); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cudaIndexBuf); glBufferData(GL_ELEMENT_ARRAY_BUFFER, newIndexBufSize, nullptr, GL_STREAM_DRAW); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); LOG_INFO("Resizing: ibuf=%zub", newIndexBufSize); } CHECKED_CUDA(cudaGraphicsGLRegisterBuffer(&vertexBufferResource, cudaVertexBuf, cudaGraphicsRegisterFlagsWriteDiscard)); CHECKED_CUDA(cudaGraphicsGLRegisterBuffer(&indexBufferResource, cudaIndexBuf, cudaGraphicsRegisterFlagsWriteDiscard)); CHECKED_CUDA(cudaGraphicsMapResources(1, &vertexBufferResource, stream)); CHECKED_CUDA(cudaGraphicsResourceGetMappedPointer((void**)&cudaVertexBuf_d, &cudaVertexBuf_size, vertexBufferResource)); if (indexed) { CHECKED_CUDA(cudaGraphicsMapResources(1, &indexBufferResource, stream)); CHECKED_CUDA(cudaGraphicsResourceGetMappedPointer((void**)&cudaIndexBuf_d, &cudaIndexBuf_size, indexBufferResource)); } ComputeStuff::MC::buildPN(ctx, cudaVertexBuf_d, cudaIndexBuf_d, cudaVertexBuf_size, cudaIndexBuf_size, field_size.x, field_size.x* field_size.y, make_uint3(0, 0, 0), field_size, scalar_field_d, threshold, stream, false, indexed); CHECKED_CUDA(cudaGraphicsUnmapResources(1, &vertexBufferResource, stream)); if (indexed) { CHECKED_CUDA(cudaGraphicsUnmapResources(1, 
&indexBufferResource, stream)); } } } glViewport(0, 0, width, height); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); std::chrono::duration<double> elapsed = std::chrono::system_clock::now() - start; float normal_matrix[16]; float modelview_projection[16]; buildTransforms(normal_matrix, modelview_projection, width, height, elapsed.count()); glEnable(GL_DEPTH_TEST); glPolygonOffset(0.f, 1.f); if (wireframe) { glEnable(GL_POLYGON_OFFSET_FILL); } glBindVertexArray(cudaVbo); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); glUseProgram(shadedProg); glUniformMatrix4fv(0, 1, GL_FALSE, normal_matrix); glUniformMatrix4fv(1, 1, GL_FALSE, modelview_projection); glUniform4f(2, 0.6f, 0.6f, 0.8f, 1.f); if (indexed) { glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cudaIndexBuf); glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, nullptr); } else { glDrawArrays(GL_TRIANGLES, 0, vertex_count); } glDisable(GL_POLYGON_OFFSET_FILL); if (wireframe) { glUseProgram(solidProg); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); glUniformMatrix4fv(0, 1, GL_FALSE, normal_matrix); glUniformMatrix4fv(1, 1, GL_FALSE, modelview_projection); glUniform4f(2, 1.f, 1.f, 1.f, 1.f); if (indexed) { glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cudaIndexBuf); glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, nullptr); } else { glDrawArrays(GL_TRIANGLES, 0, vertex_count); } glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); } glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); glBindVertexArray(wireBoxVbo); glUseProgram(solidProg); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); glUniformMatrix4fv(0, 1, GL_FALSE, normal_matrix); glUniformMatrix4fv(1, 1, GL_FALSE, modelview_projection); glUniform4f(2, 1.f, 1.f, 1.f, 1.f); glDrawArrays(GL_LINES, 0, wireBoxVertexCount); glfwSwapBuffers(win); glfwPollEvents(); { frames++; auto now = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = now - timer; auto s = elapsed.count(); if (10 < frames && 3.0 < s) { size_t free, total; CHECKED_CUDA(cudaMemGetInfo(&free, 
&total)); LOG_INFO("%.2f FPS (%.2f MVPS) cuda avg: %.2fms (%.2f MVPS) %ux%ux%u Nv=%u Ni=%u ix=%s memfree=%zumb/%zumb", frames / s, (float(frames)* field_size.x* field_size.y* field_size.z) / (1000000.f * s), cuda_ms / frames, (float(frames)* field_size.x* field_size.y* field_size.z) / (1000.f * cuda_ms), field_size.x, field_size.y, field_size.z, vertex_count, index_count, indexed ? "y" : "n", (free + 1024 * 1024 - 1) / (1024 * 1024), (total + 1024 * 1024 - 1) / (1024 * 1024)); timer = now; frames = 0; cuda_ms = 0.f; } } } glfwDestroyWindow(win); glfwTerminate(); return EXIT_SUCCESS; }
d9292853a7bf6eb03a0e44e645dc83561a48be1a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include "utils/warp_reduce.cuh"

// Iterative farthest point sampling for a batch of pointclouds.
//
// Launch layout: ONE BLOCK PER BATCH ELEMENT (gridDim.x == N); block_size
// threads cooperate on a single pointcloud and block_size must equal
// blockDim.x. Dynamic shared memory requirement:
//   block_size * (sizeof(float) + sizeof(int64_t)) bytes,
// split into a per-thread best-distance array and a per-thread best-index
// array used for the block-wide max reduction.
template <unsigned int block_size>
__global__ void FarthestPointSamplingKernel(
    // clang-format off
    const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> points,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> lengths,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> K,
    at::PackedTensorAccessor64<int64_t, 2, at::RestrictPtrTraits> idxs,
    at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> min_point_dist,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> start_idxs
    // clang-format on
) {
  // Point dimensionality (3 for xyz clouds).
  const int64_t D = points.size(2);

  // Single shared buffer split and cast to two types: dists/dists_idx hold
  // the max of the min-distances seen by each thread and the associated point
  // indices. Only threads of the same block (same batch element) read them.
  extern __shared__ char shared_buf[];
  float* dists = (float*)shared_buf; // block_size floats
  int64_t* dists_idx = (int64_t*)&dists[block_size]; // block_size int64_t

  const int64_t batch_idx = blockIdx.x; // one block per pointcloud
  const size_t tid = threadIdx.x;

  // If K exceeds the number of points in this pointcloud we only iterate
  // until the smaller value is reached.
  const int64_t k_n = min(K[batch_idx], lengths[batch_idx]);

  // The first selected point is chosen by the caller; write it once.
  int64_t selected = start_idxs[batch_idx];
  if (tid == 0) idxs[batch_idx][0] = selected;

  // Select the remaining k_n - 1 points, one per iteration.
  for (int64_t k = 1; k < k_n; ++k) {
    // Best (farthest-of-the-nearest) candidate seen by this thread.
    int64_t max_dist_idx = 0;
    float max_dist = -1.0f;

    // Stride over the pointcloud; each thread sees ~P/block_size points.
    // Already selected points have min distance 0.0 so they are never
    // selected again.
    for (int64_t p = tid; p < lengths[batch_idx]; p += block_size) {
      // Squared distance of point p to the last selected point.
      float dist2 = 0.0f;
      for (int64_t d = 0; d < D; ++d) {
        float diff = points[batch_idx][selected][d] - points[batch_idx][p][d];
        dist2 += (diff * diff);
      }

      // Keep min_point_dist[batch_idx][p] equal to the min distance of p to
      // the set of ALL previously selected points.
      const float p_min_dist = min(dist2, min_point_dist[batch_idx][p]);
      min_point_dist[batch_idx][p] = p_min_dist;

      // Update this thread's running max (branchless selects).
      max_dist_idx = (p_min_dist > max_dist) ? p : max_dist_idx;
      max_dist = (p_min_dist > max_dist) ? p_min_dist : max_dist;
    }

    // Publish this thread's candidate, then barrier before reducing.
    dists[tid] = max_dist;
    dists_idx[tid] = max_dist_idx;
    __syncthreads();

    // Block-wide tree reduction to find the overall max for iteration k.
    for (int s = block_size / 2; s > 0; s >>= 1) {
      if (tid < s) {
        // Keep the larger of the two candidates at the lower index.
        if (dists[tid] < dists[tid + s]) {
          dists[tid] = dists[tid + s];
          dists_idx[tid] = dists_idx[tid + s];
        }
      }
      __syncthreads();
    }
    // TODO(nikhilar): once tid < 32 only one warp remains; the reduction
    // tail could be unrolled with warp-level primitives.

    // The overall max lands at index 0.
    selected = dists_idx[0];
    // Barrier so no thread overwrites dists_idx[0] in the next iteration
    // before every thread has read `selected` (fixes a shared-memory race
    // between this read and the `dists_idx[tid] = ...` write above).
    __syncthreads();

    if (tid == 0) {
      // Write the farthest point for iteration k to global memory.
      idxs[batch_idx][k] = selected;
    }
  }
}

// Host entry point: farthest point sampling on the GPU.
//
// Args:
//   points:     (N, P, 3) float tensor of pointclouds.
//   lengths:    (N,) number of valid points per cloud.
//   K:          (N,) number of samples requested per cloud.
//   start_idxs: (N,) index of the first selected point per cloud.
// Returns:
//   (N, max(K)) int64 tensor of sampled point indices, padded with -1.
at::Tensor FarthestPointSamplingCuda(
    const at::Tensor& points, // (N, P, 3)
    const at::Tensor& lengths, // (N,)
    const at::Tensor& K, // (N,)
    const at::Tensor& start_idxs) {
  // Check inputs are on the same device.
  at::TensorArg p_t{points, "points", 1}, lengths_t{lengths, "lengths", 2},
      k_t{K, "K", 3}, start_idxs_t{start_idxs, "start_idxs", 4};
  at::CheckedFrom c = "FarthestPointSamplingCuda";
  at::checkAllSameGPU(c, {p_t, lengths_t, k_t, start_idxs_t});
  at::checkAllSameType(c, {lengths_t, k_t, start_idxs_t});

  // Set the device for the kernel launch based on the device of points.
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  TORCH_CHECK(
      points.size(0) == lengths.size(0),
      "Point and lengths must have the same batch dimension");
  TORCH_CHECK(
      points.size(0) == K.size(0),
      "Points and K must have the same batch dimension");

  const int64_t N = points.size(0);
  const int64_t P = points.size(1);
  const int64_t max_K = at::max(K).item<int64_t>();

  // Output initialized to -1 so clouds with K[i] < max_K stay padded.
  auto idxs = at::full({N, max_K}, -1, lengths.options());
  // Running min distance of every point to the selected set (starts "inf").
  auto min_point_dist = at::full({N, P}, 1e10, points.options());

  if (N == 0 || P == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return idxs;
  }

  // One block per pointcloud so the in-block reduction finds the farthest
  // point of that cloud at each iteration.
  const size_t blocks = N;

  // Threads = nearest power of 2 of P (capped at max threads per block) so
  // each thread processes the minimum necessary number of points.
  const int points_pow_2 = ::log(static_cast<double>(P)) / ::log(2.0);
  const int MAX_THREADS_PER_BLOCK = 1024;
  const size_t threads = max(min(1 << points_pow_2, MAX_THREADS_PER_BLOCK), 1);

  // Create the accessors.
  auto points_a = points.packed_accessor64<float, 3, at::RestrictPtrTraits>();
  auto lengths_a =
      lengths.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>();
  auto K_a = K.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>();
  auto idxs_a = idxs.packed_accessor64<int64_t, 2, at::RestrictPtrTraits>();
  auto start_idxs_a =
      start_idxs.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>();
  auto min_point_dist_a =
      min_point_dist.packed_accessor64<float, 2, at::RestrictPtrTraits>();

  // Shared memory for the per-thread best distance/index of each block.
  size_t shared_mem = threads * sizeof(float) + threads * sizeof(int64_t);
  // TODO: using shared memory for min_point_dist gives an ~2x speed up
  // compared to using a global (N, P) shaped tensor, however for larger
  // pointclouds this may exceed the shared memory limit per block.

  // Dispatch on the (power of 2) thread count so block_size is a compile
  // time constant inside the kernel. NOTE: the grid dimension is always
  // `blocks` (= N); using `threads` here (as a previous revision did for the
  // 4/2/1 cases) silently skips batch elements when N > threads.
  switch (threads) {
    case 1024:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<1024>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 512:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<512>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 256:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<256>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 128:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<128>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 64:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<64>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 32:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<32>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 16:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<16>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 8:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<8>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 4:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<4>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 2:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<2>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    case 1:
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<1>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
      break;
    default:
      // Unreachable given the clamp above, but kept for safety.
      hipLaunchKernelGGL(
          (FarthestPointSamplingKernel<1024>), dim3(blocks), dim3(threads),
          shared_mem, stream, points_a, lengths_a, K_a, idxs_a,
          min_point_dist_a, start_idxs_a);
  }
  AT_CUDA_CHECK(hipGetLastError());
  return idxs;
}
d9292853a7bf6eb03a0e44e645dc83561a48be1a.cu
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include "utils/warp_reduce.cuh"

// Iterative farthest point sampling for a batch of pointclouds.
//
// Launch layout: ONE BLOCK PER BATCH ELEMENT (gridDim.x == N); block_size
// threads cooperate on a single pointcloud and block_size must equal
// blockDim.x. Dynamic shared memory requirement:
//   block_size * (sizeof(float) + sizeof(int64_t)) bytes,
// split into a per-thread best-distance array and a per-thread best-index
// array used for the block-wide max reduction.
template <unsigned int block_size>
__global__ void FarthestPointSamplingKernel(
    // clang-format off
    const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> points,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> lengths,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> K,
    at::PackedTensorAccessor64<int64_t, 2, at::RestrictPtrTraits> idxs,
    at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> min_point_dist,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> start_idxs
    // clang-format on
) {
  // Point dimensionality (3 for xyz clouds).
  const int64_t D = points.size(2);

  // Single shared buffer split and cast to two types: dists/dists_idx hold
  // the max of the min-distances seen by each thread and the associated point
  // indices. Only threads of the same block (same batch element) read them.
  extern __shared__ char shared_buf[];
  float* dists = (float*)shared_buf; // block_size floats
  int64_t* dists_idx = (int64_t*)&dists[block_size]; // block_size int64_t

  const int64_t batch_idx = blockIdx.x; // one block per pointcloud
  const size_t tid = threadIdx.x;

  // If K exceeds the number of points in this pointcloud we only iterate
  // until the smaller value is reached.
  const int64_t k_n = min(K[batch_idx], lengths[batch_idx]);

  // The first selected point is chosen by the caller; write it once.
  int64_t selected = start_idxs[batch_idx];
  if (tid == 0) idxs[batch_idx][0] = selected;

  // Select the remaining k_n - 1 points, one per iteration.
  for (int64_t k = 1; k < k_n; ++k) {
    // Best (farthest-of-the-nearest) candidate seen by this thread.
    int64_t max_dist_idx = 0;
    float max_dist = -1.0f;

    // Stride over the pointcloud; each thread sees ~P/block_size points.
    // Already selected points have min distance 0.0 so they are never
    // selected again.
    for (int64_t p = tid; p < lengths[batch_idx]; p += block_size) {
      // Squared distance of point p to the last selected point.
      float dist2 = 0.0f;
      for (int64_t d = 0; d < D; ++d) {
        float diff = points[batch_idx][selected][d] - points[batch_idx][p][d];
        dist2 += (diff * diff);
      }

      // Keep min_point_dist[batch_idx][p] equal to the min distance of p to
      // the set of ALL previously selected points.
      const float p_min_dist = min(dist2, min_point_dist[batch_idx][p]);
      min_point_dist[batch_idx][p] = p_min_dist;

      // Update this thread's running max (branchless selects).
      max_dist_idx = (p_min_dist > max_dist) ? p : max_dist_idx;
      max_dist = (p_min_dist > max_dist) ? p_min_dist : max_dist;
    }

    // Publish this thread's candidate, then barrier before reducing.
    dists[tid] = max_dist;
    dists_idx[tid] = max_dist_idx;
    __syncthreads();

    // Block-wide tree reduction to find the overall max for iteration k.
    for (int s = block_size / 2; s > 0; s >>= 1) {
      if (tid < s) {
        // Keep the larger of the two candidates at the lower index.
        if (dists[tid] < dists[tid + s]) {
          dists[tid] = dists[tid + s];
          dists_idx[tid] = dists_idx[tid + s];
        }
      }
      __syncthreads();
    }
    // TODO(nikhilar): once tid < 32 only one warp remains; the reduction
    // tail could be unrolled with warp-level primitives.

    // The overall max lands at index 0.
    selected = dists_idx[0];
    // Barrier so no thread overwrites dists_idx[0] in the next iteration
    // before every thread has read `selected` (fixes a shared-memory race
    // between this read and the `dists_idx[tid] = ...` write above).
    __syncthreads();

    if (tid == 0) {
      // Write the farthest point for iteration k to global memory.
      idxs[batch_idx][k] = selected;
    }
  }
}

// Host entry point: farthest point sampling on the GPU.
//
// Args:
//   points:     (N, P, 3) float tensor of pointclouds.
//   lengths:    (N,) number of valid points per cloud.
//   K:          (N,) number of samples requested per cloud.
//   start_idxs: (N,) index of the first selected point per cloud.
// Returns:
//   (N, max(K)) int64 tensor of sampled point indices, padded with -1.
at::Tensor FarthestPointSamplingCuda(
    const at::Tensor& points, // (N, P, 3)
    const at::Tensor& lengths, // (N,)
    const at::Tensor& K, // (N,)
    const at::Tensor& start_idxs) {
  // Check inputs are on the same device.
  at::TensorArg p_t{points, "points", 1}, lengths_t{lengths, "lengths", 2},
      k_t{K, "K", 3}, start_idxs_t{start_idxs, "start_idxs", 4};
  at::CheckedFrom c = "FarthestPointSamplingCuda";
  at::checkAllSameGPU(c, {p_t, lengths_t, k_t, start_idxs_t});
  at::checkAllSameType(c, {lengths_t, k_t, start_idxs_t});

  // Set the device for the kernel launch based on the device of points.
  at::cuda::CUDAGuard device_guard(points.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  TORCH_CHECK(
      points.size(0) == lengths.size(0),
      "Point and lengths must have the same batch dimension");
  TORCH_CHECK(
      points.size(0) == K.size(0),
      "Points and K must have the same batch dimension");

  const int64_t N = points.size(0);
  const int64_t P = points.size(1);
  const int64_t max_K = at::max(K).item<int64_t>();

  // Output initialized to -1 so clouds with K[i] < max_K stay padded.
  auto idxs = at::full({N, max_K}, -1, lengths.options());
  // Running min distance of every point to the selected set (starts "inf").
  auto min_point_dist = at::full({N, P}, 1e10, points.options());

  if (N == 0 || P == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return idxs;
  }

  // One block per pointcloud so the in-block reduction finds the farthest
  // point of that cloud at each iteration.
  const size_t blocks = N;

  // Threads = nearest power of 2 of P (capped at max threads per block) so
  // each thread processes the minimum necessary number of points.
  const int points_pow_2 = std::log(static_cast<double>(P)) / std::log(2.0);
  const int MAX_THREADS_PER_BLOCK = 1024;
  const size_t threads = max(min(1 << points_pow_2, MAX_THREADS_PER_BLOCK), 1);

  // Create the accessors.
  auto points_a = points.packed_accessor64<float, 3, at::RestrictPtrTraits>();
  auto lengths_a =
      lengths.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>();
  auto K_a = K.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>();
  auto idxs_a = idxs.packed_accessor64<int64_t, 2, at::RestrictPtrTraits>();
  auto start_idxs_a =
      start_idxs.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>();
  auto min_point_dist_a =
      min_point_dist.packed_accessor64<float, 2, at::RestrictPtrTraits>();

  // Shared memory for the per-thread best distance/index of each block.
  size_t shared_mem = threads * sizeof(float) + threads * sizeof(int64_t);
  // TODO: using shared memory for min_point_dist gives an ~2x speed up
  // compared to using a global (N, P) shaped tensor, however for larger
  // pointclouds this may exceed the shared memory limit per block.

  // Dispatch on the (power of 2) thread count so block_size is a compile
  // time constant inside the kernel. NOTE: the grid dimension is always
  // `blocks` (= N); using `threads` here (as a previous revision did for the
  // 4/2/1 cases) silently skips batch elements when N > threads.
  switch (threads) {
    case 1024:
      FarthestPointSamplingKernel<1024>
          <<<blocks, threads, shared_mem, stream>>>(
              points_a, lengths_a, K_a, idxs_a, min_point_dist_a,
              start_idxs_a);
      break;
    case 512:
      FarthestPointSamplingKernel<512><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    case 256:
      FarthestPointSamplingKernel<256><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    case 128:
      FarthestPointSamplingKernel<128><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    case 64:
      FarthestPointSamplingKernel<64><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    case 32:
      FarthestPointSamplingKernel<32><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    case 16:
      FarthestPointSamplingKernel<16><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    case 8:
      FarthestPointSamplingKernel<8><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    case 4:
      FarthestPointSamplingKernel<4><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    case 2:
      FarthestPointSamplingKernel<2><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    case 1:
      FarthestPointSamplingKernel<1><<<blocks, threads, shared_mem, stream>>>(
          points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
      break;
    default:
      // Unreachable given the clamp above, but kept for safety.
      FarthestPointSamplingKernel<1024>
          <<<blocks, threads, shared_mem, stream>>>(
              points_a, lengths_a, K_a, idxs_a, min_point_dist_a,
              start_idxs_a);
  }
  AT_CUDA_CHECK(cudaGetLastError());
  return idxs;
}
13801ac967c84be2e727aa3735396158bf5c23de.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright (c) 2021 by Contributors
 * @file array/cuda/csr_get_data.cu
 * @brief Retrieve entries of a CSR matrix
 */
#include <dgl/array.h>

#include <numeric>
#include <unordered_set>
#include <vector>

#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"

namespace dgl {

using runtime::NDArray;

namespace aten {
namespace impl {

// Retrieve CSR entries for (row, col) query pairs on the GPU via one linear
// search per query (cuda::_LinearSearchKernel). When `return_eids` is true
// the kernel receives a null weight pointer, otherwise it receives `weights`;
// `filler` is forwarded to the kernel — presumably the value emitted for
// pairs absent from the matrix (confirm against _LinearSearchKernel).
// `rows` and `cols` must have equal length, or one may have length 1, in
// which case that id is broadcast against the other array (stride 0).
template <DGLDeviceType XPU, typename IdType, typename DType>
NDArray CSRGetData(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, DType filler) {
  const int64_t rowlen = rows->shape[0];
  const int64_t collen = cols->shape[0];
  CHECK((rowlen == collen) || (rowlen == 1) || (collen == 1))
      << "Invalid row and col id array.";
  // Broadcast a length-1 id array by giving it stride 0.
  const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1;
  const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1;
  // NOTE(review): hipify rewrote std::max as ::max here; this relies on a
  // global max overload being visible to host code — verify it compiles.
  const int64_t rstlen = ::max(rowlen, collen);
  // One output element per query; dtype follows `weights`, device follows
  // the query arrays.
  IdArray rst = NDArray::Empty({rstlen}, weights->dtype, rows->ctx);
  if (rstlen == 0) return rst;

  hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA();
  // One thread per query element; nb = ceil(rstlen / nt).
  const int nt = cuda::FindNumThreads(rstlen);
  const int nb = (rstlen + nt - 1) / nt;
  // Edge-id mode writes IdType values into a DType buffer, so the two
  // dtypes must agree.
  if (return_eids)
    BUG_IF_FAIL(DGLDataTypeTraits<DType>::dtype == rows->dtype)
        << "DType does not match row's dtype.";
  const IdType* indptr_data =
      static_cast<IdType*>(cuda::GetDevicePointer(csr.indptr));
  const IdType* indices_data =
      static_cast<IdType*>(cuda::GetDevicePointer(csr.indices));
  // csr.data (position -> edge id mapping) may be absent; that is signalled
  // to the kernel with a null pointer.
  const IdType* data_data = CSRHasData(csr)
      ? static_cast<IdType*>(cuda::GetDevicePointer(csr.data))
      : nullptr;
  // TODO(minjie): use binary search for sorted csr
  // A null weight pointer selects edge-id output in the kernel.
  CUDA_KERNEL_CALL(
      cuda::_LinearSearchKernel, nb, nt, 0, stream, indptr_data, indices_data,
      data_data, rows.Ptr<IdType>(), cols.Ptr<IdType>(), row_stride,
      col_stride, rstlen, return_eids ? nullptr : weights.Ptr<DType>(),
      filler, rst.Ptr<DType>());
  return rst;
}

// Explicit instantiations for all supported (id dtype, value dtype) pairs.
template NDArray CSRGetData<kDGLCUDA, int32_t, __half>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, __half filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, __half>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, __half filler);
#if BF16_ENABLED
template NDArray CSRGetData<kDGLCUDA, int32_t, __nv_bfloat16>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, __nv_bfloat16 filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, __nv_bfloat16>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, __nv_bfloat16 filler);
#endif  // BF16_ENABLED
template NDArray CSRGetData<kDGLCUDA, int32_t, float>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, float filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, float>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, float filler);
template NDArray CSRGetData<kDGLCUDA, int32_t, double>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, double filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, double>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, double filler);

// For CSRGetData<XPU, IdType>(CSRMatrix, NDArray, NDArray)
template NDArray CSRGetData<kDGLCUDA, int32_t, int32_t>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, int32_t filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, int64_t>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, int64_t filler);

}  // namespace impl
}  // namespace aten
}  // namespace dgl
13801ac967c84be2e727aa3735396158bf5c23de.cu
/**
 * Copyright (c) 2021 by Contributors
 * @file array/cuda/csr_get_data.cu
 * @brief Retrieve entries of a CSR matrix
 */
#include <dgl/array.h>

#include <numeric>
#include <unordered_set>
#include <vector>

#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"

namespace dgl {

using runtime::NDArray;

namespace aten {
namespace impl {

// Retrieve CSR entries for (row, col) query pairs on the GPU via one linear
// search per query (cuda::_LinearSearchKernel). When `return_eids` is true
// the kernel receives a null weight pointer, otherwise it receives `weights`;
// `filler` is forwarded to the kernel — presumably the value emitted for
// pairs absent from the matrix (confirm against _LinearSearchKernel).
// `rows` and `cols` must have equal length, or one may have length 1, in
// which case that id is broadcast against the other array (stride 0).
template <DGLDeviceType XPU, typename IdType, typename DType>
NDArray CSRGetData(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, DType filler) {
  const int64_t rowlen = rows->shape[0];
  const int64_t collen = cols->shape[0];
  CHECK((rowlen == collen) || (rowlen == 1) || (collen == 1))
      << "Invalid row and col id array.";
  // Broadcast a length-1 id array by giving it stride 0.
  const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1;
  const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1;
  const int64_t rstlen = std::max(rowlen, collen);
  // One output element per query; dtype follows `weights`, device follows
  // the query arrays.
  IdArray rst = NDArray::Empty({rstlen}, weights->dtype, rows->ctx);
  if (rstlen == 0) return rst;

  cudaStream_t stream = runtime::getCurrentCUDAStream();
  // One thread per query element; nb = ceil(rstlen / nt).
  const int nt = cuda::FindNumThreads(rstlen);
  const int nb = (rstlen + nt - 1) / nt;
  // Edge-id mode writes IdType values into a DType buffer, so the two
  // dtypes must agree.
  if (return_eids)
    BUG_IF_FAIL(DGLDataTypeTraits<DType>::dtype == rows->dtype)
        << "DType does not match row's dtype.";
  const IdType* indptr_data =
      static_cast<IdType*>(cuda::GetDevicePointer(csr.indptr));
  const IdType* indices_data =
      static_cast<IdType*>(cuda::GetDevicePointer(csr.indices));
  // csr.data (position -> edge id mapping) may be absent; that is signalled
  // to the kernel with a null pointer.
  const IdType* data_data = CSRHasData(csr)
      ? static_cast<IdType*>(cuda::GetDevicePointer(csr.data))
      : nullptr;
  // TODO(minjie): use binary search for sorted csr
  // A null weight pointer selects edge-id output in the kernel.
  CUDA_KERNEL_CALL(
      cuda::_LinearSearchKernel, nb, nt, 0, stream, indptr_data, indices_data,
      data_data, rows.Ptr<IdType>(), cols.Ptr<IdType>(), row_stride,
      col_stride, rstlen, return_eids ? nullptr : weights.Ptr<DType>(),
      filler, rst.Ptr<DType>());
  return rst;
}

// Explicit instantiations for all supported (id dtype, value dtype) pairs.
template NDArray CSRGetData<kDGLCUDA, int32_t, __half>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, __half filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, __half>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, __half filler);
#if BF16_ENABLED
template NDArray CSRGetData<kDGLCUDA, int32_t, __nv_bfloat16>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, __nv_bfloat16 filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, __nv_bfloat16>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, __nv_bfloat16 filler);
#endif  // BF16_ENABLED
template NDArray CSRGetData<kDGLCUDA, int32_t, float>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, float filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, float>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, float filler);
template NDArray CSRGetData<kDGLCUDA, int32_t, double>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, double filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, double>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, double filler);

// For CSRGetData<XPU, IdType>(CSRMatrix, NDArray, NDArray)
template NDArray CSRGetData<kDGLCUDA, int32_t, int32_t>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, int32_t filler);
template NDArray CSRGetData<kDGLCUDA, int64_t, int64_t>(
    CSRMatrix csr, NDArray rows, NDArray cols, bool return_eids,
    NDArray weights, int64_t filler);

}  // namespace impl
}  // namespace aten
}  // namespace dgl
da769d43faab239e6a336961a2d11014e489e02e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include<limits> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include <algorithm> using namespace std; // 8 byte. how to be 128byte? // Parameter need to restruct. //2 bytes, 2 bytes, 4 bytes, 4 bytes, 4 bytes. struct NUM_ADD { short2 read_haplotype; int Read_array; int read_large_length; }; double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } __constant__ float constant[10]; __constant__ int constant_int[10]; __global__ void pairHMM( int size, char * data, NUM_ADD * num_add, float * result,float * MG,float * DG, float * IG ) // what is the maximum number of parameters? { //MG, DG and IG are global memory to store indermediate result? //each thread finish one computation int offset=blockIdx.x*blockDim.x+threadIdx.x; MG=MG+offset; IG=IG+offset; DG=DG+offset; //printf("%d %d %d %d %d\n", constant_int[0],constant_int[1], constant_int[2],constant_int[3], constant_int[4]); while(offset<size) { __shared__ float parameter1[1024]; __shared__ float parameter2[1024]; __shared__ float parameter3[1024]; __shared__ float parameter4[1024]; //NUM_ADD number_address; //number_address=num_add[offset];//get from global memory short2 read_haplotype_number=num_add[offset].read_haplotype; int read_large_length=num_add[offset].read_large_length; //read_haplotype_number.x=number_address.read_number; char4 * read_base_array=(char4 *)(data+num_add[offset].Read_array); // to caculate the address of read_base_array. 
float *parameter1_array=(float *) (read_base_array+(read_large_length+3)/4*32); read_large_length=read_large_length*32; float *parameter2_array=(float *) (parameter1_array+read_large_length); float *parameter3_array=(float *) (parameter1_array+read_large_length*2); float *parameter4_array=(float *) (parameter1_array+read_large_length*3); //read_haplotype_number.y=number_address.haplotype_number; char4 * haplotype_base_array=(char4 * )(parameter1_array+read_large_length*4); //haplotype is 4 byte. Thus, in a warp it is 4*32=128 byte. //we need to change the struct of haplotype float result_block=constant[5]; char4 read_base_4_1; char4 read_base_4_2; int i; //if number_address.read_number is even //int time=0; //if(threadIdx.x==0) for(i=0;i<read_haplotype_number.x/8;i++) { //got read_base from globle memory (which is 32*4 (char4) = 128 bytes ) //read_base_4=read_base_array[i*constant_int[2]]; char4 read_base_temp; int cc=i*2*32; read_base_temp=read_base_array[cc]; read_base_4_1.x=read_base_temp.x; read_base_4_1.y=read_base_temp.y; read_base_4_1.z=read_base_temp.z; read_base_4_1.w=read_base_temp.w; read_base_temp=read_base_array[cc+32]; read_base_4_2.x=read_base_temp.x; read_base_4_2.y=read_base_temp.y; read_base_4_2.z=read_base_temp.z; read_base_4_2.w=read_base_temp.w; int skip=i*constant_int[1]; parameter1[threadIdx.x]=parameter1_array[skip]; parameter2[threadIdx.x]=parameter2_array[skip]; parameter3[threadIdx.x]=parameter3_array[skip]; parameter4[threadIdx.x]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*2]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*2]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*2]=parameter3_array[skip]; 
parameter4[threadIdx.x+blockDim.x*2]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*3]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*3]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*3]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*3]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*4]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*4]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*4]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*4]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*5]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*5]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*5]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*5]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*6]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*6]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*6]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*6]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*7]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*7]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*7]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*7]=parameter4_array[skip]; float Ml=constant[5];// left M; float Dl=constant[5];// left D; float Il=constant[5];// left I float M2=constant[5]; //left M2 float D2=constant[5]; //left D2 float M3=constant[5]; float D3=constant[5]; float M4=constant[5]; float D4=constant[5]; float M5=0; float D5=0; float M6=0; float D6=0; float M7=0; float D7=0; float M8=0; float D8=0; float MU=constant[5];// up M; float IU=constant[5];// up I; float DU=constant[5];// up D; float MMID=constant[5]; float MMID2=constant[5]; float MMID3=constant[5]; float MMID4=constant[5]; float MMID5=constant[5]; float MMID6=constant[5]; float 
MMID7=constant[5]; float MMID8=constant[5]; //epsion=constant[4]; // beta=constant[3]; int hh=(read_haplotype_number.y+3)/4; for(int j=0;j<hh;j++) { char4 haplotype_base; haplotype_base=haplotype_base_array[j*constant_int[2]]; for(int kk=0;kk<4;kk++) { //time++; float Qm,Qm_1,alpha,delta,xiksi; if(j*4+kk==read_haplotype_number.y) break; int index=(j*4+kk)*blockDim.x*gridDim.x; if(i>0) { //here should not using offset. But using the //get MU,IU,DU from global memory MU=MG[index]; IU=IG[index]; DU=DG[index]; } else { DU= constant[0] /(float) read_haplotype_number.y; MMID=__fmul_rn(constant[3],DU); } Qm=parameter1[threadIdx.x]; delta=parameter2[threadIdx.x]; xiksi=parameter3[threadIdx.x]; alpha=parameter4[threadIdx.x]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); float MID=__fadd_rn(IU,DU); float DDM=__fmul_rn(Ml,xiksi); float IIMI=__fmul_rn(IU,constant[4]); // if(i==1)printf("%e %e", IIMI, MU); char4 read_haplotype_base; if(kk==0) read_haplotype_base.y=haplotype_base.x; if(kk==1) read_haplotype_base.y=haplotype_base.y; if(kk==2) read_haplotype_base.y=haplotype_base.z; if(kk==3) read_haplotype_base.y=haplotype_base.w; float aa=(read_haplotype_base.y==read_base_4_1.x)? 
Qm_1:Qm; float MIIDD=__fmul_rn(constant[3],MID); Ml=__fmul_rn(aa,MMID); Dl=__fmaf_rn(Dl,constant[4],DDM); Il=__fmaf_rn(MU,delta,IIMI); MMID=__fmaf_rn(alpha,MU,MIIDD); //2 // if(i==1) printf("R=%c H=%c M1=%e I1=%e D1=%e\n", read_base_4_1.x,read_haplotype_base.y,Ml,Il,Dl); skip=threadIdx.x+blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); // if(i==1) printf("%e %e %e %e %e", Qm,delta,xiksi,alpha,Qm_1); MID=__fadd_rn(Il,Dl); DDM=__fmul_rn(M2,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_1.y)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M2=__fmul_rn(aa,MMID2); D2=__fmaf_rn(D2,constant[4],DDM); Il=__fmaf_rn(Ml,delta,IIMI); MMID2=__fmaf_rn(alpha,Ml,MIIDD); // if(i==1) printf("R=%c H=%c M1=%e I1=%e D1=%e\n", read_base_4_1.y,read_haplotype_base.y,M2,Il,D2); //3 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; //epsion=0.1; //beta=1.0-epsion; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D2); DDM=__fmul_rn(M3,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_1.z)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M3=__fmul_rn(aa,MMID3); D3=__fmaf_rn(D3,constant[4],DDM); Il=__fmaf_rn(M2,delta,IIMI); MMID3=__fmaf_rn(alpha,M2,MIIDD); // if(i==1) printf("R=%c H=%c M3=%e I3=%e D3=%e\n", read_base_4_1.z,read_haplotype_base.y,M3,Il,D3); //4 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D3); DDM=__fmul_rn(M4,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_1.w)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M4=__fmul_rn(aa,MMID4); D4=__fmaf_rn(D4,constant[4],DDM); Il=__fmaf_rn(M3,delta,IIMI); MMID4=__fmaf_rn(alpha,M3,MIIDD); // if(i==1) printf("R=%c H=%c M4=%e I4=%e D4=%e\n", 
read_base_4_2.y,read_haplotype_base.y,M4,Il,D4); //5 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D4); DDM=__fmul_rn(M5,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_2.x)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M5=__fmul_rn(aa,MMID5); D5=__fmaf_rn(D5,constant[4],DDM); Il=__fmaf_rn(M4,delta,IIMI); MMID5=__fmaf_rn(alpha,M4,MIIDD); // if(i==1) // printf("R=%c H=%c M5=%e I5=%e D5=%e\n", read_base_4_2.y,read_haplotype_base.y,M5,Il,D5); //6 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D5); DDM=__fmul_rn(M6,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_2.y)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M6=__fmul_rn(aa,MMID6); D6=__fmaf_rn(D6,constant[4],DDM); Il=__fmaf_rn(M5,delta,IIMI); MMID6=__fmaf_rn(alpha,M5,MIIDD); // if(i==1) printf("R=%c H=%c M6=%e I6=%e D6=%e\n", read_base_4_2.y,read_haplotype_base.y,M6,Il,D6); //7 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D6); DDM=__fmul_rn(M7,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_2.z)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M7=__fmul_rn(aa,MMID7); D7=__fmaf_rn(D7,constant[4],DDM); Il=__fmaf_rn(M6,delta,IIMI); MMID7=__fmaf_rn(alpha,M6,MIIDD); //8 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D7); DDM=__fmul_rn(M8,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_2.w)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M8=__fmul_rn(aa,MMID8); D8=__fmaf_rn(D8,constant[4],DDM); 
Il=__fmaf_rn(M7,delta,IIMI); MMID8=__fmaf_rn(alpha,M7,MIIDD); if(i==read_haplotype_number.x/8-1 && read_haplotype_number.x%8==0) { result_block=__fadd_rn(result_block,__fadd_rn(M8,Il)); } else { MG[index]=M8; IG[index]=Il; DG[index]=D8; } }//8 }//haplotype } // if(threadIdx.x==0) // printf("time=%d\n",time); //following is only 56 registers for(i=i*8;i<read_haplotype_number.x;i++) { //char4 read_base_4; if(i%4==0) { read_base_4_1=read_base_array[i/4*constant_int[2]]; } char4 read_haplotype_base; if(i%4==0) read_haplotype_base.x=read_base_4_1.x; if(i%4==1) read_haplotype_base.x=read_base_4_1.y; if(i%4==2) read_haplotype_base.x=read_base_4_1.z; if(i%4==3) read_haplotype_base.x=read_base_4_1.w; float Qm,Qm_1,alpha,delta,xiksi; delta=parameter2_array[i*constant_int[2]]; xiksi=parameter3_array[i*constant_int[2]]; alpha=parameter4_array[i*constant_int[2]]; Qm=parameter1_array[i*constant_int[2]]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); float Ml=0;// left M; float Dl=0;// left D; float Il=0; float MU=0;// up M; float IU=0;// up I; float DU=0;// up D; float MMID=0; if(i==0) { DU=constant[0]/(float) read_haplotype_number.y; MMID=__fmul_rn(constant[3],DU); } int hh=(read_haplotype_number.y+4-1)/4; for(int j=0;j<hh;j++) { char4 haplotype_base; haplotype_base=haplotype_base_array[j*constant_int[2]]; for(int kk=0;kk<4;kk++) { if(j*4+kk==read_haplotype_number.y) break; if(kk==0) read_haplotype_base.y=haplotype_base.x; if(kk==1) read_haplotype_base.y=haplotype_base.y; if(kk==2) read_haplotype_base.y=haplotype_base.z; if(kk==3) read_haplotype_base.y=haplotype_base.w; int index=(j*4+kk)*blockDim.x*gridDim.x; if(i>0) { //here should not using offset. But using the //get MU,IU,DU from global memory MU=MG[index]; IU=IG[index]; DU=DG[index]; } float MID=__fadd_rn(IU,DU); float DDM=__fmul_rn(Ml,xiksi); float IIMI=__fmul_rn(IU,constant[4]); float aa=(read_haplotype_base.y==read_haplotype_base.x)? 
Qm_1:Qm; float MIIDD=__fmul_rn(constant[3],MID); Ml=__fmul_rn(aa,MMID); Il=__fmaf_rn(MU,delta,IIMI); Dl=__fmaf_rn(Dl,constant[4],DDM); MMID=__fmaf_rn(alpha,MU,MIIDD); if(i<read_haplotype_number.x-1) { MG[index]=Ml; IG[index]=Il; DG[index]=Dl; } else result_block=__fadd_rn(result_block,__fadd_rn(Ml,Il)); }//4 } //haplotype }//read result[offset]=result_block; offset+=gridDim.x*blockDim.x ; } } struct InputData { int read_size; char read_base[260]; char base_quals[260]; char ins_quals[260]; char del_quals[260]; char gcp_quals[260]; int haplotype_size; char haplotype_base[500]; }; bool operator<(const InputData &a, const InputData &b) { // return x.point_value > y.point_value; if(a.read_size<b.read_size) return true; if(a.read_size==b.read_size) return a.haplotype_size<b.haplotype_size; else return false; } int main(int argc, char * argv[]) { int INI=(log10f((std::numeric_limits<float>::max() / 16))); //printf("input value of size_each_for \n"); //scanf("%d", &size_each_for); struct timespec start,finish; double computation_time=0,mem_cpy_time=0,read_time=0, data_prepare=0; double total_time=0; FILE * file; file=fopen("/data/04068/sren/dir_chromosome-10/b.txt","r"); //file=fopen("../a.txt","r"); // file=fopen(argv[1],"r"); //file=fopen("32_data.txt","r"); // file=fopen("less.txt","r"); int size; fscanf(file,"%d",&size); float * MG; float * DG; float * IG; hipMalloc( (float **)& MG,sizeof(float) *128*45*500*3); DG=MG+45*128*500;// ???? IG=DG+45*128*500; //????? 
clock_gettime(CLOCK_MONOTONIC_RAW,&start); float ph2pr_h[128]; for(int i=0;i<128;i++) { ph2pr_h[i]=powf(10.f, -((float)i) / 10.f); } hipError_t err; int constants_h_int[10]; float constants_h[10]; constants_h[0]=1.329228e+36; constants_h[1]=1.0; constants_h[2]=3.0; constants_h[3]=0.9; constants_h[4]=0.1; constants_h[5]=0.0; constants_h_int[0]=0; constants_h_int[1]=32*8; constants_h_int[2]=32; constants_h_int[3]=4; constants_h_int[4]=3; hipMemcpyToSymbol(constant,constants_h,sizeof(float)*10 ); hipMemcpyToSymbol(constant_int,constants_h_int,sizeof(int)*10 ); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); // data_prepare+=diff(start,finish); int total=0; char * result_d_total; float read_read, haplotype_haplotype; while(!feof(file)) { total+=size; char useless; useless=fgetc(file); clock_gettime(CLOCK_MONOTONIC_RAW,&start); InputData *inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { int read_size; fscanf(file,"%d\n",&inputdata[i].read_size); fscanf(file,"%s ",inputdata[i].read_base); read_size=inputdata[i].read_size; read_read=read_size; for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i]. 
base_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i].ins_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i].del_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; if(j<read_size-1) fscanf(file,"%d ",&aa); else fscanf(file,"%d \n",&aa); inputdata[i].gcp_quals[j]=(char)aa; } fscanf(file,"%d\n",&inputdata[i].haplotype_size); fscanf(file, "%s\n",inputdata[i].haplotype_base); haplotype_haplotype=inputdata[i].haplotype_size; } clock_gettime(CLOCK_MONOTONIC_RAW,&finish); read_time+=diff(start,finish); float * result_h=(float *) malloc(sizeof(float)*size); struct timespec start_total,finish_total; clock_gettime(CLOCK_MONOTONIC_RAW,&start_total); char * data_h_total; clock_gettime(CLOCK_MONOTONIC_RAW,&start); std::sort(inputdata, inputdata+size); //32 one chunck. int malloc_size_for_each_chunk=(65*4*32+260*4*32*4+125*4*32) ; int total_size=(size+31)/32*malloc_size_for_each_chunk+(size*sizeof(NUM_ADD)+127)/128*128; data_h_total=(char*)malloc(total_size); err=hipMalloc( (char **) &result_d_total,total_size+size*sizeof(float)); if(err!=hipSuccess) printf("Error %d:%s !\n", err, hipGetErrorString(err)); char * data_d_total=result_d_total; float * result_d=(float *)(result_d_total+total_size);//last part is to store the result. char * data_h=data_h_total; char * data_h_begin=data_h; NUM_ADD *data_num_add=(NUM_ADD *) (data_h); data_h=data_h+(size*sizeof(NUM_ADD)+127)/128*128; // it is 64*x .thus we donot need to worry about alignment. 
int data_size=0; //for each chunk int total_in_each=(size+31)/32; for(int i=0;i<total_in_each;i++) { //each is 32 //printf("total_in_each %d\n",total_in_each); //read_base int long_read_size=0; //to find the longest read_size for(int j=0;j<32;j++) { if(i*32+j>=size) break; if(long_read_size<inputdata[i*32+j].read_size) long_read_size=inputdata[i*32+j].read_size; } int change_length=(long_read_size+3)/4;//because tile=4; each time deal with 4 read char4 read_base_data[32*65]; for(int kk=0;kk<change_length;kk++) { for(int dd=0;dd<32;dd++) // { if(i*32+dd>=size) break; if(inputdata[i*32+dd].read_size<=kk*4) continue; else read_base_data[kk*32+dd].x=inputdata[i*32+dd].read_base[kk*4]; if(inputdata[i*32+dd].read_size<=kk*4+1) continue; else read_base_data[kk*32+dd].y=inputdata[i*32+dd].read_base[kk*4+1]; if(inputdata[i*32+dd].read_size<=kk*4+2) continue; else read_base_data[kk*32+dd].z=inputdata[i*32+dd].read_base[kk*4+2]; if(inputdata[i*32+dd].read_size<=kk*4+3) continue; else read_base_data[kk*32+dd].w=inputdata[i*32+dd].read_base[kk*4+3]; } } //finish read_base float parameter1[260*32];//Qm//128 do not change to 128 float parameter2[260*32];//QI//128 do not change to 128 float parameter3[260*32];//QD/128 do not change to 128 float parameter4[260*32];//alpha//128 do not change to 128 for(int kk=0;kk<long_read_size;kk++) { for(int dd=0;dd<32;dd++) { if(i*32+dd>=size) break; if(inputdata[i*32+dd].read_size<=kk) continue; else { parameter1[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].base_quals[kk]&127]; parameter2[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].ins_quals[kk]&127] ; parameter3[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].del_quals[kk]&127] ; parameter4[kk*32+dd]= 1.0f-ph2pr_h[((int)(inputdata[i*32+dd].ins_quals[kk]&127)+(int)( inputdata[i*32+dd].del_quals[kk]&127))&127]; // printf("kk=%d x=%d y=%d z=%d w=%d \n ",kk,parameter1[kk*32+dd],parameter2[kk*32+dd],parameter3[kk*32+dd],parameter4[kk*32+dd] ); } } } //to haplotype into 32 char4 int long_haplotype_size=0; //to find the 
longest hapltoype_size for(int j=0;j<32;j++) { if(i*32+j>=size) break; if(long_haplotype_size<inputdata[i*32+j].haplotype_size) long_haplotype_size=inputdata[i*32+j].haplotype_size; } int haplotype_change_length=(long_haplotype_size+3)/4; char4 haplotype_base_data[32*125]; for(int kk=0;kk<haplotype_change_length;kk++) { for(int dd=0;dd<32;dd++) { if(i*32+dd>=size) break; if(inputdata[i*32+dd].haplotype_size<=kk*4) continue; else haplotype_base_data[kk*32+dd].x=inputdata[i*32+dd].haplotype_base[kk*4]; if(inputdata[i*32+dd].haplotype_size<=kk*4+1) continue; else haplotype_base_data[kk*32+dd].y=inputdata[i*32+dd].haplotype_base[kk*4+1]; if(inputdata[i*32+dd].haplotype_size<=kk*4+2) continue; else haplotype_base_data[kk*32+dd].z=inputdata[i*32+dd].haplotype_base[kk*4+2]; if(inputdata[i*32+dd].haplotype_size<=kk*4+3) continue; else haplotype_base_data[kk*32+dd].w=inputdata[i*32+dd].haplotype_base[kk*4+3]; } } //put data address to each pair of read and haplotype. // read address memcpy(data_h,read_base_data,sizeof(char4)*32*change_length);//128 for(int kk=0;kk<32;kk++) { if(i*32+kk>=size) break; data_num_add[i*32+kk].read_haplotype.x=inputdata[i*32+kk].read_size; data_num_add[i*32+kk].read_haplotype.y=inputdata[i*32+kk].haplotype_size; data_num_add[i*32+kk].Read_array=data_size+sizeof(char4)*kk; // printf("set read size %d %d \n", data_num_add[i*32+kk].read_number,data_num_add[i*32+kk].haplotype_number); } data_h+=sizeof(char4)*32*change_length; data_size+=sizeof(char4)*32*change_length; //parameter address memcpy(data_h,parameter1,sizeof(float)*32*long_read_size); for(int kk=0;kk<32;kk++) { if(i*32+kk>=size) break; data_num_add[i*32+kk].read_large_length=long_read_size; } data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter2,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter3,sizeof(float)*32*long_read_size); 
data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter4,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; //haplotype address memcpy(data_h,haplotype_base_data,sizeof(char4)*32*haplotype_change_length); data_h+=sizeof(char4)*32*haplotype_change_length; data_size+=sizeof(char4)*32*haplotype_change_length; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; char * data_d; NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); data_d=data_d_total+(sizeof(NUM_ADD)*size+127)/128*128; //printf("data_d_total %p num_add_d %p data_d %p \n",data_d_total, num_add_d,data_d); int blocksize=128; int gridsize=45;//90 dim3 block(blocksize); dim3 grid(gridsize); // global memory to be used by GPU kernels. //float * MG; //float * DG; //float * IG; clock_gettime(CLOCK_MONOTONIC_RAW,&start); err=hipMemcpy(data_d_total,data_h_begin,data_size_to_copy,hipMemcpyHostToDevice); if(err!=hipSuccess) printf("Error %d: %s !\n", err, hipGetErrorString(err)); //hipMalloc( (float **)& MG,sizeof(float) *blocksize*gridsize*500*3); //DG=MG+blocksize*gridsize*500;// ???? //IG=DG+blocksize*gridsize*500; //????? 
//clock_gettime(CLOCK_MONOTONIC_RAW,&finish); //data_prepare+=diff(start,finish); hipLaunchKernelGGL(( pairHMM), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d, result_d,MG,DG,IG); hipMemcpy(result_h,result_d,size*sizeof(float),hipMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); for(int i=0;i<size;i++) float aa=(log10f((double)result_h[i]) - INI); // printf(" i=%d %e\n",i, result_h[i]); free(data_h_total); hipFree(result_d_total); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish_total); total_time+=diff(start_total,finish_total); free(result_h); free(inputdata); fscanf(file,"%d",&size); // if(total>10000) // break; // printf("%d\n",size); } hipFree(MG); hipDeviceReset(); printf("read_time=%e initial_time=%e computation_time= %e total_time=%e\n",read_time, data_prepare,computation_time, computation_time+mem_cpy_time); printf("total_time=%e\n",total_time); //printf("GCUPS: %lf \n", fakesize*read_read*haplotype_haplotype/computation_time/1000000000); return 0; }
da769d43faab239e6a336961a2d11014e489e02e.cu
#include <iostream> #include<limits> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <cuda.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include <algorithm> using namespace std; // 8 byte. how to be 128byte? // Parameter need to restruct. //2 bytes, 2 bytes, 4 bytes, 4 bytes, 4 bytes. struct NUM_ADD { short2 read_haplotype; int Read_array; int read_large_length; }; double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } __constant__ float constant[10]; __constant__ int constant_int[10]; __global__ void pairHMM( int size, char * data, NUM_ADD * num_add, float * result,float * MG,float * DG, float * IG ) // what is the maximum number of parameters? { //MG, DG and IG are global memory to store indermediate result? //each thread finish one computation int offset=blockIdx.x*blockDim.x+threadIdx.x; MG=MG+offset; IG=IG+offset; DG=DG+offset; //printf("%d %d %d %d %d\n", constant_int[0],constant_int[1], constant_int[2],constant_int[3], constant_int[4]); while(offset<size) { __shared__ float parameter1[1024]; __shared__ float parameter2[1024]; __shared__ float parameter3[1024]; __shared__ float parameter4[1024]; //NUM_ADD number_address; //number_address=num_add[offset];//get from global memory short2 read_haplotype_number=num_add[offset].read_haplotype; int read_large_length=num_add[offset].read_large_length; //read_haplotype_number.x=number_address.read_number; char4 * read_base_array=(char4 *)(data+num_add[offset].Read_array); // to caculate the address of read_base_array. 
float *parameter1_array=(float *) (read_base_array+(read_large_length+3)/4*32); read_large_length=read_large_length*32; float *parameter2_array=(float *) (parameter1_array+read_large_length); float *parameter3_array=(float *) (parameter1_array+read_large_length*2); float *parameter4_array=(float *) (parameter1_array+read_large_length*3); //read_haplotype_number.y=number_address.haplotype_number; char4 * haplotype_base_array=(char4 * )(parameter1_array+read_large_length*4); //haplotype is 4 byte. Thus, in a warp it is 4*32=128 byte. //we need to change the struct of haplotype float result_block=constant[5]; char4 read_base_4_1; char4 read_base_4_2; int i; //if number_address.read_number is even //int time=0; //if(threadIdx.x==0) for(i=0;i<read_haplotype_number.x/8;i++) { //got read_base from globle memory (which is 32*4 (char4) = 128 bytes ) //read_base_4=read_base_array[i*constant_int[2]]; char4 read_base_temp; int cc=i*2*32; read_base_temp=read_base_array[cc]; read_base_4_1.x=read_base_temp.x; read_base_4_1.y=read_base_temp.y; read_base_4_1.z=read_base_temp.z; read_base_4_1.w=read_base_temp.w; read_base_temp=read_base_array[cc+32]; read_base_4_2.x=read_base_temp.x; read_base_4_2.y=read_base_temp.y; read_base_4_2.z=read_base_temp.z; read_base_4_2.w=read_base_temp.w; int skip=i*constant_int[1]; parameter1[threadIdx.x]=parameter1_array[skip]; parameter2[threadIdx.x]=parameter2_array[skip]; parameter3[threadIdx.x]=parameter3_array[skip]; parameter4[threadIdx.x]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*2]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*2]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*2]=parameter3_array[skip]; 
parameter4[threadIdx.x+blockDim.x*2]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*3]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*3]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*3]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*3]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*4]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*4]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*4]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*4]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*5]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*5]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*5]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*5]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*6]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*6]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*6]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*6]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*7]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*7]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*7]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*7]=parameter4_array[skip]; float Ml=constant[5];// left M; float Dl=constant[5];// left D; float Il=constant[5];// left I float M2=constant[5]; //left M2 float D2=constant[5]; //left D2 float M3=constant[5]; float D3=constant[5]; float M4=constant[5]; float D4=constant[5]; float M5=0; float D5=0; float M6=0; float D6=0; float M7=0; float D7=0; float M8=0; float D8=0; float MU=constant[5];// up M; float IU=constant[5];// up I; float DU=constant[5];// up D; float MMID=constant[5]; float MMID2=constant[5]; float MMID3=constant[5]; float MMID4=constant[5]; float MMID5=constant[5]; float MMID6=constant[5]; float 
MMID7=constant[5]; float MMID8=constant[5]; //epsion=constant[4]; // beta=constant[3]; int hh=(read_haplotype_number.y+3)/4; for(int j=0;j<hh;j++) { char4 haplotype_base; haplotype_base=haplotype_base_array[j*constant_int[2]]; for(int kk=0;kk<4;kk++) { //time++; float Qm,Qm_1,alpha,delta,xiksi; if(j*4+kk==read_haplotype_number.y) break; int index=(j*4+kk)*blockDim.x*gridDim.x; if(i>0) { //here should not using offset. But using the //get MU,IU,DU from global memory MU=MG[index]; IU=IG[index]; DU=DG[index]; } else { DU= constant[0] /(float) read_haplotype_number.y; MMID=__fmul_rn(constant[3],DU); } Qm=parameter1[threadIdx.x]; delta=parameter2[threadIdx.x]; xiksi=parameter3[threadIdx.x]; alpha=parameter4[threadIdx.x]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); float MID=__fadd_rn(IU,DU); float DDM=__fmul_rn(Ml,xiksi); float IIMI=__fmul_rn(IU,constant[4]); // if(i==1)printf("%e %e", IIMI, MU); char4 read_haplotype_base; if(kk==0) read_haplotype_base.y=haplotype_base.x; if(kk==1) read_haplotype_base.y=haplotype_base.y; if(kk==2) read_haplotype_base.y=haplotype_base.z; if(kk==3) read_haplotype_base.y=haplotype_base.w; float aa=(read_haplotype_base.y==read_base_4_1.x)? 
Qm_1:Qm; float MIIDD=__fmul_rn(constant[3],MID); Ml=__fmul_rn(aa,MMID); Dl=__fmaf_rn(Dl,constant[4],DDM); Il=__fmaf_rn(MU,delta,IIMI); MMID=__fmaf_rn(alpha,MU,MIIDD); //2 // if(i==1) printf("R=%c H=%c M1=%e I1=%e D1=%e\n", read_base_4_1.x,read_haplotype_base.y,Ml,Il,Dl); skip=threadIdx.x+blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); // if(i==1) printf("%e %e %e %e %e", Qm,delta,xiksi,alpha,Qm_1); MID=__fadd_rn(Il,Dl); DDM=__fmul_rn(M2,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_1.y)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M2=__fmul_rn(aa,MMID2); D2=__fmaf_rn(D2,constant[4],DDM); Il=__fmaf_rn(Ml,delta,IIMI); MMID2=__fmaf_rn(alpha,Ml,MIIDD); // if(i==1) printf("R=%c H=%c M1=%e I1=%e D1=%e\n", read_base_4_1.y,read_haplotype_base.y,M2,Il,D2); //3 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; //epsion=0.1; //beta=1.0-epsion; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D2); DDM=__fmul_rn(M3,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_1.z)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M3=__fmul_rn(aa,MMID3); D3=__fmaf_rn(D3,constant[4],DDM); Il=__fmaf_rn(M2,delta,IIMI); MMID3=__fmaf_rn(alpha,M2,MIIDD); // if(i==1) printf("R=%c H=%c M3=%e I3=%e D3=%e\n", read_base_4_1.z,read_haplotype_base.y,M3,Il,D3); //4 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D3); DDM=__fmul_rn(M4,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_1.w)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M4=__fmul_rn(aa,MMID4); D4=__fmaf_rn(D4,constant[4],DDM); Il=__fmaf_rn(M3,delta,IIMI); MMID4=__fmaf_rn(alpha,M3,MIIDD); // if(i==1) printf("R=%c H=%c M4=%e I4=%e D4=%e\n", 
read_base_4_2.y,read_haplotype_base.y,M4,Il,D4); //5 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D4); DDM=__fmul_rn(M5,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_2.x)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M5=__fmul_rn(aa,MMID5); D5=__fmaf_rn(D5,constant[4],DDM); Il=__fmaf_rn(M4,delta,IIMI); MMID5=__fmaf_rn(alpha,M4,MIIDD); // if(i==1) // printf("R=%c H=%c M5=%e I5=%e D5=%e\n", read_base_4_2.y,read_haplotype_base.y,M5,Il,D5); //6 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D5); DDM=__fmul_rn(M6,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_2.y)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M6=__fmul_rn(aa,MMID6); D6=__fmaf_rn(D6,constant[4],DDM); Il=__fmaf_rn(M5,delta,IIMI); MMID6=__fmaf_rn(alpha,M5,MIIDD); // if(i==1) printf("R=%c H=%c M6=%e I6=%e D6=%e\n", read_base_4_2.y,read_haplotype_base.y,M6,Il,D6); //7 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D6); DDM=__fmul_rn(M7,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_2.z)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M7=__fmul_rn(aa,MMID7); D7=__fmaf_rn(D7,constant[4],DDM); Il=__fmaf_rn(M6,delta,IIMI); MMID7=__fmaf_rn(alpha,M6,MIIDD); //8 skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D7); DDM=__fmul_rn(M8,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4_2.w)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M8=__fmul_rn(aa,MMID8); D8=__fmaf_rn(D8,constant[4],DDM); 
Il=__fmaf_rn(M7,delta,IIMI); MMID8=__fmaf_rn(alpha,M7,MIIDD); if(i==read_haplotype_number.x/8-1 && read_haplotype_number.x%8==0) { result_block=__fadd_rn(result_block,__fadd_rn(M8,Il)); } else { MG[index]=M8; IG[index]=Il; DG[index]=D8; } }//8 }//haplotype } // if(threadIdx.x==0) // printf("time=%d\n",time); //following is only 56 registers for(i=i*8;i<read_haplotype_number.x;i++) { //char4 read_base_4; if(i%4==0) { read_base_4_1=read_base_array[i/4*constant_int[2]]; } char4 read_haplotype_base; if(i%4==0) read_haplotype_base.x=read_base_4_1.x; if(i%4==1) read_haplotype_base.x=read_base_4_1.y; if(i%4==2) read_haplotype_base.x=read_base_4_1.z; if(i%4==3) read_haplotype_base.x=read_base_4_1.w; float Qm,Qm_1,alpha,delta,xiksi; delta=parameter2_array[i*constant_int[2]]; xiksi=parameter3_array[i*constant_int[2]]; alpha=parameter4_array[i*constant_int[2]]; Qm=parameter1_array[i*constant_int[2]]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); float Ml=0;// left M; float Dl=0;// left D; float Il=0; float MU=0;// up M; float IU=0;// up I; float DU=0;// up D; float MMID=0; if(i==0) { DU=constant[0]/(float) read_haplotype_number.y; MMID=__fmul_rn(constant[3],DU); } int hh=(read_haplotype_number.y+4-1)/4; for(int j=0;j<hh;j++) { char4 haplotype_base; haplotype_base=haplotype_base_array[j*constant_int[2]]; for(int kk=0;kk<4;kk++) { if(j*4+kk==read_haplotype_number.y) break; if(kk==0) read_haplotype_base.y=haplotype_base.x; if(kk==1) read_haplotype_base.y=haplotype_base.y; if(kk==2) read_haplotype_base.y=haplotype_base.z; if(kk==3) read_haplotype_base.y=haplotype_base.w; int index=(j*4+kk)*blockDim.x*gridDim.x; if(i>0) { //here should not using offset. But using the //get MU,IU,DU from global memory MU=MG[index]; IU=IG[index]; DU=DG[index]; } float MID=__fadd_rn(IU,DU); float DDM=__fmul_rn(Ml,xiksi); float IIMI=__fmul_rn(IU,constant[4]); float aa=(read_haplotype_base.y==read_haplotype_base.x)? 
Qm_1:Qm; float MIIDD=__fmul_rn(constant[3],MID); Ml=__fmul_rn(aa,MMID); Il=__fmaf_rn(MU,delta,IIMI); Dl=__fmaf_rn(Dl,constant[4],DDM); MMID=__fmaf_rn(alpha,MU,MIIDD); if(i<read_haplotype_number.x-1) { MG[index]=Ml; IG[index]=Il; DG[index]=Dl; } else result_block=__fadd_rn(result_block,__fadd_rn(Ml,Il)); }//4 } //haplotype }//read result[offset]=result_block; offset+=gridDim.x*blockDim.x ; } } struct InputData { int read_size; char read_base[260]; char base_quals[260]; char ins_quals[260]; char del_quals[260]; char gcp_quals[260]; int haplotype_size; char haplotype_base[500]; }; bool operator<(const InputData &a, const InputData &b) { // return x.point_value > y.point_value; if(a.read_size<b.read_size) return true; if(a.read_size==b.read_size) return a.haplotype_size<b.haplotype_size; else return false; } int main(int argc, char * argv[]) { int INI=(log10f((std::numeric_limits<float>::max() / 16))); //printf("input value of size_each_for \n"); //scanf("%d", &size_each_for); struct timespec start,finish; double computation_time=0,mem_cpy_time=0,read_time=0, data_prepare=0; double total_time=0; FILE * file; file=fopen("/data/04068/sren/dir_chromosome-10/b.txt","r"); //file=fopen("../a.txt","r"); // file=fopen(argv[1],"r"); //file=fopen("32_data.txt","r"); // file=fopen("less.txt","r"); int size; fscanf(file,"%d",&size); float * MG; float * DG; float * IG; cudaMalloc( (float **)& MG,sizeof(float) *128*45*500*3); DG=MG+45*128*500;// ???? IG=DG+45*128*500; //????? 
clock_gettime(CLOCK_MONOTONIC_RAW,&start); float ph2pr_h[128]; for(int i=0;i<128;i++) { ph2pr_h[i]=powf(10.f, -((float)i) / 10.f); } cudaError err; int constants_h_int[10]; float constants_h[10]; constants_h[0]=1.329228e+36; constants_h[1]=1.0; constants_h[2]=3.0; constants_h[3]=0.9; constants_h[4]=0.1; constants_h[5]=0.0; constants_h_int[0]=0; constants_h_int[1]=32*8; constants_h_int[2]=32; constants_h_int[3]=4; constants_h_int[4]=3; cudaMemcpyToSymbol(constant,constants_h,sizeof(float)*10 ); cudaMemcpyToSymbol(constant_int,constants_h_int,sizeof(int)*10 ); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); // data_prepare+=diff(start,finish); int total=0; char * result_d_total; float read_read, haplotype_haplotype; while(!feof(file)) { total+=size; char useless; useless=fgetc(file); clock_gettime(CLOCK_MONOTONIC_RAW,&start); InputData *inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { int read_size; fscanf(file,"%d\n",&inputdata[i].read_size); fscanf(file,"%s ",inputdata[i].read_base); read_size=inputdata[i].read_size; read_read=read_size; for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i]. 
base_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i].ins_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i].del_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; if(j<read_size-1) fscanf(file,"%d ",&aa); else fscanf(file,"%d \n",&aa); inputdata[i].gcp_quals[j]=(char)aa; } fscanf(file,"%d\n",&inputdata[i].haplotype_size); fscanf(file, "%s\n",inputdata[i].haplotype_base); haplotype_haplotype=inputdata[i].haplotype_size; } clock_gettime(CLOCK_MONOTONIC_RAW,&finish); read_time+=diff(start,finish); float * result_h=(float *) malloc(sizeof(float)*size); struct timespec start_total,finish_total; clock_gettime(CLOCK_MONOTONIC_RAW,&start_total); char * data_h_total; clock_gettime(CLOCK_MONOTONIC_RAW,&start); std::sort(inputdata, inputdata+size); //32 one chunck. int malloc_size_for_each_chunk=(65*4*32+260*4*32*4+125*4*32) ; int total_size=(size+31)/32*malloc_size_for_each_chunk+(size*sizeof(NUM_ADD)+127)/128*128; data_h_total=(char*)malloc(total_size); err=cudaMalloc( (char **) &result_d_total,total_size+size*sizeof(float)); if(err!=cudaSuccess) printf("Error %d:%s !\n", err, cudaGetErrorString(err)); char * data_d_total=result_d_total; float * result_d=(float *)(result_d_total+total_size);//last part is to store the result. char * data_h=data_h_total; char * data_h_begin=data_h; NUM_ADD *data_num_add=(NUM_ADD *) (data_h); data_h=data_h+(size*sizeof(NUM_ADD)+127)/128*128; // it is 64*x .thus we donot need to worry about alignment. 
int data_size=0; //for each chunk int total_in_each=(size+31)/32; for(int i=0;i<total_in_each;i++) { //each is 32 //printf("total_in_each %d\n",total_in_each); //read_base int long_read_size=0; //to find the longest read_size for(int j=0;j<32;j++) { if(i*32+j>=size) break; if(long_read_size<inputdata[i*32+j].read_size) long_read_size=inputdata[i*32+j].read_size; } int change_length=(long_read_size+3)/4;//because tile=4; each time deal with 4 read char4 read_base_data[32*65]; for(int kk=0;kk<change_length;kk++) { for(int dd=0;dd<32;dd++) // { if(i*32+dd>=size) break; if(inputdata[i*32+dd].read_size<=kk*4) continue; else read_base_data[kk*32+dd].x=inputdata[i*32+dd].read_base[kk*4]; if(inputdata[i*32+dd].read_size<=kk*4+1) continue; else read_base_data[kk*32+dd].y=inputdata[i*32+dd].read_base[kk*4+1]; if(inputdata[i*32+dd].read_size<=kk*4+2) continue; else read_base_data[kk*32+dd].z=inputdata[i*32+dd].read_base[kk*4+2]; if(inputdata[i*32+dd].read_size<=kk*4+3) continue; else read_base_data[kk*32+dd].w=inputdata[i*32+dd].read_base[kk*4+3]; } } //finish read_base float parameter1[260*32];//Qm//128 do not change to 128 float parameter2[260*32];//QI//128 do not change to 128 float parameter3[260*32];//QD/128 do not change to 128 float parameter4[260*32];//alpha//128 do not change to 128 for(int kk=0;kk<long_read_size;kk++) { for(int dd=0;dd<32;dd++) { if(i*32+dd>=size) break; if(inputdata[i*32+dd].read_size<=kk) continue; else { parameter1[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].base_quals[kk]&127]; parameter2[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].ins_quals[kk]&127] ; parameter3[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].del_quals[kk]&127] ; parameter4[kk*32+dd]= 1.0f-ph2pr_h[((int)(inputdata[i*32+dd].ins_quals[kk]&127)+(int)( inputdata[i*32+dd].del_quals[kk]&127))&127]; // printf("kk=%d x=%d y=%d z=%d w=%d \n ",kk,parameter1[kk*32+dd],parameter2[kk*32+dd],parameter3[kk*32+dd],parameter4[kk*32+dd] ); } } } //to haplotype into 32 char4 int long_haplotype_size=0; //to find the 
longest hapltoype_size for(int j=0;j<32;j++) { if(i*32+j>=size) break; if(long_haplotype_size<inputdata[i*32+j].haplotype_size) long_haplotype_size=inputdata[i*32+j].haplotype_size; } int haplotype_change_length=(long_haplotype_size+3)/4; char4 haplotype_base_data[32*125]; for(int kk=0;kk<haplotype_change_length;kk++) { for(int dd=0;dd<32;dd++) { if(i*32+dd>=size) break; if(inputdata[i*32+dd].haplotype_size<=kk*4) continue; else haplotype_base_data[kk*32+dd].x=inputdata[i*32+dd].haplotype_base[kk*4]; if(inputdata[i*32+dd].haplotype_size<=kk*4+1) continue; else haplotype_base_data[kk*32+dd].y=inputdata[i*32+dd].haplotype_base[kk*4+1]; if(inputdata[i*32+dd].haplotype_size<=kk*4+2) continue; else haplotype_base_data[kk*32+dd].z=inputdata[i*32+dd].haplotype_base[kk*4+2]; if(inputdata[i*32+dd].haplotype_size<=kk*4+3) continue; else haplotype_base_data[kk*32+dd].w=inputdata[i*32+dd].haplotype_base[kk*4+3]; } } //put data address to each pair of read and haplotype. // read address memcpy(data_h,read_base_data,sizeof(char4)*32*change_length);//128 for(int kk=0;kk<32;kk++) { if(i*32+kk>=size) break; data_num_add[i*32+kk].read_haplotype.x=inputdata[i*32+kk].read_size; data_num_add[i*32+kk].read_haplotype.y=inputdata[i*32+kk].haplotype_size; data_num_add[i*32+kk].Read_array=data_size+sizeof(char4)*kk; // printf("set read size %d %d \n", data_num_add[i*32+kk].read_number,data_num_add[i*32+kk].haplotype_number); } data_h+=sizeof(char4)*32*change_length; data_size+=sizeof(char4)*32*change_length; //parameter address memcpy(data_h,parameter1,sizeof(float)*32*long_read_size); for(int kk=0;kk<32;kk++) { if(i*32+kk>=size) break; data_num_add[i*32+kk].read_large_length=long_read_size; } data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter2,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter3,sizeof(float)*32*long_read_size); 
data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter4,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; //haplotype address memcpy(data_h,haplotype_base_data,sizeof(char4)*32*haplotype_change_length); data_h+=sizeof(char4)*32*haplotype_change_length; data_size+=sizeof(char4)*32*haplotype_change_length; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; char * data_d; NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); data_d=data_d_total+(sizeof(NUM_ADD)*size+127)/128*128; //printf("data_d_total %p num_add_d %p data_d %p \n",data_d_total, num_add_d,data_d); int blocksize=128; int gridsize=45;//90 dim3 block(blocksize); dim3 grid(gridsize); // global memory to be used by GPU kernels. //float * MG; //float * DG; //float * IG; clock_gettime(CLOCK_MONOTONIC_RAW,&start); err=cudaMemcpy(data_d_total,data_h_begin,data_size_to_copy,cudaMemcpyHostToDevice); if(err!=cudaSuccess) printf("Error %d: %s !\n", err, cudaGetErrorString(err)); //cudaMalloc( (float **)& MG,sizeof(float) *blocksize*gridsize*500*3); //DG=MG+blocksize*gridsize*500;// ???? //IG=DG+blocksize*gridsize*500; //????? 
//clock_gettime(CLOCK_MONOTONIC_RAW,&finish); //data_prepare+=diff(start,finish); pairHMM<<<grid,block>>> (size,data_d,num_add_d, result_d,MG,DG,IG); cudaMemcpy(result_h,result_d,size*sizeof(float),cudaMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); for(int i=0;i<size;i++) float aa=(log10f((double)result_h[i]) - INI); // printf(" i=%d %e\n",i, result_h[i]); free(data_h_total); cudaFree(result_d_total); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish_total); total_time+=diff(start_total,finish_total); free(result_h); free(inputdata); fscanf(file,"%d",&size); // if(total>10000) // break; // printf("%d\n",size); } cudaFree(MG); cudaDeviceReset(); printf("read_time=%e initial_time=%e computation_time= %e total_time=%e\n",read_time, data_prepare,computation_time, computation_time+mem_cpy_time); printf("total_time=%e\n",total_time); //printf("GCUPS: %lf \n", fakesize*read_read*haplotype_haplotype/computation_time/1000000000); return 0; }
d39d7880b1187f2d2f8d361aca5046f2cefa0994.hip
// !!! This is a file automatically generated by hipify!!! #ifndef __GQD_SIN_COS_CU__ #define __GQD_SIN_COS_CU__ #include "common.hip" __device__ void sincos_taylor(const gqd_real &a, gqd_real &sin_a, gqd_real &cos_a) { const double thresh = 0.5 * _qd_eps * fabs(to_double(a)); gqd_real p, s, t, x; if (is_zero(a)) { sin_a.x = sin_a.y = sin_a.z = sin_a.w = 0.0; cos_a.x = 1.0; cos_a.y = cos_a.z = cos_a.w = 0.0; return; } //x = -sqr(a); x = negative(sqr(a)); s = a; p = a; int i = 0; do { p = p * x; t = p * inv_fact[i]; s = s + t; i = i + 2; } while (i < n_inv_fact && fabs(to_double(t)) > thresh); sin_a = s; cos_a = sqrt(1.0 - sqr(s)); } __device__ gqd_real sin_taylor(const gqd_real &a) { const double thresh = 0.5 * _qd_eps * fabs(to_double(a)); gqd_real p, s, t, x; if (is_zero(a)) { //return make_qd(0.0); s.x = s.y = s.z = s.w = 0.0; return s; } //x = -sqr(a); x = negative(sqr(a)); s = a; p = a; int i = 0; do { p = p * x; t = p * inv_fact[i]; s = s + t; i += 2; } while (i < n_inv_fact && fabs(to_double(t)) > thresh); return s; } __device__ gqd_real cos_taylor(const gqd_real &a) { const double thresh = 0.5 * _qd_eps; gqd_real p, s, t, x; if (is_zero(a)) { //return make_qd(1.0); s.x = 1.0; s.y = s.z = s.w = 0.0; return s; } //x = -sqr(a); x = negative(sqr(a)); s = 1.0 + mul_pwr2(x, 0.5); p = x; int i = 1; do { p = p * x; t = p * inv_fact[i]; s = s + t; i += 2; } while (i < n_inv_fact && fabs(to_double(t)) > thresh); return s; } __device__ gqd_real sin(const gqd_real &a) { gqd_real z, r; if (is_zero(a)) { //return make_qd(0.0); r.x = r.y = r.z = r.w = 0.0; return r; } // approximately reduce modulo 2*pi z = nint(a / _qd_2pi); r = a - _qd_2pi * z; // approximately reduce modulo pi/2 and then modulo pi/1024 double q = floor(r.x / _qd_pi2.x + 0.5); gqd_real t = r - _qd_pi2 * q; int j = (int) (q); q = floor(t.x / _qd_pi1024.x + 0.5); t = t - _qd_pi1024 * q; int k = (int) (q); int abs_k = abs(k); if (j < -2 || j > 2) { //gqd_real::error("(gqd_real::sin): Cannot reduce modulo 
pi/2."); //return gqd_real::_nan; //return make_qd(0.0); r.x = r.y = r.z = r.w = 0.0; return r; } if (abs_k > 256) { //gqd_real::error("(gqd_real::sin): Cannot reduce modulo pi/1024."); //return gqd_real::_nan; //return make_qd( 0.0 ); r.x = r.y = r.z = r.w = 0.0; return r; } if (k == 0) { switch (j) { case 0: return sin_taylor(t); case 1: return cos_taylor(t); case -1: return negative(cos_taylor(t)); default: return negative(sin_taylor(t)); } } //gqd_real sin_t, cos_t; //gqd_real u = d_cos_table[abs_k-1]; //gqd_real v = d_sin_table[abs_k-1]; //sincos_taylor(t, sin_t, cos_t); ///use z and r again to avoid allocate additional memory ///z = sin_t, r = cos_t sincos_taylor(t, z, r); if (j == 0) { z = d_cos_table[abs_k - 1] * z; r = d_sin_table[abs_k - 1] * r; if (k > 0) { //z = d_cos_table[abs_k-1] * z; //r = d_sin_table[abs_k-1] * r; return z + r; } else { //z = d_cos_table[abs_k-1] * z; //r = d_sin_table[abs_k-1] * r; return z - r; } } else if (j == 1) { r = d_cos_table[abs_k - 1] * r; z = d_sin_table[abs_k - 1] * z; if (k > 0) { //r = d_cos_table[abs_k-1] * r; //z = d_sin_table[abs_k-1] * z; return r - z; } else { //r = d_cos_table[abs_k-1] * r; //z = d_sin_table[abs_k-1] * z; return r + z; } } else if (j == -1) { z = d_sin_table[abs_k - 1] * z; r = d_cos_table[abs_k - 1] * r; if (k > 0) { //z = d_sin_table[abs_k-1] * z; //r = d_cos_table[abs_k-1] * r; return z - r; } else { //r = negative(d_cos_table[abs_k-1]) * r; //r = (d_cos_table[abs_k-1]) * r; r.x = -r.x; r.y = -r.y; r.z = -r.z; r.w = -r.w; //z = d_sin_table[abs_k-1] * z; return r - z; } } else { r = d_sin_table[abs_k - 1] * r; z = d_cos_table[abs_k - 1] * z; if (k > 0) { //z = negative(d_cos_table[abs_k-1]) * z; //z = d_cos_table[abs_k-1] * z; z.x = -z.x; z.y = -z.y; z.z = -z.z; z.w = -z.w; //r = d_sin_table[abs_k-1] * r; return z - r; } else { //r = d_sin_table[abs_k-1] * r ; //z = d_cos_table[abs_k-1] * z; return r - z; } } } __device__ gqd_real cos(const gqd_real &a) { if (is_zero(a)) { return 
make_qd(1.0); } // approximately reduce modulo 2*pi gqd_real z = nint(a / _qd_2pi); gqd_real r = a - _qd_2pi * z; // approximately reduce modulo pi/2 and then modulo pi/1024 double q = floor(r.x / _qd_pi2.x + 0.5); gqd_real t = r - _qd_pi2 * q; int j = (int) (q); q = floor(t.x / _qd_pi1024.x + 0.5); t = t - _qd_pi1024 * q; int k = (int) (q); int abs_k = abs(k); if (j < -2 || j > 2) { //qd_real::error("(qd_real::cos): Cannot reduce modulo pi/2."); //return qd_real::_nan; return make_qd(0.0); } if (abs_k > 256) { //qd_real::error("(qd_real::cos): Cannot reduce modulo pi/1024."); //return qd_real::_nan; return make_qd(0.0); } if (k == 0) { switch (j) { case 0: return cos_taylor(t); case 1: return negative(sin_taylor(t)); case -1: return sin_taylor(t); default: return negative(cos_taylor(t)); } } gqd_real sin_t, cos_t; sincos_taylor(t, sin_t, cos_t); gqd_real u = d_cos_table[abs_k - 1]; gqd_real v = d_sin_table[abs_k - 1]; if (j == 0) { if (k > 0) { r = u * cos_t - v * sin_t; } else { r = u * cos_t + v * sin_t; } } else if (j == 1) { if (k > 0) { r = negative(u * sin_t) - v * cos_t; } else { r = v * cos_t - u * sin_t; } } else if (j == -1) { if (k > 0) { r = u * sin_t + v * cos_t; } else { r = u * sin_t - v * cos_t; } } else { if (k > 0) { r = v * sin_t - u * cos_t; } else { r = negative(u * cos_t) - v * sin_t; } } return r; } __device__ void sincos(const gqd_real &a, gqd_real &sin_a, gqd_real &cos_a) { if (is_zero(a)) { sin_a = make_qd(0.0); cos_a = make_qd(1.0); return; } // approximately reduce by 2*pi gqd_real z = nint(a / _qd_2pi); gqd_real t = a - _qd_2pi * z; // approximately reduce by pi/2 and then by pi/1024. 
double q = floor(t.x / _qd_pi2.x + 0.5); t = t - _qd_pi2 * q; int j = (int) (q); q = floor(t.x / _qd_pi1024.x + 0.5); t = t - _qd_pi1024 * q; int k = (int) (q); int abs_k = abs(k); if (j < -2 || j > 2) { //qd_real::error("(qd_real::sincos): Cannot reduce modulo pi/2."); //cos_a = sin_a = qd_real::_nan; cos_a = sin_a = make_qd(0.0); return; } if (abs_k > 256) { //qd_real::error("(qd_real::sincos): Cannot reduce modulo pi/1024."); //cos_a = sin_a = qd_real::_nan; cos_a = sin_a = make_qd(0.0); return; } gqd_real sin_t, cos_t; sincos_taylor(t, sin_t, cos_t); if (k == 0) { if (j == 0) { sin_a = sin_t; cos_a = cos_t; } else if (j == 1) { sin_a = cos_t; cos_a = negative(sin_t); } else if (j == -1) { sin_a = negative(cos_t); cos_a = sin_t; } else { sin_a = negative(sin_t); cos_a = negative(cos_t); } return; } gqd_real u = d_cos_table[abs_k - 1]; gqd_real v = d_sin_table[abs_k - 1]; if (j == 0) { if (k > 0) { sin_a = u * sin_t + v * cos_t; cos_a = u * cos_t - v * sin_t; } else { sin_a = u * sin_t - v * cos_t; cos_a = u * cos_t + v * sin_t; } } else if (j == 1) { if (k > 0) { cos_a = negative(u * sin_t) - v * cos_t; sin_a = u * cos_t - v * sin_t; } else { cos_a = v * cos_t - u * sin_t; sin_a = u * cos_t + v * sin_t; } } else if (j == -1) { if (k > 0) { cos_a = u * sin_t + v * cos_t; sin_a = v * sin_t - u * cos_t; } else { cos_a = u * sin_t - v * cos_t; sin_a = negative(u * cos_t) - v * sin_t; } } else { if (k > 0) { sin_a = negative(u * sin_t) - v * cos_t; cos_a = v * sin_t - u * cos_t; } else { sin_a = v * cos_t - u * sin_t; cos_a = negative(u * cos_t) - v * sin_t; } } } __device__ gqd_real tan(const gqd_real &a) { gqd_real s, c; sincos(a, s, c); return s / c; } #ifdef ALL_MATH __device__ gqd_real atan2(const gqd_real &y, const gqd_real &x) { if (is_zero(x)) { if (is_zero(y)) { // Both x and y is zero. //qd_real::error("(qd_real::atan2): Both arguments zero."); //return qd_real::_nan; return make_qd(0.0); } return (is_positive(y)) ? 
_qd_pi2 : negative(_qd_pi2); } else if (is_zero(y)) { return (is_positive(x)) ? make_qd(0.0) : _qd_pi; } if (x == y) { return (is_positive(y)) ? _qd_pi4 : negative(_qd_3pi4); } if (x == negative(y)) { return (is_positive(y)) ? _qd_3pi4 : negative(_qd_pi4); } gqd_real r = sqrt(sqr(x) + sqr(y)); gqd_real xx = x / r; gqd_real yy = y / r; gqd_real z = make_qd(atan2(to_double(y), to_double(x))); gqd_real sin_z, cos_z; if (abs(xx.x) > abs(yy.x)) { sincos(z, sin_z, cos_z); z = z + (yy - sin_z) / cos_z; sincos(z, sin_z, cos_z); z = z + (yy - sin_z) / cos_z; sincos(z, sin_z, cos_z); z = z + (yy - sin_z) / cos_z; } else { sincos(z, sin_z, cos_z); z = z - (xx - cos_z) / sin_z; sincos(z, sin_z, cos_z); z = z - (xx - cos_z) / sin_z; sincos(z, sin_z, cos_z); z = z - (xx - cos_z) / sin_z; } return z; } __device__ gqd_real atan(const gqd_real &a) { return atan2(a, make_qd(1.0)); } __device__ gqd_real asin(const gqd_real &a) { gqd_real abs_a = abs(a); if (abs_a > 1.0) { //qd_real::error("(qd_real::asin): Argument out of domain."); //return qd_real::_nan; return make_qd(0.0); } if (is_one(abs_a)) { return (is_positive(a)) ? _qd_pi2 : negative(_qd_pi2); } return atan2(a, sqrt(1.0 - sqr(a))); } __device__ gqd_real acos(const gqd_real &a) { gqd_real abs_a = abs(a); if (abs_a > 1.0) { //qd_real::error("(qd_real::acos): Argument out of domain."); //return qd_real::_nan; return make_qd(0.0); } if (is_one(abs_a)) { return (is_positive(a)) ? 
make_qd(0.0) : _qd_pi; } return atan2(sqrt(1.0 - sqr(a)), a); } __device__ gqd_real sinh(const gqd_real &a) { if (is_zero(a)) { return make_qd(0.0); } if (abs(a) > 0.05) { gqd_real ea = exp(a); return mul_pwr2(ea - inv(ea), 0.5); } gqd_real s = a; gqd_real t = a; gqd_real r = sqr(t); double m = 1.0; double thresh = abs(to_double(a) * _qd_eps); do { m = m + 2.0; t = (t * r); t = t / ((m - 1) * m); s = s + t; } while (abs(t) > thresh); return s; } __device__ gqd_real cosh(const gqd_real &a) { if (is_zero(a)) { return make_qd(1.0); } gqd_real ea = exp(a); return mul_pwr2(ea + inv(ea), 0.5); } __device__ gqd_real tanh(const gqd_real &a) { if (is_zero(a)) { return make_qd(0.0); } if (abs(to_double(a)) > 0.05) { gqd_real ea = exp(a); gqd_real inv_ea = inv(ea); return (ea - inv_ea) / (ea + inv_ea); } else { gqd_real s, c; s = sinh(a); c = sqrt(1.0 + sqr(s)); return s / c; } } __device__ void sincosh(const gqd_real &a, gqd_real &s, gqd_real &c) { if (abs(to_double(a)) <= 0.05) { s = sinh(a); c = sqrt(1.0 + sqr(s)); } else { gqd_real ea = exp(a); gqd_real inv_ea = inv(ea); s = mul_pwr2(ea - inv_ea, 0.5); c = mul_pwr2(ea + inv_ea, 0.5); } } __device__ gqd_real asinh(const gqd_real &a) { return log(a + sqrt(sqr(a) + 1.0)); } __device__ gqd_real acosh(const gqd_real &a) { if (a < 1.0) { ///qd_real::error("(qd_real::acosh): Argument out of domain."); //return qd_real::_nan; return make_qd(0.0); } return log(a + sqrt(sqr(a) - 1.0)); } __device__ gqd_real atanh(const gqd_real &a) { if (abs(a) >= 1.0) { //qd_real::error("(qd_real::atanh): Argument out of domain."); //return qd_real::_nan; return make_qd(0.0); } return mul_pwr2(log((1.0 + a) / (1.0 - a)), 0.5); } #else __device__ gqd_real atan2(const gqd_real &y, const gqd_real &x) { return make_qd(0.0); } __device__ gqd_real atan(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real asin(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real acos(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real 
sinh(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real cosh(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real tanh(const gqd_real &a) { return make_qd(0.0); } __device__ void sincosh(const gqd_real &a, gqd_real &s, gqd_real &c) { } __device__ gqd_real asinh(const gqd_real &a) { return log(a + sqrt(sqr(a) + 1.0)); } __device__ gqd_real acosh(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real atanh(const gqd_real &a) { return make_qd(0.0); } #endif /* ALL_MATH */ #endif /* __GQD_SIN_COS_CU__ */
d39d7880b1187f2d2f8d361aca5046f2cefa0994.cu
#ifndef __GQD_SIN_COS_CU__ #define __GQD_SIN_COS_CU__ #include "common.cu" __device__ void sincos_taylor(const gqd_real &a, gqd_real &sin_a, gqd_real &cos_a) { const double thresh = 0.5 * _qd_eps * fabs(to_double(a)); gqd_real p, s, t, x; if (is_zero(a)) { sin_a.x = sin_a.y = sin_a.z = sin_a.w = 0.0; cos_a.x = 1.0; cos_a.y = cos_a.z = cos_a.w = 0.0; return; } //x = -sqr(a); x = negative(sqr(a)); s = a; p = a; int i = 0; do { p = p * x; t = p * inv_fact[i]; s = s + t; i = i + 2; } while (i < n_inv_fact && fabs(to_double(t)) > thresh); sin_a = s; cos_a = sqrt(1.0 - sqr(s)); } __device__ gqd_real sin_taylor(const gqd_real &a) { const double thresh = 0.5 * _qd_eps * fabs(to_double(a)); gqd_real p, s, t, x; if (is_zero(a)) { //return make_qd(0.0); s.x = s.y = s.z = s.w = 0.0; return s; } //x = -sqr(a); x = negative(sqr(a)); s = a; p = a; int i = 0; do { p = p * x; t = p * inv_fact[i]; s = s + t; i += 2; } while (i < n_inv_fact && fabs(to_double(t)) > thresh); return s; } __device__ gqd_real cos_taylor(const gqd_real &a) { const double thresh = 0.5 * _qd_eps; gqd_real p, s, t, x; if (is_zero(a)) { //return make_qd(1.0); s.x = 1.0; s.y = s.z = s.w = 0.0; return s; } //x = -sqr(a); x = negative(sqr(a)); s = 1.0 + mul_pwr2(x, 0.5); p = x; int i = 1; do { p = p * x; t = p * inv_fact[i]; s = s + t; i += 2; } while (i < n_inv_fact && fabs(to_double(t)) > thresh); return s; } __device__ gqd_real sin(const gqd_real &a) { gqd_real z, r; if (is_zero(a)) { //return make_qd(0.0); r.x = r.y = r.z = r.w = 0.0; return r; } // approximately reduce modulo 2*pi z = nint(a / _qd_2pi); r = a - _qd_2pi * z; // approximately reduce modulo pi/2 and then modulo pi/1024 double q = floor(r.x / _qd_pi2.x + 0.5); gqd_real t = r - _qd_pi2 * q; int j = (int) (q); q = floor(t.x / _qd_pi1024.x + 0.5); t = t - _qd_pi1024 * q; int k = (int) (q); int abs_k = abs(k); if (j < -2 || j > 2) { //gqd_real::error("(gqd_real::sin): Cannot reduce modulo pi/2."); //return gqd_real::_nan; //return make_qd(0.0); r.x 
= r.y = r.z = r.w = 0.0; return r; } if (abs_k > 256) { //gqd_real::error("(gqd_real::sin): Cannot reduce modulo pi/1024."); //return gqd_real::_nan; //return make_qd( 0.0 ); r.x = r.y = r.z = r.w = 0.0; return r; } if (k == 0) { switch (j) { case 0: return sin_taylor(t); case 1: return cos_taylor(t); case -1: return negative(cos_taylor(t)); default: return negative(sin_taylor(t)); } } //gqd_real sin_t, cos_t; //gqd_real u = d_cos_table[abs_k-1]; //gqd_real v = d_sin_table[abs_k-1]; //sincos_taylor(t, sin_t, cos_t); ///use z and r again to avoid allocate additional memory ///z = sin_t, r = cos_t sincos_taylor(t, z, r); if (j == 0) { z = d_cos_table[abs_k - 1] * z; r = d_sin_table[abs_k - 1] * r; if (k > 0) { //z = d_cos_table[abs_k-1] * z; //r = d_sin_table[abs_k-1] * r; return z + r; } else { //z = d_cos_table[abs_k-1] * z; //r = d_sin_table[abs_k-1] * r; return z - r; } } else if (j == 1) { r = d_cos_table[abs_k - 1] * r; z = d_sin_table[abs_k - 1] * z; if (k > 0) { //r = d_cos_table[abs_k-1] * r; //z = d_sin_table[abs_k-1] * z; return r - z; } else { //r = d_cos_table[abs_k-1] * r; //z = d_sin_table[abs_k-1] * z; return r + z; } } else if (j == -1) { z = d_sin_table[abs_k - 1] * z; r = d_cos_table[abs_k - 1] * r; if (k > 0) { //z = d_sin_table[abs_k-1] * z; //r = d_cos_table[abs_k-1] * r; return z - r; } else { //r = negative(d_cos_table[abs_k-1]) * r; //r = (d_cos_table[abs_k-1]) * r; r.x = -r.x; r.y = -r.y; r.z = -r.z; r.w = -r.w; //z = d_sin_table[abs_k-1] * z; return r - z; } } else { r = d_sin_table[abs_k - 1] * r; z = d_cos_table[abs_k - 1] * z; if (k > 0) { //z = negative(d_cos_table[abs_k-1]) * z; //z = d_cos_table[abs_k-1] * z; z.x = -z.x; z.y = -z.y; z.z = -z.z; z.w = -z.w; //r = d_sin_table[abs_k-1] * r; return z - r; } else { //r = d_sin_table[abs_k-1] * r ; //z = d_cos_table[abs_k-1] * z; return r - z; } } } __device__ gqd_real cos(const gqd_real &a) { if (is_zero(a)) { return make_qd(1.0); } // approximately reduce modulo 2*pi gqd_real z = nint(a / 
_qd_2pi); gqd_real r = a - _qd_2pi * z; // approximately reduce modulo pi/2 and then modulo pi/1024 double q = floor(r.x / _qd_pi2.x + 0.5); gqd_real t = r - _qd_pi2 * q; int j = (int) (q); q = floor(t.x / _qd_pi1024.x + 0.5); t = t - _qd_pi1024 * q; int k = (int) (q); int abs_k = abs(k); if (j < -2 || j > 2) { //qd_real::error("(qd_real::cos): Cannot reduce modulo pi/2."); //return qd_real::_nan; return make_qd(0.0); } if (abs_k > 256) { //qd_real::error("(qd_real::cos): Cannot reduce modulo pi/1024."); //return qd_real::_nan; return make_qd(0.0); } if (k == 0) { switch (j) { case 0: return cos_taylor(t); case 1: return negative(sin_taylor(t)); case -1: return sin_taylor(t); default: return negative(cos_taylor(t)); } } gqd_real sin_t, cos_t; sincos_taylor(t, sin_t, cos_t); gqd_real u = d_cos_table[abs_k - 1]; gqd_real v = d_sin_table[abs_k - 1]; if (j == 0) { if (k > 0) { r = u * cos_t - v * sin_t; } else { r = u * cos_t + v * sin_t; } } else if (j == 1) { if (k > 0) { r = negative(u * sin_t) - v * cos_t; } else { r = v * cos_t - u * sin_t; } } else if (j == -1) { if (k > 0) { r = u * sin_t + v * cos_t; } else { r = u * sin_t - v * cos_t; } } else { if (k > 0) { r = v * sin_t - u * cos_t; } else { r = negative(u * cos_t) - v * sin_t; } } return r; } __device__ void sincos(const gqd_real &a, gqd_real &sin_a, gqd_real &cos_a) { if (is_zero(a)) { sin_a = make_qd(0.0); cos_a = make_qd(1.0); return; } // approximately reduce by 2*pi gqd_real z = nint(a / _qd_2pi); gqd_real t = a - _qd_2pi * z; // approximately reduce by pi/2 and then by pi/1024. 
double q = floor(t.x / _qd_pi2.x + 0.5); t = t - _qd_pi2 * q; int j = (int) (q); q = floor(t.x / _qd_pi1024.x + 0.5); t = t - _qd_pi1024 * q; int k = (int) (q); int abs_k = abs(k); if (j < -2 || j > 2) { //qd_real::error("(qd_real::sincos): Cannot reduce modulo pi/2."); //cos_a = sin_a = qd_real::_nan; cos_a = sin_a = make_qd(0.0); return; } if (abs_k > 256) { //qd_real::error("(qd_real::sincos): Cannot reduce modulo pi/1024."); //cos_a = sin_a = qd_real::_nan; cos_a = sin_a = make_qd(0.0); return; } gqd_real sin_t, cos_t; sincos_taylor(t, sin_t, cos_t); if (k == 0) { if (j == 0) { sin_a = sin_t; cos_a = cos_t; } else if (j == 1) { sin_a = cos_t; cos_a = negative(sin_t); } else if (j == -1) { sin_a = negative(cos_t); cos_a = sin_t; } else { sin_a = negative(sin_t); cos_a = negative(cos_t); } return; } gqd_real u = d_cos_table[abs_k - 1]; gqd_real v = d_sin_table[abs_k - 1]; if (j == 0) { if (k > 0) { sin_a = u * sin_t + v * cos_t; cos_a = u * cos_t - v * sin_t; } else { sin_a = u * sin_t - v * cos_t; cos_a = u * cos_t + v * sin_t; } } else if (j == 1) { if (k > 0) { cos_a = negative(u * sin_t) - v * cos_t; sin_a = u * cos_t - v * sin_t; } else { cos_a = v * cos_t - u * sin_t; sin_a = u * cos_t + v * sin_t; } } else if (j == -1) { if (k > 0) { cos_a = u * sin_t + v * cos_t; sin_a = v * sin_t - u * cos_t; } else { cos_a = u * sin_t - v * cos_t; sin_a = negative(u * cos_t) - v * sin_t; } } else { if (k > 0) { sin_a = negative(u * sin_t) - v * cos_t; cos_a = v * sin_t - u * cos_t; } else { sin_a = v * cos_t - u * sin_t; cos_a = negative(u * cos_t) - v * sin_t; } } } __device__ gqd_real tan(const gqd_real &a) { gqd_real s, c; sincos(a, s, c); return s / c; } #ifdef ALL_MATH __device__ gqd_real atan2(const gqd_real &y, const gqd_real &x) { if (is_zero(x)) { if (is_zero(y)) { // Both x and y is zero. //qd_real::error("(qd_real::atan2): Both arguments zero."); //return qd_real::_nan; return make_qd(0.0); } return (is_positive(y)) ? 
_qd_pi2 : negative(_qd_pi2); } else if (is_zero(y)) { return (is_positive(x)) ? make_qd(0.0) : _qd_pi; } if (x == y) { return (is_positive(y)) ? _qd_pi4 : negative(_qd_3pi4); } if (x == negative(y)) { return (is_positive(y)) ? _qd_3pi4 : negative(_qd_pi4); } gqd_real r = sqrt(sqr(x) + sqr(y)); gqd_real xx = x / r; gqd_real yy = y / r; gqd_real z = make_qd(atan2(to_double(y), to_double(x))); gqd_real sin_z, cos_z; if (abs(xx.x) > abs(yy.x)) { sincos(z, sin_z, cos_z); z = z + (yy - sin_z) / cos_z; sincos(z, sin_z, cos_z); z = z + (yy - sin_z) / cos_z; sincos(z, sin_z, cos_z); z = z + (yy - sin_z) / cos_z; } else { sincos(z, sin_z, cos_z); z = z - (xx - cos_z) / sin_z; sincos(z, sin_z, cos_z); z = z - (xx - cos_z) / sin_z; sincos(z, sin_z, cos_z); z = z - (xx - cos_z) / sin_z; } return z; } __device__ gqd_real atan(const gqd_real &a) { return atan2(a, make_qd(1.0)); } __device__ gqd_real asin(const gqd_real &a) { gqd_real abs_a = abs(a); if (abs_a > 1.0) { //qd_real::error("(qd_real::asin): Argument out of domain."); //return qd_real::_nan; return make_qd(0.0); } if (is_one(abs_a)) { return (is_positive(a)) ? _qd_pi2 : negative(_qd_pi2); } return atan2(a, sqrt(1.0 - sqr(a))); } __device__ gqd_real acos(const gqd_real &a) { gqd_real abs_a = abs(a); if (abs_a > 1.0) { //qd_real::error("(qd_real::acos): Argument out of domain."); //return qd_real::_nan; return make_qd(0.0); } if (is_one(abs_a)) { return (is_positive(a)) ? 
make_qd(0.0) : _qd_pi; } return atan2(sqrt(1.0 - sqr(a)), a); } __device__ gqd_real sinh(const gqd_real &a) { if (is_zero(a)) { return make_qd(0.0); } if (abs(a) > 0.05) { gqd_real ea = exp(a); return mul_pwr2(ea - inv(ea), 0.5); } gqd_real s = a; gqd_real t = a; gqd_real r = sqr(t); double m = 1.0; double thresh = abs(to_double(a) * _qd_eps); do { m = m + 2.0; t = (t * r); t = t / ((m - 1) * m); s = s + t; } while (abs(t) > thresh); return s; } __device__ gqd_real cosh(const gqd_real &a) { if (is_zero(a)) { return make_qd(1.0); } gqd_real ea = exp(a); return mul_pwr2(ea + inv(ea), 0.5); } __device__ gqd_real tanh(const gqd_real &a) { if (is_zero(a)) { return make_qd(0.0); } if (abs(to_double(a)) > 0.05) { gqd_real ea = exp(a); gqd_real inv_ea = inv(ea); return (ea - inv_ea) / (ea + inv_ea); } else { gqd_real s, c; s = sinh(a); c = sqrt(1.0 + sqr(s)); return s / c; } } __device__ void sincosh(const gqd_real &a, gqd_real &s, gqd_real &c) { if (abs(to_double(a)) <= 0.05) { s = sinh(a); c = sqrt(1.0 + sqr(s)); } else { gqd_real ea = exp(a); gqd_real inv_ea = inv(ea); s = mul_pwr2(ea - inv_ea, 0.5); c = mul_pwr2(ea + inv_ea, 0.5); } } __device__ gqd_real asinh(const gqd_real &a) { return log(a + sqrt(sqr(a) + 1.0)); } __device__ gqd_real acosh(const gqd_real &a) { if (a < 1.0) { ///qd_real::error("(qd_real::acosh): Argument out of domain."); //return qd_real::_nan; return make_qd(0.0); } return log(a + sqrt(sqr(a) - 1.0)); } __device__ gqd_real atanh(const gqd_real &a) { if (abs(a) >= 1.0) { //qd_real::error("(qd_real::atanh): Argument out of domain."); //return qd_real::_nan; return make_qd(0.0); } return mul_pwr2(log((1.0 + a) / (1.0 - a)), 0.5); } #else __device__ gqd_real atan2(const gqd_real &y, const gqd_real &x) { return make_qd(0.0); } __device__ gqd_real atan(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real asin(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real acos(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real 
sinh(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real cosh(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real tanh(const gqd_real &a) { return make_qd(0.0); } __device__ void sincosh(const gqd_real &a, gqd_real &s, gqd_real &c) { } __device__ gqd_real asinh(const gqd_real &a) { return log(a + sqrt(sqr(a) + 1.0)); } __device__ gqd_real acosh(const gqd_real &a) { return make_qd(0.0); } __device__ gqd_real atanh(const gqd_real &a) { return make_qd(0.0); } #endif /* ALL_MATH */ #endif /* __GQD_SIN_COS_CU__ */
074b7bd3638ff566d5f88396f95df5ca74f4b3a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // @file spp_gpu.cu // @brief SPP block implementation (GPU) // @author Hakan Bilen #include "spp.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* spp_average_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void spp_average_kernel (T* pooled, const T* data, const int height, const int width, const int depth, const int size, const int numTotBins, const int numLevels, const T* levels, const int numROIs, const T* ROIs) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; int pooledVolume = numTotBins * depth * numROIs; if (pooledIndex < pooledVolume) { int pl = pooledIndex % numTotBins; int pc = (pooledIndex / numTotBins) % depth; int pr = (pooledIndex / numTotBins / depth); // roi no int roi_image = ROIs[5 * pr + 0]; int roi_start_h = ROIs[5 * pr + 1]; int roi_start_w = ROIs[5 * pr + 2]; int roi_end_h = ROIs[5 * pr + 3]; int roi_end_w = ROIs[5 * pr + 4]; if(roi_start_w==roi_end_w) { if(roi_start_w>0) roi_start_w--; else roi_end_w++; } if(roi_start_h==roi_end_h) { if(roi_start_h>0) roi_start_h--; else roi_end_h++; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // Find pyramid level and bin int pb = -1; int pLevel = -1; int numBins = 0; for(int l=0;l<numLevels;l++) { if(pl-numBins>=0 && pl-numBins<static_cast<int>(levels[l] * levels[l])) { pb = pl - numBins; pLevel = l; } numBins += static_cast<int>(levels[l] * levels[l]); } int pooledWidth = levels[pLevel]; int pooledHeight = levels[pLevel]; int pw = pb / pooledHeight; int ph = pb % pooledHeight; const T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooledHeight); const T bin_size_w = static_cast<T>(roi_width) / 
static_cast<T>(pooledWidth); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); int offset_data = (roi_image * depth + pc) * (width*height); data += offset_data; T bestValue = 0; const T coef = 1.f / (T)((wend-wstart) * (hend-hstart)); for (int w = wstart; w < wend; ++w) { for (int h = hstart; h < hend; ++h) { int index = w * height + h ; bestValue += data[index] * coef; } } pooled[pooledIndex] = bestValue ; } } /* ---------------------------------------------------------------- */ /* spp_max_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void spp_max_kernel(T* pooled, const T* data, const int height, const int width, const int depth, const int size, const int numTotBins, const int numLevels, const T* levels, const int numROIs, const T* ROIs) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; int pooledVolume = numTotBins * depth * numROIs; if (pooledIndex < pooledVolume) { int pl = pooledIndex % numTotBins; int pc = (pooledIndex / numTotBins) % depth; int pr = (pooledIndex / numTotBins / depth); // roi no int roi_image = ROIs[5 * pr + 0]; int roi_start_h = ROIs[5 * pr + 1]; int roi_start_w = ROIs[5 * pr + 2]; int roi_end_h = ROIs[5 * pr + 3]; int roi_end_w = ROIs[5 * pr + 4]; if(roi_start_w==roi_end_w) { if(roi_start_w>0) roi_start_w--; else roi_end_w++; } if(roi_start_h==roi_end_h) { if(roi_start_h>0) roi_start_h--; else roi_end_h++; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 
1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // Find pyramid level and bin int pb = -1; int pLevel = -1; int numBins = 0; for(int l=0;l<numLevels;l++) { if(pl-numBins>=0 && pl-numBins<static_cast<int>(levels[l] * levels[l])) { pb = pl - numBins; pLevel = l; } numBins += static_cast<int>(levels[l] * levels[l]); } int pooledWidth = levels[pLevel]; int pooledHeight = levels[pLevel]; int pw = pb / pooledHeight; int ph = pb % pooledHeight; const T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooledHeight); const T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooledWidth); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int offset_data = (roi_image * depth + pc) * (width*height); data += offset_data; T bestValue = is_empty ? 
0 : data[wstart * height + hstart]; for (int w = wstart; w < wend; ++w) { for (int h = hstart; h < hend; ++h) { int index = w * height + h ; bestValue = max(bestValue, data[index]) ; } } pooled[pooledIndex] = bestValue ; } } /* ---------------------------------------------------------------- */ /* spp_max_backward */ /* ---------------------------------------------------------------- */ // an implementation of atomicAdd() for double (really slow) static __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template<typename T> __global__ void spp_max_backward_kernel(T* derData, const T* data, const T* derPooled, const int height, const int width, const int depth, const int size, const int numTotBins, const int numLevels, const T* levels, const int numROIs, const T* ROIs) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; int pooledVolume = numTotBins * depth * numROIs; if (pooledIndex < pooledVolume) { int pl = pooledIndex % numTotBins; int pc = (pooledIndex / numTotBins) % depth; int pr = (pooledIndex / numTotBins / depth); // roi no int roi_image = ROIs[5 * pr + 0]; int roi_start_h = ROIs[5 * pr + 1]; int roi_start_w = ROIs[5 * pr + 2]; int roi_end_h = ROIs[5 * pr + 3]; int roi_end_w = ROIs[5 * pr + 4]; if(roi_start_w==roi_end_w) { if(roi_start_w>0) roi_start_w--; else roi_end_w++; } if(roi_start_h==roi_end_h) { if(roi_start_h>0) roi_start_h--; else roi_end_h++; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // Find pyramid level and bin int pb = -1; int pLevel = -1; int numBins = 0; for(int l=0;l<numLevels;l++) { if(pl-numBins>=0 && 
pl-numBins<static_cast<int>(levels[l] * levels[l])) { pb = pl - numBins; pLevel = l; } numBins += static_cast<int>(levels[l] * levels[l]); } int pooledWidth = levels[pLevel]; int pooledHeight = levels[pLevel]; int pw = pb / pooledHeight; int ph = pb % pooledHeight; const T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooledHeight); const T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooledWidth); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); data += (roi_image * depth + pc) * (width*height); derData += (roi_image * depth + pc) * (width*height); int bestIndex = wstart * height + hstart; T bestValue = is_empty ? 0 : data[bestIndex]; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int index = w * height + h ; T value = data[index] ; if (value > bestValue) { bestValue = value ; bestIndex = index ; } } } /* This is bad, but required to eliminate a race condition when writing to bottom_diff. Caffe goes the other way around, but requires remembering the layer output, or the maximal indexes. 
atomicAdd(add, val) */ atomicAdd(derData + bestIndex, derPooled[pooledIndex]) ; } } /* ---------------------------------------------------------------- */ /* spp_average_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void spp_average_backward_kernel (T* derData, const T* data, const T* derPooled, const int height, const int width, const int depth, const int size, const int numTotBins, const int numLevels, const T* levels, const int numROIs, const T* ROIs) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; int pooledVolume = numTotBins * depth * numROIs; if (pooledIndex < pooledVolume) { int pl = pooledIndex % numTotBins; int pc = (pooledIndex / numTotBins) % depth; int pr = (pooledIndex / numTotBins / depth); // roi no int roi_image = ROIs[5 * pr + 0]; int roi_start_h = ROIs[5 * pr + 1]; int roi_start_w = ROIs[5 * pr + 2]; int roi_end_h = ROIs[5 * pr + 3]; int roi_end_w = ROIs[5 * pr + 4]; if(roi_start_w==roi_end_w) { if(roi_start_w>0) roi_start_w--; else roi_end_w++; } if(roi_start_h==roi_end_h) { if(roi_start_h>0) roi_start_h--; else roi_end_h++; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // Find pyramid level and bin int pb = -1; int pLevel = -1; int numBins = 0; for(int l=0;l<numLevels;l++) { if(pl-numBins>=0 && pl-numBins<static_cast<int>(levels[l] * levels[l])) { pb = pl - numBins; pLevel = l; } numBins += static_cast<int>(levels[l] * levels[l]); } int pooledWidth = levels[pLevel]; int pooledHeight = levels[pLevel]; int pw = pb / pooledHeight; int ph = pb % pooledHeight; const T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooledHeight); const T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooledWidth); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = 
static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); data += (roi_image * depth + pc) * (width*height); derData += (roi_image * depth + pc) * (width*height); const T coef = 1.f / (T)((wend-wstart)*(hend-hstart)); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int index = w * height + h ; /* This is bad, but required to eliminate a race condition when writing to bottom_diff. Caffe goes the other way around, but requires remembering the layer output, or the maximal indexes. atomicAdd(add, val) */ atomicAdd(derData + index, derPooled[pooledIndex] * coef) ; } } } } /* ---------------------------------------------------------------- */ /* Interface */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template <typename type> struct spp_max<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* pooled, type const* data, size_t height, size_t width, size_t depth, size_t size, size_t numTotBins, size_t numLevels, type const* levels, size_t numROIs, type const* ROIs) { int pooledVolume = numTotBins * depth * numROIs; hipLaunchKernelGGL(( spp_max_kernel<type>) , dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)),dim3(VL_CUDA_NUM_THREADS) , 0, 0, pooled, data, height, width, depth, size, numTotBins, numLevels, levels, numROIs, ROIs); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? 
vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, type const* derPooled, size_t height, size_t width, size_t depth, size_t size, size_t numTotBins, size_t numLevels, type const* levels, size_t numROIs, type const* ROIs) { int pooledVolume = numTotBins * depth * numROIs; hipLaunchKernelGGL(( spp_max_backward_kernel<type>) , dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, derData, data, derPooled, height, width, depth, size, numTotBins, numLevels, levels, numROIs, ROIs); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; // spp_max template <typename type> struct spp_average<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* pooled, type const* data, size_t height, size_t width, size_t depth, size_t size, size_t numTotBins, size_t numLevels, type const* levels, size_t numROIs, type const* ROIs) { int pooledVolume = numTotBins * depth * numROIs; hipLaunchKernelGGL(( spp_average_kernel<type>) , dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)),dim3(VL_CUDA_NUM_THREADS) , 0, 0, pooled, data, height, width, depth, size, numTotBins, numLevels, levels, numROIs, ROIs); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, type const* derPooled, size_t height, size_t width, size_t depth, size_t size, size_t numTotBins, size_t numLevels, type const * levels, size_t numROIs, type const * ROIs) { int pooledVolume = numTotBins * depth * numROIs; hipLaunchKernelGGL(( spp_average_backward_kernel<type>) , dim3(divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, derData, data, derPooled, height, width, depth, size, numTotBins, numLevels, levels, numROIs, ROIs); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? 
vl::VLE_Success : vl::VLE_Cuda ; } } ; // spp_average } } ; // namespace vl::impl // Instantiations template struct vl::impl::spp_max<vl::VLDT_GPU, float> ; template struct vl::impl::spp_average<vl::VLDT_GPU, float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::spp_max<vl::VLDT_GPU, double> ; template struct vl::impl::spp_average<vl::VLDT_GPU, double> ; #endif
074b7bd3638ff566d5f88396f95df5ca74f4b3a0.cu
// @file spp_gpu.cu // @brief SPP block implementation (GPU) // @author Hakan Bilen #include "spp.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* spp_average_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void spp_average_kernel (T* pooled, const T* data, const int height, const int width, const int depth, const int size, const int numTotBins, const int numLevels, const T* levels, const int numROIs, const T* ROIs) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; int pooledVolume = numTotBins * depth * numROIs; if (pooledIndex < pooledVolume) { int pl = pooledIndex % numTotBins; int pc = (pooledIndex / numTotBins) % depth; int pr = (pooledIndex / numTotBins / depth); // roi no int roi_image = ROIs[5 * pr + 0]; int roi_start_h = ROIs[5 * pr + 1]; int roi_start_w = ROIs[5 * pr + 2]; int roi_end_h = ROIs[5 * pr + 3]; int roi_end_w = ROIs[5 * pr + 4]; if(roi_start_w==roi_end_w) { if(roi_start_w>0) roi_start_w--; else roi_end_w++; } if(roi_start_h==roi_end_h) { if(roi_start_h>0) roi_start_h--; else roi_end_h++; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // Find pyramid level and bin int pb = -1; int pLevel = -1; int numBins = 0; for(int l=0;l<numLevels;l++) { if(pl-numBins>=0 && pl-numBins<static_cast<int>(levels[l] * levels[l])) { pb = pl - numBins; pLevel = l; } numBins += static_cast<int>(levels[l] * levels[l]); } int pooledWidth = levels[pLevel]; int pooledHeight = levels[pLevel]; int pw = pb / pooledHeight; int ph = pb % pooledHeight; const T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooledHeight); const T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooledWidth); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int 
wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); int offset_data = (roi_image * depth + pc) * (width*height); data += offset_data; T bestValue = 0; const T coef = 1.f / (T)((wend-wstart) * (hend-hstart)); for (int w = wstart; w < wend; ++w) { for (int h = hstart; h < hend; ++h) { int index = w * height + h ; bestValue += data[index] * coef; } } pooled[pooledIndex] = bestValue ; } } /* ---------------------------------------------------------------- */ /* spp_max_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void spp_max_kernel(T* pooled, const T* data, const int height, const int width, const int depth, const int size, const int numTotBins, const int numLevels, const T* levels, const int numROIs, const T* ROIs) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; int pooledVolume = numTotBins * depth * numROIs; if (pooledIndex < pooledVolume) { int pl = pooledIndex % numTotBins; int pc = (pooledIndex / numTotBins) % depth; int pr = (pooledIndex / numTotBins / depth); // roi no int roi_image = ROIs[5 * pr + 0]; int roi_start_h = ROIs[5 * pr + 1]; int roi_start_w = ROIs[5 * pr + 2]; int roi_end_h = ROIs[5 * pr + 3]; int roi_end_w = ROIs[5 * pr + 4]; if(roi_start_w==roi_end_w) { if(roi_start_w>0) roi_start_w--; else roi_end_w++; } if(roi_start_h==roi_end_h) { if(roi_start_h>0) roi_start_h--; else roi_end_h++; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // Find pyramid level and bin int pb = -1; int 
pLevel = -1; int numBins = 0; for(int l=0;l<numLevels;l++) { if(pl-numBins>=0 && pl-numBins<static_cast<int>(levels[l] * levels[l])) { pb = pl - numBins; pLevel = l; } numBins += static_cast<int>(levels[l] * levels[l]); } int pooledWidth = levels[pLevel]; int pooledHeight = levels[pLevel]; int pw = pb / pooledHeight; int ph = pb % pooledHeight; const T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooledHeight); const T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooledWidth); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int offset_data = (roi_image * depth + pc) * (width*height); data += offset_data; T bestValue = is_empty ? 
0 : data[wstart * height + hstart]; for (int w = wstart; w < wend; ++w) { for (int h = hstart; h < hend; ++h) { int index = w * height + h ; bestValue = max(bestValue, data[index]) ; } } pooled[pooledIndex] = bestValue ; } } /* ---------------------------------------------------------------- */ /* spp_max_backward */ /* ---------------------------------------------------------------- */ // an implementation of atomicAdd() for double (really slow) static __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template<typename T> __global__ void spp_max_backward_kernel(T* derData, const T* data, const T* derPooled, const int height, const int width, const int depth, const int size, const int numTotBins, const int numLevels, const T* levels, const int numROIs, const T* ROIs) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; int pooledVolume = numTotBins * depth * numROIs; if (pooledIndex < pooledVolume) { int pl = pooledIndex % numTotBins; int pc = (pooledIndex / numTotBins) % depth; int pr = (pooledIndex / numTotBins / depth); // roi no int roi_image = ROIs[5 * pr + 0]; int roi_start_h = ROIs[5 * pr + 1]; int roi_start_w = ROIs[5 * pr + 2]; int roi_end_h = ROIs[5 * pr + 3]; int roi_end_w = ROIs[5 * pr + 4]; if(roi_start_w==roi_end_w) { if(roi_start_w>0) roi_start_w--; else roi_end_w++; } if(roi_start_h==roi_end_h) { if(roi_start_h>0) roi_start_h--; else roi_end_h++; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // Find pyramid level and bin int pb = -1; int pLevel = -1; int numBins = 0; for(int l=0;l<numLevels;l++) { if(pl-numBins>=0 && 
pl-numBins<static_cast<int>(levels[l] * levels[l])) { pb = pl - numBins; pLevel = l; } numBins += static_cast<int>(levels[l] * levels[l]); } int pooledWidth = levels[pLevel]; int pooledHeight = levels[pLevel]; int pw = pb / pooledHeight; int ph = pb % pooledHeight; const T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooledHeight); const T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooledWidth); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); data += (roi_image * depth + pc) * (width*height); derData += (roi_image * depth + pc) * (width*height); int bestIndex = wstart * height + hstart; T bestValue = is_empty ? 0 : data[bestIndex]; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int index = w * height + h ; T value = data[index] ; if (value > bestValue) { bestValue = value ; bestIndex = index ; } } } /* This is bad, but required to eliminate a race condition when writing to bottom_diff. Caffe goes the other way around, but requires remembering the layer output, or the maximal indexes. 
atomicAdd(add, val) */ atomicAdd(derData + bestIndex, derPooled[pooledIndex]) ; } } /* ---------------------------------------------------------------- */ /* spp_average_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void spp_average_backward_kernel (T* derData, const T* data, const T* derPooled, const int height, const int width, const int depth, const int size, const int numTotBins, const int numLevels, const T* levels, const int numROIs, const T* ROIs) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; int pooledVolume = numTotBins * depth * numROIs; if (pooledIndex < pooledVolume) { int pl = pooledIndex % numTotBins; int pc = (pooledIndex / numTotBins) % depth; int pr = (pooledIndex / numTotBins / depth); // roi no int roi_image = ROIs[5 * pr + 0]; int roi_start_h = ROIs[5 * pr + 1]; int roi_start_w = ROIs[5 * pr + 2]; int roi_end_h = ROIs[5 * pr + 3]; int roi_end_w = ROIs[5 * pr + 4]; if(roi_start_w==roi_end_w) { if(roi_start_w>0) roi_start_w--; else roi_end_w++; } if(roi_start_h==roi_end_h) { if(roi_start_h>0) roi_start_h--; else roi_end_h++; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // Find pyramid level and bin int pb = -1; int pLevel = -1; int numBins = 0; for(int l=0;l<numLevels;l++) { if(pl-numBins>=0 && pl-numBins<static_cast<int>(levels[l] * levels[l])) { pb = pl - numBins; pLevel = l; } numBins += static_cast<int>(levels[l] * levels[l]); } int pooledWidth = levels[pLevel]; int pooledHeight = levels[pLevel]; int pw = pb / pooledHeight; int ph = pb % pooledHeight; const T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooledHeight); const T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooledWidth); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = 
static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); data += (roi_image * depth + pc) * (width*height); derData += (roi_image * depth + pc) * (width*height); const T coef = 1.f / (T)((wend-wstart)*(hend-hstart)); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int index = w * height + h ; /* This is bad, but required to eliminate a race condition when writing to bottom_diff. Caffe goes the other way around, but requires remembering the layer output, or the maximal indexes. atomicAdd(add, val) */ atomicAdd(derData + index, derPooled[pooledIndex] * coef) ; } } } } /* ---------------------------------------------------------------- */ /* Interface */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template <typename type> struct spp_max<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* pooled, type const* data, size_t height, size_t width, size_t depth, size_t size, size_t numTotBins, size_t numLevels, type const* levels, size_t numROIs, type const* ROIs) { int pooledVolume = numTotBins * depth * numROIs; spp_max_kernel<type> <<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>> (pooled, data, height, width, depth, size, numTotBins, numLevels, levels, numROIs, ROIs); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? 
vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, type const* derPooled, size_t height, size_t width, size_t depth, size_t size, size_t numTotBins, size_t numLevels, type const* levels, size_t numROIs, type const* ROIs) { int pooledVolume = numTotBins * depth * numROIs; spp_max_backward_kernel<type> <<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derData, data, derPooled, height, width, depth, size, numTotBins, numLevels, levels, numROIs, ROIs); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; // spp_max template <typename type> struct spp_average<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* pooled, type const* data, size_t height, size_t width, size_t depth, size_t size, size_t numTotBins, size_t numLevels, type const* levels, size_t numROIs, type const* ROIs) { int pooledVolume = numTotBins * depth * numROIs; spp_average_kernel<type> <<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>> (pooled, data, height, width, depth, size, numTotBins, numLevels, levels, numROIs, ROIs); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, type const* derPooled, size_t height, size_t width, size_t depth, size_t size, size_t numTotBins, size_t numLevels, type const * levels, size_t numROIs, type const * ROIs) { int pooledVolume = numTotBins * depth * numROIs; spp_average_backward_kernel<type> <<< divideAndRoundUp(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derData, data, derPooled, height, width, depth, size, numTotBins, numLevels, levels, numROIs, ROIs); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? 
vl::VLE_Success : vl::VLE_Cuda ; } } ; // spp_average } } ; // namespace vl::impl // Instantiations template struct vl::impl::spp_max<vl::VLDT_GPU, float> ; template struct vl::impl::spp_average<vl::VLDT_GPU, float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::spp_max<vl::VLDT_GPU, double> ; template struct vl::impl::spp_average<vl::VLDT_GPU, double> ; #endif
5dfcb44d26710a88cfa1accc134ffef96ec316b0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #define WIDTH 256 #define HEIGHT 256 #define NSUBSAMPLES 2 #define NAO_SAMPLES 8 #define BLOCK_SIZE 16 typedef struct _vec { float x; float y; float z; } vec; typedef struct _Isect { float t; vec p; vec n; int hit; } Isect; typedef struct _Sphere { vec center; float radius; } Sphere; typedef struct _Plane { vec p; vec n; } Plane; typedef struct _Ray { vec org; vec dir; } Ray; __device__ static float vdot(vec v0, vec v1) { return v0.x * v1.x + v0.y * v1.y + v0.z * v1.z; } __device__ static void vcross(vec *c, vec v0, vec v1) { c->x = v0.y * v1.z - v0.z * v1.y; c->y = v0.z * v1.x - v0.x * v1.z; c->z = v0.x * v1.y - v0.y * v1.x; } __device__ static void vnormalize(vec *c) { float length = sqrtf(vdot((*c), (*c))); if (fabs(length) > 1.0e-17f) { c->x /= length; c->y /= length; c->z /= length; } } __device__ void ray_sphere_intersect(Isect *isect, const Ray *ray, const Sphere *sphere) { vec rs; rs.x = ray->org.x - sphere->center.x; rs.y = ray->org.y - sphere->center.y; rs.z = ray->org.z - sphere->center.z; float B = vdot(rs, ray->dir); float C = vdot(rs, rs) - sphere->radius * sphere->radius; float D = B * B - C; if (D > 0.f) { float t = -B - sqrtf(D); if ((t > 0.f) && (t < isect->t)) { isect->t = t; isect->hit = 1; isect->p.x = ray->org.x + ray->dir.x * t; isect->p.y = ray->org.y + ray->dir.y * t; isect->p.z = ray->org.z + ray->dir.z * t; isect->n.x = isect->p.x - sphere->center.x; isect->n.y = isect->p.y - sphere->center.y; isect->n.z = isect->p.z - sphere->center.z; vnormalize(&(isect->n)); } } } __device__ void ray_plane_intersect(Isect *isect, const Ray *ray, const Plane *plane) { float d = -vdot(plane->p, plane->n); float v = vdot(ray->dir, plane->n); if (fabsf(v) < 1.0e-17f) return; float t = -(vdot(ray->org, plane->n) + d) / v; if ((t > 0.f) && (t < isect->t)) { 
isect->t = t; isect->hit = 1; isect->p.x = ray->org.x + ray->dir.x * t; isect->p.y = ray->org.y + ray->dir.y * t; isect->p.z = ray->org.z + ray->dir.z * t; isect->n = plane->n; } } __device__ void orthoBasis(vec *basis, vec n) { basis[2] = n; basis[1].x = 0.f; basis[1].y = 0.f; basis[1].z = 0.f; if ((n.x < 0.6f) && (n.x > -0.6f)) { basis[1].x = 1.0f; } else if ((n.y < 0.6f) && (n.y > -0.6f)) { basis[1].y = 1.0f; } else if ((n.z < 0.6f) && (n.z > -0.6f)) { basis[1].z = 1.0f; } else { basis[1].x = 1.0f; } vcross(&basis[0], basis[1], basis[2]); vnormalize(&basis[0]); vcross(&basis[1], basis[2], basis[0]); vnormalize(&basis[1]); } class RNG { public: unsigned int x; const int fmask = (1 << 23) - 1; __device__ RNG(const unsigned int seed) { x = seed; } __device__ int next() { x ^= x >> 6; x ^= x << 17; x ^= x >> 9; return int(x); } __device__ float operator()(void) { union { float f; int i; } u; u.i = (next() & fmask) | 0x3f800000; return u.f - 1.f; } }; __device__ void ambient_occlusion(vec *col, const Isect *isect, const Sphere *spheres, const Plane *plane, RNG &rng) { int i, j; int ntheta = NAO_SAMPLES; int nphi = NAO_SAMPLES; float eps = 0.0001f; vec p; p.x = isect->p.x + eps * isect->n.x; p.y = isect->p.y + eps * isect->n.y; p.z = isect->p.z + eps * isect->n.z; vec basis[3]; orthoBasis(basis, isect->n); float occlusion = 0.f; for (j = 0; j < ntheta; j++) { for (i = 0; i < nphi; i++) { float theta = sqrtf(rng()); float phi = 2.0f * (float)M_PI * rng(); float x = cosf(phi) * theta; float y = sinf(phi) * theta; float z = sqrtf(1.0f - theta * theta); // local -> global float rx = x * basis[0].x + y * basis[1].x + z * basis[2].x; float ry = x * basis[0].y + y * basis[1].y + z * basis[2].y; float rz = x * basis[0].z + y * basis[1].z + z * basis[2].z; Ray ray; ray.org = p; ray.dir.x = rx; ray.dir.y = ry; ray.dir.z = rz; Isect occIsect; occIsect.t = 1.0e+17f; occIsect.hit = 0; ray_sphere_intersect(&occIsect, &ray, spheres); ray_sphere_intersect(&occIsect, &ray, spheres+1); 
ray_sphere_intersect(&occIsect, &ray, spheres+2); ray_plane_intersect (&occIsect, &ray, plane); if (occIsect.hit) occlusion += 1.f; } } occlusion = (ntheta * nphi - occlusion) / (float)(ntheta * nphi); col->x = occlusion; col->y = occlusion; col->z = occlusion; } __device__ unsigned char clamp(float f) { int i = (int)(f * 255.5f); if (i < 0) i = 0; if (i > 255) i = 255; return (unsigned char)i; } void init_scene(Sphere* spheres, Plane &plane) { spheres[0].center.x = -2.0f; spheres[0].center.y = 0.0f; spheres[0].center.z = -3.5f; spheres[0].radius = 0.5f; spheres[1].center.x = -0.5f; spheres[1].center.y = 0.0f; spheres[1].center.z = -3.0f; spheres[1].radius = 0.5f; spheres[2].center.x = 1.0f; spheres[2].center.y = 0.0f; spheres[2].center.z = -2.2f; spheres[2].radius = 0.5f; plane.p.x = 0.0f; plane.p.y = -0.5f; plane.p.z = 0.0f; plane.n.x = 0.0f; plane.n.y = 1.0f; plane.n.z = 0.0f; } void saveppm(const char *fname, int w, int h, unsigned char *img) { FILE *fp; fp = fopen(fname, "wb"); assert(fp); fprintf(fp, "P6\n"); fprintf(fp, "%d %d\n", w, h); fprintf(fp, "255\n"); fwrite(img, w * h * 3, 1, fp); fclose(fp); } __global__ void render_kernel (unsigned char *fimg, const Sphere *spheres, const Plane plane, const int h, const int w, const int nsubsamples) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { RNG rng(y * w + x); float s0 = 0; float s1 = 0; float s2 = 0; for(int v = 0; v < nsubsamples; v++ ) { for(int u = 0; u < nsubsamples; u++ ) { float px = ( x + ( u / ( float )nsubsamples ) - ( w / 2.0f ) ) / ( w / 2.0f ); float py = -( y + ( v / ( float )nsubsamples ) - ( h / 2.0f ) ) / ( h / 2.0f ); Ray ray; ray.org.x = 0.f; ray.org.y = 0.f; ray.org.z = 0.f; ray.dir.x = px; ray.dir.y = py; ray.dir.z = -1.f; vnormalize( &( ray.dir ) ); Isect isect; isect.t = 1.0e+17f; isect.hit = 0; ray_sphere_intersect( &isect, &ray, spheres ); ray_sphere_intersect( &isect, &ray, spheres + 1 ); ray_sphere_intersect( 
&isect, &ray, spheres + 2 ); ray_plane_intersect ( &isect, &ray, &plane ); if( isect.hit ) { vec col; ambient_occlusion( &col, &isect, spheres, &plane, rng ); s0 += col.x; s1 += col.y; s2 += col.z; } } } fimg[ 3 * ( y * w + x ) + 0 ] = clamp ( s0 / ( float )( nsubsamples * nsubsamples ) ); fimg[ 3 * ( y * w + x ) + 1 ] = clamp ( s1 / ( float )( nsubsamples * nsubsamples ) ); fimg[ 3 * ( y * w + x ) + 2 ] = clamp ( s2 / ( float )( nsubsamples * nsubsamples ) ); } } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void render(unsigned char *img, int w, int h, int nsubsamples, const Sphere* spheres, const Plane &plane) { unsigned char *d_img; Sphere *d_spheres; gpuErrchk( hipMalloc((void**)&d_img, sizeof(unsigned char) * w * h * 3) ); gpuErrchk( hipMalloc((void**)&d_spheres, sizeof(Sphere) * 3) ); gpuErrchk( hipMemcpy(d_spheres, spheres, sizeof(Sphere) * 3, hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( render_kernel) , dim3(dim3((w+BLOCK_SIZE-1)/BLOCK_SIZE, (h+BLOCK_SIZE-1)/BLOCK_SIZE)), dim3(dim3(BLOCK_SIZE, BLOCK_SIZE)) , 0, 0, d_img, d_spheres, plane, h, w, nsubsamples); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipMemcpy(img, d_img, sizeof(unsigned char) * w * h * 3, hipMemcpyDeviceToHost) ); hipFree(d_img); hipFree(d_spheres); } int main(int argc, char **argv) { if (argc != 2) { printf("Usage: %s <iterations>\n", argv[0]); return 1; } const int LOOPMAX = atoi(argv[1]); // three spheres in the image Sphere spheres[3]; Plane plane; init_scene(spheres, plane); unsigned char *img = ( unsigned char * )malloc( WIDTH * HEIGHT * 3 ); clock_t start; start = clock(); for( int i = 0; i < LOOPMAX; ++i ){ render( img, WIDTH, HEIGHT, NSUBSAMPLES, spheres, plane ); } clock_t end = clock(); float delta = ( float )end - ( float )start; float msec 
= delta * 1000.0 / ( float )CLOCKS_PER_SEC; printf( "Total render time (%d iterations): %f sec.\n", LOOPMAX, msec / 1000.0 ); printf( "Average render time: %f sec.\n", msec / 1000.0 / (float)LOOPMAX ); saveppm( "ao.ppm", WIDTH, HEIGHT, img ); free( img ); return 0; }
5dfcb44d26710a88cfa1accc134ffef96ec316b0.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <math.h> #include <time.h> #include <cuda.h> #define WIDTH 256 #define HEIGHT 256 #define NSUBSAMPLES 2 #define NAO_SAMPLES 8 #define BLOCK_SIZE 16 typedef struct _vec { float x; float y; float z; } vec; typedef struct _Isect { float t; vec p; vec n; int hit; } Isect; typedef struct _Sphere { vec center; float radius; } Sphere; typedef struct _Plane { vec p; vec n; } Plane; typedef struct _Ray { vec org; vec dir; } Ray; __device__ static float vdot(vec v0, vec v1) { return v0.x * v1.x + v0.y * v1.y + v0.z * v1.z; } __device__ static void vcross(vec *c, vec v0, vec v1) { c->x = v0.y * v1.z - v0.z * v1.y; c->y = v0.z * v1.x - v0.x * v1.z; c->z = v0.x * v1.y - v0.y * v1.x; } __device__ static void vnormalize(vec *c) { float length = sqrtf(vdot((*c), (*c))); if (fabs(length) > 1.0e-17f) { c->x /= length; c->y /= length; c->z /= length; } } __device__ void ray_sphere_intersect(Isect *isect, const Ray *ray, const Sphere *sphere) { vec rs; rs.x = ray->org.x - sphere->center.x; rs.y = ray->org.y - sphere->center.y; rs.z = ray->org.z - sphere->center.z; float B = vdot(rs, ray->dir); float C = vdot(rs, rs) - sphere->radius * sphere->radius; float D = B * B - C; if (D > 0.f) { float t = -B - sqrtf(D); if ((t > 0.f) && (t < isect->t)) { isect->t = t; isect->hit = 1; isect->p.x = ray->org.x + ray->dir.x * t; isect->p.y = ray->org.y + ray->dir.y * t; isect->p.z = ray->org.z + ray->dir.z * t; isect->n.x = isect->p.x - sphere->center.x; isect->n.y = isect->p.y - sphere->center.y; isect->n.z = isect->p.z - sphere->center.z; vnormalize(&(isect->n)); } } } __device__ void ray_plane_intersect(Isect *isect, const Ray *ray, const Plane *plane) { float d = -vdot(plane->p, plane->n); float v = vdot(ray->dir, plane->n); if (fabsf(v) < 1.0e-17f) return; float t = -(vdot(ray->org, plane->n) + d) / v; if ((t > 0.f) && (t < isect->t)) { isect->t = t; isect->hit = 1; isect->p.x = ray->org.x + ray->dir.x * t; 
isect->p.y = ray->org.y + ray->dir.y * t; isect->p.z = ray->org.z + ray->dir.z * t; isect->n = plane->n; } } __device__ void orthoBasis(vec *basis, vec n) { basis[2] = n; basis[1].x = 0.f; basis[1].y = 0.f; basis[1].z = 0.f; if ((n.x < 0.6f) && (n.x > -0.6f)) { basis[1].x = 1.0f; } else if ((n.y < 0.6f) && (n.y > -0.6f)) { basis[1].y = 1.0f; } else if ((n.z < 0.6f) && (n.z > -0.6f)) { basis[1].z = 1.0f; } else { basis[1].x = 1.0f; } vcross(&basis[0], basis[1], basis[2]); vnormalize(&basis[0]); vcross(&basis[1], basis[2], basis[0]); vnormalize(&basis[1]); } class RNG { public: unsigned int x; const int fmask = (1 << 23) - 1; __device__ RNG(const unsigned int seed) { x = seed; } __device__ int next() { x ^= x >> 6; x ^= x << 17; x ^= x >> 9; return int(x); } __device__ float operator()(void) { union { float f; int i; } u; u.i = (next() & fmask) | 0x3f800000; return u.f - 1.f; } }; __device__ void ambient_occlusion(vec *col, const Isect *isect, const Sphere *spheres, const Plane *plane, RNG &rng) { int i, j; int ntheta = NAO_SAMPLES; int nphi = NAO_SAMPLES; float eps = 0.0001f; vec p; p.x = isect->p.x + eps * isect->n.x; p.y = isect->p.y + eps * isect->n.y; p.z = isect->p.z + eps * isect->n.z; vec basis[3]; orthoBasis(basis, isect->n); float occlusion = 0.f; for (j = 0; j < ntheta; j++) { for (i = 0; i < nphi; i++) { float theta = sqrtf(rng()); float phi = 2.0f * (float)M_PI * rng(); float x = cosf(phi) * theta; float y = sinf(phi) * theta; float z = sqrtf(1.0f - theta * theta); // local -> global float rx = x * basis[0].x + y * basis[1].x + z * basis[2].x; float ry = x * basis[0].y + y * basis[1].y + z * basis[2].y; float rz = x * basis[0].z + y * basis[1].z + z * basis[2].z; Ray ray; ray.org = p; ray.dir.x = rx; ray.dir.y = ry; ray.dir.z = rz; Isect occIsect; occIsect.t = 1.0e+17f; occIsect.hit = 0; ray_sphere_intersect(&occIsect, &ray, spheres); ray_sphere_intersect(&occIsect, &ray, spheres+1); ray_sphere_intersect(&occIsect, &ray, spheres+2); ray_plane_intersect 
(&occIsect, &ray, plane); if (occIsect.hit) occlusion += 1.f; } } occlusion = (ntheta * nphi - occlusion) / (float)(ntheta * nphi); col->x = occlusion; col->y = occlusion; col->z = occlusion; } __device__ unsigned char clamp(float f) { int i = (int)(f * 255.5f); if (i < 0) i = 0; if (i > 255) i = 255; return (unsigned char)i; } void init_scene(Sphere* spheres, Plane &plane) { spheres[0].center.x = -2.0f; spheres[0].center.y = 0.0f; spheres[0].center.z = -3.5f; spheres[0].radius = 0.5f; spheres[1].center.x = -0.5f; spheres[1].center.y = 0.0f; spheres[1].center.z = -3.0f; spheres[1].radius = 0.5f; spheres[2].center.x = 1.0f; spheres[2].center.y = 0.0f; spheres[2].center.z = -2.2f; spheres[2].radius = 0.5f; plane.p.x = 0.0f; plane.p.y = -0.5f; plane.p.z = 0.0f; plane.n.x = 0.0f; plane.n.y = 1.0f; plane.n.z = 0.0f; } void saveppm(const char *fname, int w, int h, unsigned char *img) { FILE *fp; fp = fopen(fname, "wb"); assert(fp); fprintf(fp, "P6\n"); fprintf(fp, "%d %d\n", w, h); fprintf(fp, "255\n"); fwrite(img, w * h * 3, 1, fp); fclose(fp); } __global__ void render_kernel (unsigned char *fimg, const Sphere *spheres, const Plane plane, const int h, const int w, const int nsubsamples) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { RNG rng(y * w + x); float s0 = 0; float s1 = 0; float s2 = 0; for(int v = 0; v < nsubsamples; v++ ) { for(int u = 0; u < nsubsamples; u++ ) { float px = ( x + ( u / ( float )nsubsamples ) - ( w / 2.0f ) ) / ( w / 2.0f ); float py = -( y + ( v / ( float )nsubsamples ) - ( h / 2.0f ) ) / ( h / 2.0f ); Ray ray; ray.org.x = 0.f; ray.org.y = 0.f; ray.org.z = 0.f; ray.dir.x = px; ray.dir.y = py; ray.dir.z = -1.f; vnormalize( &( ray.dir ) ); Isect isect; isect.t = 1.0e+17f; isect.hit = 0; ray_sphere_intersect( &isect, &ray, spheres ); ray_sphere_intersect( &isect, &ray, spheres + 1 ); ray_sphere_intersect( &isect, &ray, spheres + 2 ); ray_plane_intersect ( &isect, &ray, &plane 
); if( isect.hit ) { vec col; ambient_occlusion( &col, &isect, spheres, &plane, rng ); s0 += col.x; s1 += col.y; s2 += col.z; } } } fimg[ 3 * ( y * w + x ) + 0 ] = clamp ( s0 / ( float )( nsubsamples * nsubsamples ) ); fimg[ 3 * ( y * w + x ) + 1 ] = clamp ( s1 / ( float )( nsubsamples * nsubsamples ) ); fimg[ 3 * ( y * w + x ) + 2 ] = clamp ( s2 / ( float )( nsubsamples * nsubsamples ) ); } } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void render(unsigned char *img, int w, int h, int nsubsamples, const Sphere* spheres, const Plane &plane) { unsigned char *d_img; Sphere *d_spheres; gpuErrchk( cudaMalloc((void**)&d_img, sizeof(unsigned char) * w * h * 3) ); gpuErrchk( cudaMalloc((void**)&d_spheres, sizeof(Sphere) * 3) ); gpuErrchk( cudaMemcpy(d_spheres, spheres, sizeof(Sphere) * 3, cudaMemcpyHostToDevice) ); render_kernel <<< dim3((w+BLOCK_SIZE-1)/BLOCK_SIZE, (h+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE, BLOCK_SIZE) >>> (d_img, d_spheres, plane, h, w, nsubsamples); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaMemcpy(img, d_img, sizeof(unsigned char) * w * h * 3, cudaMemcpyDeviceToHost) ); cudaFree(d_img); cudaFree(d_spheres); } int main(int argc, char **argv) { if (argc != 2) { printf("Usage: %s <iterations>\n", argv[0]); return 1; } const int LOOPMAX = atoi(argv[1]); // three spheres in the image Sphere spheres[3]; Plane plane; init_scene(spheres, plane); unsigned char *img = ( unsigned char * )malloc( WIDTH * HEIGHT * 3 ); clock_t start; start = clock(); for( int i = 0; i < LOOPMAX; ++i ){ render( img, WIDTH, HEIGHT, NSUBSAMPLES, spheres, plane ); } clock_t end = clock(); float delta = ( float )end - ( float )start; float msec = delta * 1000.0 / ( float )CLOCKS_PER_SEC; printf( "Total render time (%d iterations): %f 
sec.\n", LOOPMAX, msec / 1000.0 ); printf( "Average render time: %f sec.\n", msec / 1000.0 / (float)LOOPMAX ); saveppm( "ao.ppm", WIDTH, HEIGHT, img ); free( img ); return 0; }
eb58ca815be893b7c05c14ae4e9adaf480209817.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cfloat> #include "caffe/FRCNN/roi_mask_pooling_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIMaskPoolForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int half_part, const Dtype roi_scale, const Dtype mask_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype x1 = bottom_rois[1]; Dtype y1 = bottom_rois[2]; Dtype x2 = bottom_rois[3]; Dtype y2 = bottom_rois[4]; Dtype xc = (x1 + x2) / 2; Dtype yc = (y1 + y2) / 2; Dtype w = x2 - x1; Dtype h = y2 - y1; // rescale roi with regard to roi_scale and half_part Dtype xx1 = xc - w * roi_scale / 2; Dtype xx2 = xc + w * roi_scale / 2; Dtype yy1 = yc - h * roi_scale / 2; Dtype yy2 = yc + h * roi_scale / 2; switch(half_part) { case 0: break; case 1: xx2 = xc; break; case 2: xx1 = xc; break; case 3: yy2 = yc; break; case 4: yy1 = yc; break; default: break; } // rescale mask with regard to mask_scale and half_part bool isMask = mask_scale > 0.0; Dtype mx1 = xc - w * mask_scale / 2.0; Dtype mx2 = xc + w * mask_scale / 2.0; Dtype my1 = yc - h * mask_scale / 2.0; Dtype my2 = yc + h * mask_scale / 2.0; // 
rescaled roi/mask size on conv feature map int roi_start_w = round(xx1 * spatial_scale); int roi_start_h = round(yy1 * spatial_scale); int roi_end_w = round(xx2 * spatial_scale); int roi_end_h = round(yy2 * spatial_scale); int mask_start_w = round(mx1 * spatial_scale); int mask_start_h = round(my1 * spatial_scale); int mask_end_w = round(mx2 * spatial_scale); int mask_end_h = round(my2 * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; Dtype value = bottom_data[bottom_index]; // apply mask if (isMask) { if (w>=mask_start_w && w<=mask_end_w && h>=mask_start_h && h<=mask_end_h) { value = 0; } } // skip mask if (value > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void ROIMaskPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIMaskPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, spatial_scale_, half_part_, roi_scale_, mask_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIMaskPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int half_part, const Dtype roi_scale, const Dtype mask_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this 
element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype x1 = offset_bottom_rois[1]; Dtype y1 = offset_bottom_rois[2]; Dtype x2 = offset_bottom_rois[3]; Dtype y2 = offset_bottom_rois[4]; Dtype xc = (x1 + x2) / 2; Dtype yc = (y1 + y2) / 2; Dtype w1 = x2 - x1; Dtype h1 = y2 - y1; // rescale roi with regard to roi_scale and half_part Dtype xx1 = xc - w1 * roi_scale / 2; Dtype xx2 = xc + w1 * roi_scale / 2; Dtype yy1 = yc - h1 * roi_scale / 2; Dtype yy2 = yc + h1 * roi_scale / 2; switch(half_part) { case 0: break; case 1: xx2 = xc; break; case 2: xx1 = xc; break; case 3: yy2 = yc; break; case 4: yy1 = yc; break; default: break; } int roi_start_w = round(xx1 * spatial_scale); int roi_start_h = round(yy1 * spatial_scale); int roi_end_w = round(xx2 * spatial_scale); int roi_end_h = round(yy2 * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w 
+ 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIMaskPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIMaskPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data, top[0]->num(), spatial_scale_, half_part_, roi_scale_, mask_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIMaskPoolingLayer); } // namespace caffe
eb58ca815be893b7c05c14ae4e9adaf480209817.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cfloat> #include "caffe/FRCNN/roi_mask_pooling_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIMaskPoolForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int half_part, const Dtype roi_scale, const Dtype mask_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype x1 = bottom_rois[1]; Dtype y1 = bottom_rois[2]; Dtype x2 = bottom_rois[3]; Dtype y2 = bottom_rois[4]; Dtype xc = (x1 + x2) / 2; Dtype yc = (y1 + y2) / 2; Dtype w = x2 - x1; Dtype h = y2 - y1; // rescale roi with regard to roi_scale and half_part Dtype xx1 = xc - w * roi_scale / 2; Dtype xx2 = xc + w * roi_scale / 2; Dtype yy1 = yc - h * roi_scale / 2; Dtype yy2 = yc + h * roi_scale / 2; switch(half_part) { case 0: break; case 1: xx2 = xc; break; case 2: xx1 = xc; break; case 3: yy2 = yc; break; case 4: yy1 = yc; break; default: break; } // rescale mask with regard to mask_scale and half_part bool isMask = mask_scale > 0.0; Dtype mx1 = xc - w * mask_scale / 2.0; Dtype mx2 = xc + w * mask_scale / 2.0; Dtype my1 = yc - h * mask_scale / 2.0; Dtype my2 = yc + h * mask_scale / 2.0; // rescaled roi/mask size on conv feature map int roi_start_w = round(xx1 * spatial_scale); 
int roi_start_h = round(yy1 * spatial_scale); int roi_end_w = round(xx2 * spatial_scale); int roi_end_h = round(yy2 * spatial_scale); int mask_start_w = round(mx1 * spatial_scale); int mask_start_h = round(my1 * spatial_scale); int mask_end_w = round(mx2 * spatial_scale); int mask_end_h = round(my2 * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; Dtype value = bottom_data[bottom_index]; // apply mask if (isMask) { if (w>=mask_start_w && w<=mask_end_w && h>=mask_start_h && h<=mask_end_h) { value = 0; } } // skip mask if (value > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void ROIMaskPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) ROIMaskPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, spatial_scale_, half_part_, roi_scale_, mask_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIMaskPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int half_part, const Dtype roi_scale, const Dtype mask_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < 
num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype x1 = offset_bottom_rois[1]; Dtype y1 = offset_bottom_rois[2]; Dtype x2 = offset_bottom_rois[3]; Dtype y2 = offset_bottom_rois[4]; Dtype xc = (x1 + x2) / 2; Dtype yc = (y1 + y2) / 2; Dtype w1 = x2 - x1; Dtype h1 = y2 - y1; // rescale roi with regard to roi_scale and half_part Dtype xx1 = xc - w1 * roi_scale / 2; Dtype xx2 = xc + w1 * roi_scale / 2; Dtype yy1 = yc - h1 * roi_scale / 2; Dtype yy2 = yc + h1 * roi_scale / 2; switch(half_part) { case 0: break; case 1: xx2 = xc; break; case 2: xx1 = xc; break; case 3: yy2 = yc; break; case 4: yy1 = yc; break; default: break; } int roi_start_w = round(xx1 * spatial_scale); int roi_start_h = round(yy1 * spatial_scale); int roi_end_w = round(xx2 * spatial_scale); int roi_end_h = round(yy2 * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = 
min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIMaskPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) ROIMaskPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data, top[0]->num(), spatial_scale_, half_part_, roi_scale_, mask_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIMaskPoolingLayer); } // namespace caffe
3799be4e9e2bd255c22e6f6db3fdeb4058dbd012.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void affineDeformation(const float *A, const unsigned char *in, unsigned char *out, int newWidth, int newHeight ,int oldWidth, int oldHeight) { int Col = blockIdx.x * blockDim.x + threadIdx.x; int Row = blockIdx.y * blockDim.y + threadIdx.y; if(Col < newWidth && Row < newHeight){ float a00 = A[0]; float a01 = A[1]; float a10 = A[3]; float a11 = A[4]; float t0 = A[2]; float t1 = A[5]; float det_A = a00*a11 - a01*a10; float inv_det_A = 1.0 / det_A; int currentPixel = Row * newWidth + Col; float xa = Col - t0; float ya = Row - t1; float xp = inv_det_A * (a11 * xa - a01 * ya); float yp = inv_det_A * (a00 * ya - a10 * xa); int value = 0; int xi = (int)xp; int yi = (int)yp; if (xi >= 0 && yi >= 0 && xi < oldWidth-1 && yi < oldHeight-1){ int I00 = in[yi * oldWidth + xi]; int I10 = in[yi * oldWidth + (xi+1)]; int I01 = in[(yi + 1) * oldWidth + xi]; int I11 = in[(yi + 1) * oldWidth + (xi+1)]; float alpha = xp - xi; float beta = yp - yi; float interp = (1.0 - alpha) * (1.0 - beta) * I00 + (1.0 - alpha) * beta * I01 + alpha * (1.0 - beta) * I10 + alpha * beta * I11; int interpi = (int)interp; if (interpi < 0) interpi = 0; else if (interpi > 255) interpi = 255; value = interpi; } out[currentPixel] = value; } }
3799be4e9e2bd255c22e6f6db3fdeb4058dbd012.cu
__global__ void affineDeformation(const float *A, const unsigned char *in, unsigned char *out, int newWidth, int newHeight ,int oldWidth, int oldHeight) { int Col = blockIdx.x * blockDim.x + threadIdx.x; int Row = blockIdx.y * blockDim.y + threadIdx.y; if(Col < newWidth && Row < newHeight){ float a00 = A[0]; float a01 = A[1]; float a10 = A[3]; float a11 = A[4]; float t0 = A[2]; float t1 = A[5]; float det_A = a00*a11 - a01*a10; float inv_det_A = 1.0 / det_A; int currentPixel = Row * newWidth + Col; float xa = Col - t0; float ya = Row - t1; float xp = inv_det_A * (a11 * xa - a01 * ya); float yp = inv_det_A * (a00 * ya - a10 * xa); int value = 0; int xi = (int)xp; int yi = (int)yp; if (xi >= 0 && yi >= 0 && xi < oldWidth-1 && yi < oldHeight-1){ int I00 = in[yi * oldWidth + xi]; int I10 = in[yi * oldWidth + (xi+1)]; int I01 = in[(yi + 1) * oldWidth + xi]; int I11 = in[(yi + 1) * oldWidth + (xi+1)]; float alpha = xp - xi; float beta = yp - yi; float interp = (1.0 - alpha) * (1.0 - beta) * I00 + (1.0 - alpha) * beta * I01 + alpha * (1.0 - beta) * I10 + alpha * beta * I11; int interpi = (int)interp; if (interpi < 0) interpi = 0; else if (interpi > 255) interpi = 255; value = interpi; } out[currentPixel] = value; } }
92307f5dde5eb88e200645445608b22a0cca83cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// Copyright (c) 2018, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. 
////////////////////////////////////////////////////////////////////// /// /// NAME: dgemm /// /// PURPOSE: This program tests the efficiency with which a dense matrix /// dense multiplication is carried out /// /// USAGE: The program takes as input the matrix order, /// the number of times the matrix-matrix multiplication /// is carried out, and, optionally, a tile size for matrix /// blocking /// /// <progname> <# iterations> <matrix order> [<batches>] /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// FUNCTIONS CALLED: /// /// Other than OpenMP or standard C functions, the following /// functions are used in this program: /// /// cblasDgemm() /// hipblasDgemmStridedBatched() /// /// HISTORY: Written by Rob Van der Wijngaart, February 2009. /// Converted to C++11 by Jeff Hammond, December, 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" __global__ void init(int order, const int matrices, double * A, double * B, double * C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; for (int b=0; b<matrices; ++b) { if ((i<order) && (j<order)) { A[b*order*order+i*order+j] = i; B[b*order*order+i*order+j] = i; C[b*order*order+i*order+j] = 0; } } } __global__ void init(int order, const int matrices, double * C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; for (int b=0; b<matrices; ++b) { if ((i<order) && (j<order)) { C[b*order*order+i*order+j] = 0; } } } void prk_dgemm(const hipblasHandle_t & h, const int order, const int batches, double * A, double * B, double * C) { const double alpha = 1.0; const double beta = 1.0; for (int b=0; b<batches; ++b) { double * pA = &(A[b*order*order]); double * pB = &(B[b*order*order]); double * pC = &(C[b*order*order]); prk::CUDA::check( hipblasDgemm(h, HIPBLAS_OP_N, HIPBLAS_OP_N, // opA, opB order, order, 
order, // m, n, k &alpha, // alpha pA, order, // A, lda pB, order, // B, ldb &beta, // beta pC, order) ); // C, ldc } prk::CUDA::check( hipDeviceSynchronize() ); } void prk_bgemm(const hipblasHandle_t & h, const int order, const int batches, double * A, double * B, double * C) { const double alpha = 1.0; const double beta = 1.0; prk::CUDA::check( hipblasDgemmStridedBatched(h, HIPBLAS_OP_N, HIPBLAS_OP_N, order, order, order, &alpha, (const double *)A, order, order*order, (const double *)B, order, order*order, &beta, C, order, order*order, batches) ); prk::CUDA::check( hipDeviceSynchronize() ); // hipblasStatus_t hipblasDgemmBatched(hipblasHandle_t handle, // hipblasOperation_t transa, // hipblasOperation_t transb, // int m, int n, int k, // const double *alpha, // const double *Aarray[], int lda, // const double *Barray[], int ldb, // const double *beta, // double *Carray[], int ldc, // int batchCount) } int main(int argc, char * argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUBLAS Dense matrix-matrix multiplication: C += A x B" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// int iterations; int order; int batches = 0; int input_copy = 0; try { if (argc < 2) { throw "Usage: <# iterations> <matrix order> [<batches>] [<copy input every iteration [0/1]>]"; } iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } order = std::atoi(argv[2]); if (order <= 0) { throw "ERROR: Matrix Order must be greater than 0"; } else if (order > ::floor(std::sqrt(INT_MAX))) { throw "ERROR: matrix dimension too large - overflow risk"; } if (argc>3) { batches = std::atoi(argv[3]); } if (argc > 4) { input_copy = std::atoi(argv[3]); } } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << 
"Number of iterations = " << iterations << std::endl; std::cout << "Matrix order = " << order << std::endl; if (batches == 0) { std::cout << "No batching" << std::endl; } else if (batches < 0) { std::cout << "Batch size = " << -batches << " (loop over legacy BLAS)" << std::endl; } else if (batches > 0) { std::cout << "Batch size = " << batches << " (batched BLAS)" << std::endl; } std::cout << "Input copy = " << (input_copy ? "yes" : "no") << std::endl; hipblasHandle_t h; prk::CUDA::check( hipblasCreate(&h) ); const int tile_size = 32; dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space for matrices ////////////////////////////////////////////////////////////////////// double dgemm_time(0); const int matrices = (batches==0 ? 1 : abs(batches)); const size_t nelems = (size_t)order * (size_t)order; const size_t bytes = nelems * sizeof(double); // host buffers double * h_a; double * h_b; double * h_c; prk::CUDA::check( hipHostMalloc((void**)&h_a, bytes) ); prk::CUDA::check( hipHostMalloc((void**)&h_b, bytes) ); prk::CUDA::check( hipHostMalloc((void**)&h_c, matrices*bytes) ); // device buffers double * d_a; double * d_b; double * d_c; prk::CUDA::check( hipMalloc((void**)&d_a, matrices*bytes) ); prk::CUDA::check( hipMalloc((void**)&d_b, matrices*bytes) ); prk::CUDA::check( hipMalloc((void**)&d_c, matrices*bytes) ); if (input_copy) { for (int i=0; i<order; ++i) { for (int j=0; j<order; ++j) { h_a[i*order+j] = i; h_b[i*order+j] = i; } } for (int b=0; b<matrices; ++b) { prk::CUDA::check( hipMemcpyAsync(&(d_a[b*order*order]), h_a, bytes, hipMemcpyHostToDevice) ); prk::CUDA::check( hipMemcpyAsync(&(d_b[b*order*order]), h_b, bytes, hipMemcpyHostToDevice) ); } prk::CUDA::check( hipDeviceSynchronize() ); hipLaunchKernelGGL(( init), dim3(dimGrid), dim3(dimBlock), 0, 0, order, matrices, d_c); } 
else { hipLaunchKernelGGL(( init), dim3(dimGrid), dim3(dimBlock), 0, 0, order, matrices, d_a, d_b, d_c); } { for (auto iter = 0; iter<=iterations; iter++) { if (iter==1) dgemm_time = prk::wtime(); if (input_copy) { for (int b=0; b<matrices; ++b) { prk::CUDA::check( hipMemcpyAsync(&(d_a[b*order*order]), h_a, bytes, hipMemcpyHostToDevice) ); prk::CUDA::check( hipMemcpyAsync(&(d_b[b*order*order]), h_b, bytes, hipMemcpyHostToDevice) ); } prk::CUDA::check( hipDeviceSynchronize() ); } if (batches == 0) { prk_dgemm(h, order, matrices, d_a, d_b, d_c); } else if (batches < 0) { prk_dgemm(h, order, matrices, d_a, d_b, d_c); } else if (batches > 0) { prk_bgemm(h, order, matrices, d_a, d_b, d_c); } } dgemm_time = prk::wtime() - dgemm_time; } // copy output back to host prk::CUDA::check( hipMemcpyAsync(&(h_c[0]), d_c, matrices*bytes, hipMemcpyDeviceToHost) ); prk::CUDA::check( hipFree(d_c) ); prk::CUDA::check( hipFree(d_b) ); prk::CUDA::check( hipFree(d_a) ); prk::CUDA::check( hipHostFree(h_a) ); prk::CUDA::check( hipHostFree(h_b) ); prk::CUDA::check( hipblasDestroy(h) ); prk::CUDA::check( hipDeviceSynchronize() ); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// const double epsilon = 1.0e-8; const double forder = static_cast<double>(order); const double reference = 0.25 * ::pow(forder,3) * ::pow(forder-1.0,2) * (iterations+1); double residuum(0); for (int b=0; b<matrices; ++b) { const auto checksum = prk_reduce( &(h_c[b*order*order+0]), &(h_c[b*order*order+nelems]), 0.0); residuum += std::abs(checksum-reference)/reference; } residuum/=matrices; if (residuum < epsilon) { #if VERBOSE std::cout << "Reference checksum = " << reference << "\n" << "Actual checksum = " << checksum << std::endl; #endif std::cout << "Solution validates" << std::endl; auto avgtime = dgemm_time/iterations/matrices; auto nflops = 2.0 * ::pow(forder,3); std::cout << "Rate (MF/s): " 
<< 1.0e-6 * nflops/avgtime << " Avg time (s): " << avgtime << std::endl; } else { std::cout << "Reference checksum = " << reference << "\n" << "Residuum = " << residuum << std::endl; return 1; } prk::CUDA::check( hipHostFree(h_c) ); return 0; }
92307f5dde5eb88e200645445608b22a0cca83cf.cu
/// /// Copyright (c) 2018, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. 
////////////////////////////////////////////////////////////////////// /// /// NAME: dgemm /// /// PURPOSE: This program tests the efficiency with which a dense matrix /// dense multiplication is carried out /// /// USAGE: The program takes as input the matrix order, /// the number of times the matrix-matrix multiplication /// is carried out, and, optionally, a tile size for matrix /// blocking /// /// <progname> <# iterations> <matrix order> [<batches>] /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// FUNCTIONS CALLED: /// /// Other than OpenMP or standard C functions, the following /// functions are used in this program: /// /// cblasDgemm() /// cublasDgemmStridedBatched() /// /// HISTORY: Written by Rob Van der Wijngaart, February 2009. /// Converted to C++11 by Jeff Hammond, December, 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" __global__ void init(int order, const int matrices, double * A, double * B, double * C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; for (int b=0; b<matrices; ++b) { if ((i<order) && (j<order)) { A[b*order*order+i*order+j] = i; B[b*order*order+i*order+j] = i; C[b*order*order+i*order+j] = 0; } } } __global__ void init(int order, const int matrices, double * C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; for (int b=0; b<matrices; ++b) { if ((i<order) && (j<order)) { C[b*order*order+i*order+j] = 0; } } } void prk_dgemm(const cublasHandle_t & h, const int order, const int batches, double * A, double * B, double * C) { const double alpha = 1.0; const double beta = 1.0; for (int b=0; b<batches; ++b) { double * pA = &(A[b*order*order]); double * pB = &(B[b*order*order]); double * pC = &(C[b*order*order]); prk::CUDA::check( cublasDgemm(h, CUBLAS_OP_N, CUBLAS_OP_N, // opA, opB order, order, order, // 
m, n, k &alpha, // alpha pA, order, // A, lda pB, order, // B, ldb &beta, // beta pC, order) ); // C, ldc } prk::CUDA::check( cudaDeviceSynchronize() ); } void prk_bgemm(const cublasHandle_t & h, const int order, const int batches, double * A, double * B, double * C) { const double alpha = 1.0; const double beta = 1.0; prk::CUDA::check( cublasDgemmStridedBatched(h, CUBLAS_OP_N, CUBLAS_OP_N, order, order, order, &alpha, (const double *)A, order, order*order, (const double *)B, order, order*order, &beta, C, order, order*order, batches) ); prk::CUDA::check( cudaDeviceSynchronize() ); // cublasStatus_t cublasDgemmBatched(cublasHandle_t handle, // cublasOperation_t transa, // cublasOperation_t transb, // int m, int n, int k, // const double *alpha, // const double *Aarray[], int lda, // const double *Barray[], int ldb, // const double *beta, // double *Carray[], int ldc, // int batchCount) } int main(int argc, char * argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUBLAS Dense matrix-matrix multiplication: C += A x B" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// int iterations; int order; int batches = 0; int input_copy = 0; try { if (argc < 2) { throw "Usage: <# iterations> <matrix order> [<batches>] [<copy input every iteration [0/1]>]"; } iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } order = std::atoi(argv[2]); if (order <= 0) { throw "ERROR: Matrix Order must be greater than 0"; } else if (order > std::floor(std::sqrt(INT_MAX))) { throw "ERROR: matrix dimension too large - overflow risk"; } if (argc>3) { batches = std::atoi(argv[3]); } if (argc > 4) { input_copy = std::atoi(argv[3]); } } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << "Number of 
iterations = " << iterations << std::endl; std::cout << "Matrix order = " << order << std::endl; if (batches == 0) { std::cout << "No batching" << std::endl; } else if (batches < 0) { std::cout << "Batch size = " << -batches << " (loop over legacy BLAS)" << std::endl; } else if (batches > 0) { std::cout << "Batch size = " << batches << " (batched BLAS)" << std::endl; } std::cout << "Input copy = " << (input_copy ? "yes" : "no") << std::endl; cublasHandle_t h; prk::CUDA::check( cublasCreate(&h) ); const int tile_size = 32; dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space for matrices ////////////////////////////////////////////////////////////////////// double dgemm_time(0); const int matrices = (batches==0 ? 1 : abs(batches)); const size_t nelems = (size_t)order * (size_t)order; const size_t bytes = nelems * sizeof(double); // host buffers double * h_a; double * h_b; double * h_c; prk::CUDA::check( cudaMallocHost((void**)&h_a, bytes) ); prk::CUDA::check( cudaMallocHost((void**)&h_b, bytes) ); prk::CUDA::check( cudaMallocHost((void**)&h_c, matrices*bytes) ); // device buffers double * d_a; double * d_b; double * d_c; prk::CUDA::check( cudaMalloc((void**)&d_a, matrices*bytes) ); prk::CUDA::check( cudaMalloc((void**)&d_b, matrices*bytes) ); prk::CUDA::check( cudaMalloc((void**)&d_c, matrices*bytes) ); if (input_copy) { for (int i=0; i<order; ++i) { for (int j=0; j<order; ++j) { h_a[i*order+j] = i; h_b[i*order+j] = i; } } for (int b=0; b<matrices; ++b) { prk::CUDA::check( cudaMemcpyAsync(&(d_a[b*order*order]), h_a, bytes, cudaMemcpyHostToDevice) ); prk::CUDA::check( cudaMemcpyAsync(&(d_b[b*order*order]), h_b, bytes, cudaMemcpyHostToDevice) ); } prk::CUDA::check( cudaDeviceSynchronize() ); init<<<dimGrid, dimBlock>>>(order, matrices, d_c); } else { init<<<dimGrid, 
dimBlock>>>(order, matrices, d_a, d_b, d_c); } { for (auto iter = 0; iter<=iterations; iter++) { if (iter==1) dgemm_time = prk::wtime(); if (input_copy) { for (int b=0; b<matrices; ++b) { prk::CUDA::check( cudaMemcpyAsync(&(d_a[b*order*order]), h_a, bytes, cudaMemcpyHostToDevice) ); prk::CUDA::check( cudaMemcpyAsync(&(d_b[b*order*order]), h_b, bytes, cudaMemcpyHostToDevice) ); } prk::CUDA::check( cudaDeviceSynchronize() ); } if (batches == 0) { prk_dgemm(h, order, matrices, d_a, d_b, d_c); } else if (batches < 0) { prk_dgemm(h, order, matrices, d_a, d_b, d_c); } else if (batches > 0) { prk_bgemm(h, order, matrices, d_a, d_b, d_c); } } dgemm_time = prk::wtime() - dgemm_time; } // copy output back to host prk::CUDA::check( cudaMemcpyAsync(&(h_c[0]), d_c, matrices*bytes, cudaMemcpyDeviceToHost) ); prk::CUDA::check( cudaFree(d_c) ); prk::CUDA::check( cudaFree(d_b) ); prk::CUDA::check( cudaFree(d_a) ); prk::CUDA::check( cudaFreeHost(h_a) ); prk::CUDA::check( cudaFreeHost(h_b) ); prk::CUDA::check( cublasDestroy(h) ); prk::CUDA::check( cudaDeviceSynchronize() ); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// const double epsilon = 1.0e-8; const double forder = static_cast<double>(order); const double reference = 0.25 * std::pow(forder,3) * std::pow(forder-1.0,2) * (iterations+1); double residuum(0); for (int b=0; b<matrices; ++b) { const auto checksum = prk_reduce( &(h_c[b*order*order+0]), &(h_c[b*order*order+nelems]), 0.0); residuum += std::abs(checksum-reference)/reference; } residuum/=matrices; if (residuum < epsilon) { #if VERBOSE std::cout << "Reference checksum = " << reference << "\n" << "Actual checksum = " << checksum << std::endl; #endif std::cout << "Solution validates" << std::endl; auto avgtime = dgemm_time/iterations/matrices; auto nflops = 2.0 * std::pow(forder,3); std::cout << "Rate (MF/s): " << 1.0e-6 * nflops/avgtime << " Avg time 
(s): " << avgtime << std::endl; } else { std::cout << "Reference checksum = " << reference << "\n" << "Residuum = " << residuum << std::endl; return 1; } prk::CUDA::check( cudaFreeHost(h_c) ); return 0; }
b84fbbec004bad996838339e36169c39f29f4aea.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ---------------------------------------------------------------------------- #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "open3d/t/pipelines/kernel/TransformationConverterImpl.h" namespace open3d { namespace t { namespace pipelines { namespace kernel { template <typename scalar_t> __global__ void PoseToTransformationKernel(scalar_t *transformation_ptr, const scalar_t *X_ptr) { PoseToTransformationImpl(transformation_ptr, X_ptr); } template <typename scalar_t> void PoseToTransformationCUDA(scalar_t *transformation_ptr, const scalar_t *X_ptr) { utility::LogError("Unsupported data type."); } template <> void PoseToTransformationCUDA<float>(float *transformation_ptr, const float *X_ptr) { hipLaunchKernelGGL(( PoseToTransformationKernel<float>), dim3(1), dim3(1), 0, 0, transformation_ptr, X_ptr); } template <> void PoseToTransformationCUDA<double>(double *transformation_ptr, const double *X_ptr) { hipLaunchKernelGGL(( PoseToTransformationKernel<double>), dim3(1), dim3(1), 0, 0, transformation_ptr, X_ptr); } } // namespace kernel } // namespace pipelines } // namespace t } // namespace open3d
b84fbbec004bad996838339e36169c39f29f4aea.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ---------------------------------------------------------------------------- #include <cuda.h> #include <cuda_runtime.h> #include "open3d/t/pipelines/kernel/TransformationConverterImpl.h" namespace open3d { namespace t { namespace pipelines { namespace kernel { template <typename scalar_t> __global__ void PoseToTransformationKernel(scalar_t *transformation_ptr, const scalar_t *X_ptr) { PoseToTransformationImpl(transformation_ptr, X_ptr); } template <typename scalar_t> void PoseToTransformationCUDA(scalar_t *transformation_ptr, const scalar_t *X_ptr) { utility::LogError("Unsupported data type."); } template <> void PoseToTransformationCUDA<float>(float *transformation_ptr, const float *X_ptr) { PoseToTransformationKernel<float><<<1, 1>>>(transformation_ptr, X_ptr); } template <> void PoseToTransformationCUDA<double>(double *transformation_ptr, const double *X_ptr) { PoseToTransformationKernel<double><<<1, 1>>>(transformation_ptr, X_ptr); } } // namespace kernel } // namespace pipelines } // namespace t } // namespace open3d
2b2809df8ec46f87656e804c93e98da097b6eb3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"headerfiles.h" __global__ void Fnorm(float *dev_image, float *dev_back_image, float *dev_norm_image) { for (int i=0;i<Nx;i++) { for (int j = threadIdx.x+blockIdx.x*blockDim.x;j<Ny*Nz;j += blockDim.x * gridDim.x) { dev_image[j+Ny*Nz*i] = dev_image[j+Ny*Nz*i]*(dev_back_image[j+Ny*Nz*i])/(dev_norm_image[j+Ny*Nz*i]); } } } void CalcNormImage(float *norm_image, int numoflinesForNorm, const char* filename) { for (int i=0;i<Nx*Ny*Nz;i++){*(norm_image+i) = 0;} float * dev_back_image; hipMalloc ( ( void**)&dev_back_image, Nx*Ny*Nz * sizeof(float) ); hipMemcpy(dev_back_image, norm_image, Nx*Ny*Nz *sizeof(float ),hipMemcpyHostToDevice); FILE * lor_data; lor_data = fopen(filename, "r"); if (lor_data == NULL) { printf("lor data file not found\n"); exit(1); } else { printf("lor data file found as %s\n",filename); } // read data from lor file: short *lor_data_array= (short *)malloc(sizeof(short) * numoflinesForNorm * 4); for (int i=0;i<numoflinesForNorm;i++) { fscanf(lor_data,"%hd\t%hd\t%hd\t%hd\n", &lor_data_array[4*i], &lor_data_array[4*i+1], &lor_data_array[4*i+2], &lor_data_array[4*i+3]); } // copy data from local to device short *dev_lor_data_array; hipMalloc ( ( void**)&dev_lor_data_array, 4*numoflinesForNorm * sizeof(short) ); hipMemcpy(dev_lor_data_array, lor_data_array, 4*numoflinesForNorm *sizeof(short ),hipMemcpyHostToDevice); free(lor_data_array); float * dx_array; float * dy_array; float * dz_array; hipMalloc ( ( void**)&dx_array,numoflinesForNorm*sizeof(float)); hipMalloc ( ( void**)&dy_array,numoflinesForNorm*sizeof(float)); hipMalloc ( ( void**)&dz_array,numoflinesForNorm*sizeof(float)); hipLaunchKernelGGL(( convertolor), dim3(128),dim3(128), 0, 0, dev_lor_data_array,dx_array,dy_array,dz_array,numoflinesForNorm); float *hx_array= (float *)malloc(sizeof(float)*numoflinesForNorm); float *hy_array= (float *)malloc(sizeof(float)*numoflinesForNorm); float *hz_array= (float 
*)malloc(sizeof(float)*numoflinesForNorm); hipMemcpy(hx_array, dx_array, sizeof(float)*numoflinesForNorm,hipMemcpyDeviceToHost); hipMemcpy(hy_array, dy_array, sizeof(float)*numoflinesForNorm,hipMemcpyDeviceToHost); hipMemcpy(hz_array, dz_array, sizeof(float)*numoflinesForNorm,hipMemcpyDeviceToHost); hipFree(dx_array);hipFree(dy_array);hipFree(dz_array); int *indexxmax = (int *)malloc(sizeof(int)*numoflinesForNorm); int *indexymax = (int *)malloc(sizeof(int)*numoflinesForNorm); int *indexzmax = (int *)malloc(sizeof(int)*numoflinesForNorm); int *sizen = (int *)malloc(sizeof(int)*3); partlor(hx_array,hy_array,hz_array, numoflinesForNorm, indexxmax, indexymax, indexzmax, sizen); free(hx_array); free(hy_array); free(hz_array); int *dev_indexxmax; int *dev_indexymax;// int *dev_indexzmax; hipMalloc ( ( void**)&dev_indexxmax, sizen[0] * sizeof(int) ); hipMemcpy(dev_indexxmax, indexxmax, sizen[0] * sizeof(int),hipMemcpyHostToDevice); hipMalloc ( ( void**)&dev_indexymax, sizen[1] * sizeof(int) ); hipMemcpy(dev_indexymax, indexymax, sizen[1] * sizeof(int),hipMemcpyHostToDevice); free(indexxmax); free(indexymax); free(indexzmax); float *image = (float *)malloc(sizeof(float)*Nx*Ny*Nz); for (int i=0;i<Nx*Ny*Nz;i++){*(image+i) = 1.0;} float * dev_image; hipMalloc ( ( void**)&dev_image, Nx*Ny*Nz * sizeof(float) ); hipMemcpy(dev_image, image, Nx*Ny*Nz *sizeof(float ),hipMemcpyHostToDevice); free(image); float * dev_tempback_image; hipMalloc ( ( void**)&dev_tempback_image, Nx*Ny*Nz * sizeof(float) ); hipMemcpy(dev_tempback_image, norm_image, Nx*Ny*Nz *sizeof(float ),hipMemcpyHostToDevice); hipMemcpy(dev_back_image, norm_image, Nx*Ny*Nz *sizeof(float ),hipMemcpyHostToDevice); int nlines = 128*128; CUDAlor* lines; hipMalloc ( ( void**)&lines, nlines * sizeof(CUDAlor) ); // 7 elements for the lines structure int totalnumoflinesxz = sizen[1]; int totalnumoflinesyz = sizen[0]; for (int i=0; i<totalnumoflinesxz/nlines; i++) { int realnlines = nlines; int noffset = i*nlines; 
hipLaunchKernelGGL(( convertolorxz), dim3(128),dim3(128), 0, 0, dev_lor_data_array,dev_indexymax,lines,realnlines,noffset); hipLaunchKernelGGL(( Forwardprojxz), dim3(128),dim3(128), 0, 0, dev_image, lines, realnlines); hipLaunchKernelGGL(( Backprojxz), dim3(128),dim3(128), 0, 0, dev_image,dev_back_image,lines,realnlines,1); } hipLaunchKernelGGL(( Frotate), dim3(128),dim3(128), 0, 0, dev_back_image, dev_tempback_image); hipMemcpy(dev_back_image, dev_tempback_image, Nx*Ny*Nz *sizeof(float ),hipMemcpyDeviceToDevice); for (int i=0; i<totalnumoflinesyz/nlines; i++) { int realnlines = nlines; int noffset = i*nlines; hipLaunchKernelGGL(( convertoloryz), dim3(128),dim3(128), 0, 0, dev_lor_data_array,dev_indexxmax,lines,realnlines,noffset); hipLaunchKernelGGL(( Forwardprojyz), dim3(128),dim3(128), 0, 0, dev_image, lines, realnlines); hipLaunchKernelGGL(( Backprojyz), dim3(128),dim3(128), 0, 0, dev_image,dev_back_image,lines,realnlines,1); } hipLaunchKernelGGL(( Frotate), dim3(128),dim3(128), 0, 0, dev_back_image, dev_tempback_image); hipMemcpy(dev_back_image, dev_tempback_image, Nx*Ny*Nz *sizeof(float ),hipMemcpyDeviceToDevice); // Frotate<<<128,128>>>(dev_back_image, dev_tempback_image); // hipMemcpy(dev_back_image, dev_tempback_image, Nx*Ny*Nz *sizeof(float ),hipMemcpyDeviceToDevice); // Frotate<<<128,128>>>(dev_back_image, dev_tempback_image); // hipMemcpy(dev_back_image, dev_tempback_image, Nx*Ny*Nz *sizeof(float ),hipMemcpyDeviceToDevice); hipMemcpy(norm_image, dev_back_image, Nx*Ny*Nz *sizeof(float ),hipMemcpyDeviceToHost); hipFree(dev_lor_data_array); hipFree(dev_image); hipFree(dev_back_image); hipFree(dev_tempback_image); hipFree(lines); hipFree(dev_indexxmax); hipFree(dev_indexymax);free(sizen);//hipFree(dev_indexzmax); }
2b2809df8ec46f87656e804c93e98da097b6eb3c.cu
#include"headerfiles.h" __global__ void Fnorm(float *dev_image, float *dev_back_image, float *dev_norm_image) { for (int i=0;i<Nx;i++) { for (int j = threadIdx.x+blockIdx.x*blockDim.x;j<Ny*Nz;j += blockDim.x * gridDim.x) { dev_image[j+Ny*Nz*i] = dev_image[j+Ny*Nz*i]*(dev_back_image[j+Ny*Nz*i])/(dev_norm_image[j+Ny*Nz*i]); } } } void CalcNormImage(float *norm_image, int numoflinesForNorm, const char* filename) { for (int i=0;i<Nx*Ny*Nz;i++){*(norm_image+i) = 0;} float * dev_back_image; cudaMalloc ( ( void**)&dev_back_image, Nx*Ny*Nz * sizeof(float) ); cudaMemcpy(dev_back_image, norm_image, Nx*Ny*Nz *sizeof(float ),cudaMemcpyHostToDevice); FILE * lor_data; lor_data = fopen(filename, "r"); if (lor_data == NULL) { printf("lor data file not found\n"); exit(1); } else { printf("lor data file found as %s\n",filename); } // read data from lor file: short *lor_data_array= (short *)malloc(sizeof(short) * numoflinesForNorm * 4); for (int i=0;i<numoflinesForNorm;i++) { fscanf(lor_data,"%hd\t%hd\t%hd\t%hd\n", &lor_data_array[4*i], &lor_data_array[4*i+1], &lor_data_array[4*i+2], &lor_data_array[4*i+3]); } // copy data from local to device short *dev_lor_data_array; cudaMalloc ( ( void**)&dev_lor_data_array, 4*numoflinesForNorm * sizeof(short) ); cudaMemcpy(dev_lor_data_array, lor_data_array, 4*numoflinesForNorm *sizeof(short ),cudaMemcpyHostToDevice); free(lor_data_array); float * dx_array; float * dy_array; float * dz_array; cudaMalloc ( ( void**)&dx_array,numoflinesForNorm*sizeof(float)); cudaMalloc ( ( void**)&dy_array,numoflinesForNorm*sizeof(float)); cudaMalloc ( ( void**)&dz_array,numoflinesForNorm*sizeof(float)); convertolor<<<128,128>>>(dev_lor_data_array,dx_array,dy_array,dz_array,numoflinesForNorm); float *hx_array= (float *)malloc(sizeof(float)*numoflinesForNorm); float *hy_array= (float *)malloc(sizeof(float)*numoflinesForNorm); float *hz_array= (float *)malloc(sizeof(float)*numoflinesForNorm); cudaMemcpy(hx_array, dx_array, 
sizeof(float)*numoflinesForNorm,cudaMemcpyDeviceToHost); cudaMemcpy(hy_array, dy_array, sizeof(float)*numoflinesForNorm,cudaMemcpyDeviceToHost); cudaMemcpy(hz_array, dz_array, sizeof(float)*numoflinesForNorm,cudaMemcpyDeviceToHost); cudaFree(dx_array);cudaFree(dy_array);cudaFree(dz_array); int *indexxmax = (int *)malloc(sizeof(int)*numoflinesForNorm); int *indexymax = (int *)malloc(sizeof(int)*numoflinesForNorm); int *indexzmax = (int *)malloc(sizeof(int)*numoflinesForNorm); int *sizen = (int *)malloc(sizeof(int)*3); partlor(hx_array,hy_array,hz_array, numoflinesForNorm, indexxmax, indexymax, indexzmax, sizen); free(hx_array); free(hy_array); free(hz_array); int *dev_indexxmax; int *dev_indexymax;// int *dev_indexzmax; cudaMalloc ( ( void**)&dev_indexxmax, sizen[0] * sizeof(int) ); cudaMemcpy(dev_indexxmax, indexxmax, sizen[0] * sizeof(int),cudaMemcpyHostToDevice); cudaMalloc ( ( void**)&dev_indexymax, sizen[1] * sizeof(int) ); cudaMemcpy(dev_indexymax, indexymax, sizen[1] * sizeof(int),cudaMemcpyHostToDevice); free(indexxmax); free(indexymax); free(indexzmax); float *image = (float *)malloc(sizeof(float)*Nx*Ny*Nz); for (int i=0;i<Nx*Ny*Nz;i++){*(image+i) = 1.0;} float * dev_image; cudaMalloc ( ( void**)&dev_image, Nx*Ny*Nz * sizeof(float) ); cudaMemcpy(dev_image, image, Nx*Ny*Nz *sizeof(float ),cudaMemcpyHostToDevice); free(image); float * dev_tempback_image; cudaMalloc ( ( void**)&dev_tempback_image, Nx*Ny*Nz * sizeof(float) ); cudaMemcpy(dev_tempback_image, norm_image, Nx*Ny*Nz *sizeof(float ),cudaMemcpyHostToDevice); cudaMemcpy(dev_back_image, norm_image, Nx*Ny*Nz *sizeof(float ),cudaMemcpyHostToDevice); int nlines = 128*128; CUDAlor* lines; cudaMalloc ( ( void**)&lines, nlines * sizeof(CUDAlor) ); // 7 elements for the lines structure int totalnumoflinesxz = sizen[1]; int totalnumoflinesyz = sizen[0]; for (int i=0; i<totalnumoflinesxz/nlines; i++) { int realnlines = nlines; int noffset = i*nlines; 
convertolorxz<<<128,128>>>(dev_lor_data_array,dev_indexymax,lines,realnlines,noffset); Forwardprojxz<<<128,128>>>(dev_image, lines, realnlines); Backprojxz<<<128,128>>>(dev_image,dev_back_image,lines,realnlines,1); } Frotate<<<128,128>>>(dev_back_image, dev_tempback_image); cudaMemcpy(dev_back_image, dev_tempback_image, Nx*Ny*Nz *sizeof(float ),cudaMemcpyDeviceToDevice); for (int i=0; i<totalnumoflinesyz/nlines; i++) { int realnlines = nlines; int noffset = i*nlines; convertoloryz<<<128,128>>>(dev_lor_data_array,dev_indexxmax,lines,realnlines,noffset); Forwardprojyz<<<128,128>>>(dev_image, lines, realnlines); Backprojyz<<<128,128>>>(dev_image,dev_back_image,lines,realnlines,1); } Frotate<<<128,128>>>(dev_back_image, dev_tempback_image); cudaMemcpy(dev_back_image, dev_tempback_image, Nx*Ny*Nz *sizeof(float ),cudaMemcpyDeviceToDevice); // Frotate<<<128,128>>>(dev_back_image, dev_tempback_image); // cudaMemcpy(dev_back_image, dev_tempback_image, Nx*Ny*Nz *sizeof(float ),cudaMemcpyDeviceToDevice); // Frotate<<<128,128>>>(dev_back_image, dev_tempback_image); // cudaMemcpy(dev_back_image, dev_tempback_image, Nx*Ny*Nz *sizeof(float ),cudaMemcpyDeviceToDevice); cudaMemcpy(norm_image, dev_back_image, Nx*Ny*Nz *sizeof(float ),cudaMemcpyDeviceToHost); cudaFree(dev_lor_data_array); cudaFree(dev_image); cudaFree(dev_back_image); cudaFree(dev_tempback_image); cudaFree(lines); cudaFree(dev_indexxmax); cudaFree(dev_indexymax);free(sizen);//cudaFree(dev_indexzmax); }
750fbad462118e8fc70d6c033bf4e93852d93504.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define COMMENT "Histogram_GPU" #define RGB_COMPONENT_COLOR 255 #define DIM_BLOCO 32 #define DIM_GRID 1960 // 1960*1960*1024 typedef struct { unsigned char red, green, blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } img = (PPMImage *) malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } c = getc(fp); while (c == '#') { while (getc(fp) != '\n'); c = getc(fp); } ungetc(c, fp); if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } if (rgb_comp_color != RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n'); img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } __global__ void count_hist(PPMPixel *data, float *h, unsigned int n_){ // 
n_ total de pixels // Uma thread para cada combinao (pixel,rgb), onde pixel = 0, 1, ..., n_-1 e rgb = 0, 1, ..., 63 // Calcular ndice da thread e os respectivos ndices x, i, j, k, l // DIM_BLOCO = blockDim.x = blockDim.y // DIM_GRID = gridDim.x = gridDim.y unsigned int index = DIM_BLOCO*DIM_BLOCO*(DIM_GRID*blockIdx.x+blockIdx.y)+blockDim.y*threadIdx.x+threadIdx.y; unsigned int x = index/n_; unsigned int i = index%n_; unsigned int j = x/16; unsigned int k = (x-16*j)/4; unsigned int l = (x-16*j-4*k); if (index < 64*n_ && data[i].red == j && data[i].green == k && data[i].blue == l) { atomicAdd(&h[x],1.0); // o histograma normalizado depois para evitar erro de preciso } } void Histogram(PPMImage *image, float *h) { int i, j, k, l, x, count; int rows, cols; unsigned int n = image->y * image->x; double t_start, t_end, t_cbuffer, t_offload_enviar, t_kernel, t_offload_receber; cols = image->x; rows = image->y; for (i = 0; i < n; i++) { image->data[i].red = floor((image->data[i].red * 4) / 256); image->data[i].blue = floor((image->data[i].blue * 4) / 256); image->data[i].green = floor((image->data[i].green * 4) / 256); } // Parte movida para a GPU unsigned int size = 3*sizeof(unsigned char)*n; PPMPixel *d_data; float *d_h; t_start = rtclock(); hipMalloc((void **)&d_data,size); hipMalloc((void **)&d_h,64*sizeof(float)); t_end = rtclock(); t_cbuffer = t_end-t_start; t_start = rtclock(); hipMemcpy(d_data,image->data,size,hipMemcpyHostToDevice); hipMemcpy(d_h,h,64*sizeof(float),hipMemcpyHostToDevice); t_end = rtclock(); t_offload_enviar = t_end-t_start; dim3 dimGrid(DIM_GRID,DIM_GRID); dim3 dimBlock(DIM_BLOCO,DIM_BLOCO); t_start = rtclock(); hipLaunchKernelGGL(( count_hist), dim3(dimGrid),dim3(dimBlock), 0, 0, d_data,d_h,n); hipDeviceSynchronize(); t_end = rtclock(); t_kernel = t_end-t_start; t_start = rtclock(); hipMemcpy(h,d_h,64*sizeof(float),hipMemcpyDeviceToHost); t_end = rtclock(); t_offload_receber = t_end-t_start; hipFree(d_data); hipFree(d_h); for(i = 0; i < 64; i++) 
h[i] /= n; double t_total = t_cbuffer+t_offload_enviar+t_kernel+t_offload_receber; printf("%lf\t%lf\t%lf\t%lf\t%lf\n",t_cbuffer,t_offload_enviar,t_kernel,t_offload_receber,t_total); } int main(int argc, char *argv[]) { if( argc != 2 ) { printf("Too many or no one arguments supplied.\n"); } double t_start, t_end; int i; char *filename = argv[1]; //Recebendo o arquivo!; //scanf("%s", filename); PPMImage *image = readPPM(filename); float *h = (float*)malloc(sizeof(float) * 64); //Inicializar h for(i=0; i < 64; i++) h[i] = 0.0; t_start = rtclock(); Histogram(image, h); t_end = rtclock(); for (i = 0; i < 64; i++){ printf("%0.3f ", h[i]); } fprintf(stdout, "\n%0.6lfs\n", t_end - t_start); free(h); }
750fbad462118e8fc70d6c033bf4e93852d93504.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define COMMENT "Histogram_GPU" #define RGB_COMPONENT_COLOR 255 #define DIM_BLOCO 32 #define DIM_GRID 1960 // 1960*1960*1024 typedef struct { unsigned char red, green, blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } img = (PPMImage *) malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } c = getc(fp); while (c == '#') { while (getc(fp) != '\n'); c = getc(fp); } ungetc(c, fp); if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } if (rgb_comp_color != RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n'); img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } __global__ void count_hist(PPMPixel *data, float *h, unsigned int n_){ // n_ é ó total de pixels // Uma thread para cada combinação (pixel,rgb), onde pixel = 0, 
1, ..., n_-1 e rgb = 0, 1, ..., 63 // Calcular índice da thread e os respectivos índices x, i, j, k, l // DIM_BLOCO = blockDim.x = blockDim.y // DIM_GRID = gridDim.x = gridDim.y unsigned int index = DIM_BLOCO*DIM_BLOCO*(DIM_GRID*blockIdx.x+blockIdx.y)+blockDim.y*threadIdx.x+threadIdx.y; unsigned int x = index/n_; unsigned int i = index%n_; unsigned int j = x/16; unsigned int k = (x-16*j)/4; unsigned int l = (x-16*j-4*k); if (index < 64*n_ && data[i].red == j && data[i].green == k && data[i].blue == l) { atomicAdd(&h[x],1.0); // o histograma é normalizado depois para evitar erro de precisão } } void Histogram(PPMImage *image, float *h) { int i, j, k, l, x, count; int rows, cols; unsigned int n = image->y * image->x; double t_start, t_end, t_cbuffer, t_offload_enviar, t_kernel, t_offload_receber; cols = image->x; rows = image->y; for (i = 0; i < n; i++) { image->data[i].red = floor((image->data[i].red * 4) / 256); image->data[i].blue = floor((image->data[i].blue * 4) / 256); image->data[i].green = floor((image->data[i].green * 4) / 256); } // Parte movida para a GPU unsigned int size = 3*sizeof(unsigned char)*n; PPMPixel *d_data; float *d_h; t_start = rtclock(); cudaMalloc((void **)&d_data,size); cudaMalloc((void **)&d_h,64*sizeof(float)); t_end = rtclock(); t_cbuffer = t_end-t_start; t_start = rtclock(); cudaMemcpy(d_data,image->data,size,cudaMemcpyHostToDevice); cudaMemcpy(d_h,h,64*sizeof(float),cudaMemcpyHostToDevice); t_end = rtclock(); t_offload_enviar = t_end-t_start; dim3 dimGrid(DIM_GRID,DIM_GRID); dim3 dimBlock(DIM_BLOCO,DIM_BLOCO); t_start = rtclock(); count_hist<<<dimGrid,dimBlock>>>(d_data,d_h,n); cudaDeviceSynchronize(); t_end = rtclock(); t_kernel = t_end-t_start; t_start = rtclock(); cudaMemcpy(h,d_h,64*sizeof(float),cudaMemcpyDeviceToHost); t_end = rtclock(); t_offload_receber = t_end-t_start; cudaFree(d_data); cudaFree(d_h); for(i = 0; i < 64; i++) h[i] /= n; double t_total = t_cbuffer+t_offload_enviar+t_kernel+t_offload_receber; 
printf("%lf\t%lf\t%lf\t%lf\t%lf\n",t_cbuffer,t_offload_enviar,t_kernel,t_offload_receber,t_total); } int main(int argc, char *argv[]) { if( argc != 2 ) { printf("Too many or no one arguments supplied.\n"); } double t_start, t_end; int i; char *filename = argv[1]; //Recebendo o arquivo!; //scanf("%s", filename); PPMImage *image = readPPM(filename); float *h = (float*)malloc(sizeof(float) * 64); //Inicializar h for(i=0; i < 64; i++) h[i] = 0.0; t_start = rtclock(); Histogram(image, h); t_end = rtclock(); for (i = 0; i < 64; i++){ printf("%0.3f ", h[i]); } fprintf(stdout, "\n%0.6lfs\n", t_end - t_start); free(h); }
825172c694a806db44e9b9903ca94ccde356fc3d.hip
// !!! This is a file automatically generated by hipify!!! #include<windows.h> #include<GL\glew.h> #include<gl/GL.h> #include<stdio.h> #include<cuda.h> #include<cuda_runtime.h> //#include <hip/hip_runtime_api.h> #include<cuda_gl_interop.h> #include"helper_timer.h" #include<hip/hip_vector_types.h> #include"vmath.h" #include <hip/device_functions.h> #include <device_launch_parameters.h> #pragma comment(lib,"User32.lib") #pragma comment(lib,"GDI32.lib") #pragma comment(lib,"glew32.lib") #pragma comment(lib,"opengl32.lib") #define WIN_WIDTH 800 #define WIN_HEIGHT 600 using namespace vmath; enum { HAD_ATTRIBUTE_POSITION = 0, HAD_ATTRIBUTE_COLOR, HAD_ATTRIBUTE_NORMAL, HAD_ATTRIBUTE_TEXTURE0, }; LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM); FILE* gpFile; HWND ghwnd; HDC ghdc; HGLRC ghrc; DWORD dwStyle; WINDOWPLACEMENT wpPrev = { sizeof(WINDOWPLACEMENT) }; bool gbActiveWindow = false; bool gbFullscreen = false; bool gbIsEscapeKeyPressed = false; GLuint gVertexShaderObject; GLuint gFragmentShaderObject; GLuint gShaderProgramObject; GLuint gVao; GLuint gVbo; GLuint gMVPUniform, gColorUniform; cudaGraphicsResource_t cuda_vbo_resource = 0; mat4 gPerspectiveProjectionMatrix; float gfAnimate = 0.0f; hipError_t err = hipSuccess; char str[256]; int MESH_WIDTH = 64; int MESH_HEIGHT = 64; GLfloat color[] = { 1.0f,1.0f,1.0f,1.0f }; GLfloat gfTranslateFactor = -1.0f; StopWatchInterface* timer = NULL; int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling float avgFPS = 0.0f; unsigned int frameCount = 0; __global__ void calculate_vertices(float4* pos, unsigned int width, unsigned int height, float time) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; __syncthreads(); float v = y / (float)height; __syncthreads(); u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; float freq = 4.0f; float w = sinf(u * freq + time) * cosf(v * freq + time) * 0.5f; pos[y * width + x] = make_float4(u, 
w, v, 1.0f); } int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpszCmdLine, int iCmdShow) { void initialize(void); void display(void); void uninitialize(int); WNDCLASSEX wndclass; HWND hwnd; MSG msg; TCHAR szClassName[] = TEXT("My App"); bool bDone = false; if (fopen_s(&gpFile, "Log.txt", "w") != NULL) { MessageBox(NULL, TEXT("Cannot Create Log File !!!"), TEXT("Error"), MB_OK); exit(EXIT_FAILURE); } else fprintf(gpFile, "Log File Created Successfully...\n"); fclose(gpFile); wndclass.cbSize = sizeof(WNDCLASSEX); wndclass.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC; wndclass.cbClsExtra = 0; wndclass.cbWndExtra = 0; wndclass.hInstance = hInstance; wndclass.lpszClassName = szClassName; wndclass.lpszMenuName = NULL; wndclass.lpfnWndProc = WndProc; wndclass.hIcon = LoadIcon(NULL, IDI_APPLICATION); wndclass.hIconSm = LoadIcon(NULL, IDI_APPLICATION); wndclass.hCursor = LoadCursor(NULL, IDC_ARROW); wndclass.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH); RegisterClassEx(&wndclass); hwnd = CreateWindowEx(WS_EX_APPWINDOW, szClassName, TEXT("OpenGLPP : OpenGL - CUDA Interoperability"), WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN | WS_CLIPSIBLINGS | WS_VISIBLE, 100, 100, WIN_WIDTH, WIN_HEIGHT, NULL, NULL, hInstance, NULL); if (hwnd == NULL) { fprintf(gpFile, "Cannot Create Window...\n"); uninitialize(1); } ghwnd = hwnd; ShowWindow(hwnd, iCmdShow); SetFocus(hwnd); SetForegroundWindow(hwnd); initialize(); while (bDone == false) { if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) { if (msg.message == WM_QUIT) bDone = true; else { TranslateMessage(&msg); DispatchMessage(&msg); } } else { if (gbActiveWindow == true) { if (gbIsEscapeKeyPressed == true) bDone = true; display(); } } } uninitialize(0); return((int)msg.wParam); } LRESULT CALLBACK WndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam) { void update(void); void resize(int, int); void ToggleFullscreen(void); switch (iMsg) { case WM_ACTIVATE: if (HIWORD(wParam) == 0) gbActiveWindow = true; else 
gbActiveWindow = false; break; case WM_CREATE: break; case WM_SIZE: resize(LOWORD(lParam), HIWORD(lParam)); break; case WM_KEYDOWN: switch (wParam) { case VK_ESCAPE: gbIsEscapeKeyPressed = true; break; case 0x31: //1 MESH_WIDTH = 64; MESH_HEIGHT = 64; update(); break; case 0x32: //2 MESH_WIDTH = 128; MESH_HEIGHT = 128; update(); break; case 0x33: //3 MESH_WIDTH = 256; MESH_HEIGHT = 256; update(); break; case 0x34: //4 MESH_WIDTH = 512; MESH_HEIGHT = 512; update(); break; case 0x35: //5 MESH_WIDTH = 1024; MESH_HEIGHT = 1024; update(); break; case 0x52: //R color[0] = 1.0f; color[1] = 0.0f; color[2] = 0.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x47: //G color[0] = 0.0f; color[1] = 1.0f; color[2] = 0.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x42: //B color[0] = 0.0f; color[1] = 0.0f; color[2] = 1.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x43: //C color[0] = 0.0f; color[1] = 1.0f; color[2] = 1.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x4D: //M color[0] = 1.0f; color[1] = 0.0f; color[2] = 1.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x59: //Y color[0] = 1.0f; color[1] = 1.0f; color[2] = 0.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x4B: //K color[0] = 0.0f; color[1] = 0.0f; color[2] = 0.0f; glClearColor(1.0f, 1.0f, 1.0f, 0.0f); break; case VK_UP: gfTranslateFactor -= 0.05f; break; case VK_DOWN: gfTranslateFactor += 0.05f; break; case 0x46: if (gbFullscreen == false) { ToggleFullscreen(); gbFullscreen = true; } else { ToggleFullscreen(); gbFullscreen = false; } break; default: color[0] = 1.0f; color[1] = 1.0f; color[2] = 1.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; } break; case WM_DESTROY: PostQuitMessage(0); break; } return(DefWindowProc(hwnd, iMsg, wParam, lParam)); } void initialize(void) { void resize(int, int); void uninitialize(int); PIXELFORMATDESCRIPTOR pfd; int iPixelFormatIndex; ZeroMemory(&pfd, sizeof(PIXELFORMATDESCRIPTOR)); pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR); pfd.nVersion 
= 1; pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER; pfd.iPixelType = PFD_TYPE_RGBA; pfd.cColorBits = 24; pfd.cRedBits = 8; pfd.cGreenBits = 8; pfd.cBlueBits = 8; pfd.cAlphaBits = 8; pfd.cDepthBits = 32; ghdc = GetDC(ghwnd); if (ghdc == NULL) { fprintf(gpFile, "GetDC() Failed.\n"); uninitialize(1); } iPixelFormatIndex = ChoosePixelFormat(ghdc, &pfd); if (iPixelFormatIndex == 0) { fprintf(gpFile, "ChoosePixelFormat() Failed.\n"); uninitialize(1); } if (SetPixelFormat(ghdc, iPixelFormatIndex, &pfd) == FALSE) { fprintf(gpFile, "SetPixelFormat() Failed.\n"); uninitialize(1); } ghrc = wglCreateContext(ghdc); if (ghrc == NULL) { fprintf(gpFile, "wglCreateContext() Failed.\n"); uninitialize(1); } if (wglMakeCurrent(ghdc, ghrc) == FALSE) { fprintf(gpFile, "wglMakeCurrent() Failed"); uninitialize(1); } GLenum glew_error = glewInit(); if (glew_error != GLEW_OK) { wglDeleteContext(ghrc); ghrc = NULL; ReleaseDC(ghwnd, ghdc); ghdc = NULL; } //Vertex Shader gVertexShaderObject = glCreateShader(GL_VERTEX_SHADER); const GLchar* vertexShaderSourceCode = "#version 450 core" \ "\n" \ "in vec4 vPosition;" \ "uniform mat4 u_mvp_matrix;" \ "void main(void)" \ "{" \ "gl_Position = u_mvp_matrix * vPosition;" \ "}"; glShaderSource(gVertexShaderObject, 1, (const GLchar**)&vertexShaderSourceCode, NULL); glCompileShader(gVertexShaderObject); GLint iInfoLogLength = 0; GLint iShaderCompiledStatus = 0; char* szInfoLog = NULL; glGetShaderiv(gVertexShaderObject, GL_COMPILE_STATUS, &iShaderCompiledStatus); if (iShaderCompiledStatus == GL_FALSE) { glGetShaderiv(gVertexShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength > 0) { szInfoLog = (char*)malloc(iInfoLogLength); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(gVertexShaderObject, iInfoLogLength, &written, szInfoLog); fprintf(gpFile, "Vertex Shader Compilation Log : %s\n", szInfoLog); free(szInfoLog); uninitialize(1); exit(0); } } } //Fragment Shader gFragmentShaderObject = 
glCreateShader(GL_FRAGMENT_SHADER); const GLchar* fragmentShaderSourceCode = "#version 450 core"\ "\n"\ "out vec4 FragColor;"\ "uniform vec4 color;" \ "void main(void)"\ "{"\ "FragColor=color;"\ "}"; glShaderSource(gFragmentShaderObject, 1, (const GLchar**)&fragmentShaderSourceCode, NULL); glCompileShader(gFragmentShaderObject); glGetShaderiv(gFragmentShaderObject, GL_COMPILE_STATUS, &iShaderCompiledStatus); if (iShaderCompiledStatus == GL_FALSE) { glGetShaderiv(gFragmentShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength > 0) { szInfoLog = (char*)malloc(iInfoLogLength); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(gFragmentShaderObject, iInfoLogLength, &written, szInfoLog); fprintf(gpFile, "Fragment Shader Compilation Log : %s\n", szInfoLog); free(szInfoLog); uninitialize(1); exit(0); } } } //Shader Program gShaderProgramObject = glCreateProgram(); glAttachShader(gShaderProgramObject, gVertexShaderObject); glAttachShader(gShaderProgramObject, gFragmentShaderObject); glBindAttribLocation(gShaderProgramObject, HAD_ATTRIBUTE_POSITION, "vPosition"); glLinkProgram(gShaderProgramObject); GLint iShaderProgramLinkStatus = 0; glGetProgramiv(gShaderProgramObject, GL_LINK_STATUS, &iShaderProgramLinkStatus); if (iShaderProgramLinkStatus == GL_FALSE) { glGetProgramiv(gShaderProgramObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength > 0) { szInfoLog = (char*)malloc(iInfoLogLength); if (szInfoLog != NULL) { GLsizei written; glGetProgramInfoLog(gShaderProgramObject, iInfoLogLength, &written, szInfoLog); fprintf(gpFile, "Shader Program Link Log : %s\n", szInfoLog); free(szInfoLog); uninitialize(1); exit(0); } } } gMVPUniform = glGetUniformLocation(gShaderProgramObject, "u_mvp_matrix"); gColorUniform = glGetUniformLocation(gShaderProgramObject, "color"); glGenVertexArrays(1, &gVao); glBindVertexArray(gVao); glGenBuffers(1, &gVbo); glBindBuffer(GL_ARRAY_BUFFER, gVbo); glBufferData(GL_ARRAY_BUFFER, MESH_WIDTH * MESH_HEIGHT * 4 * 
sizeof(float), 0, GL_DYNAMIC_DRAW); glVertexAttribPointer(HAD_ATTRIBUTE_POSITION, 4, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(HAD_ATTRIBUTE_POSITION); glBindBuffer(GL_ARRAY_BUFFER, 0); err = hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, gVbo, hipGraphicsMapFlagsWriteDiscard); if (err != hipSuccess) { sprintf(str, "GPU Memory Fatal Error = %s In File Name %s at Line No.%d\nExitting...\n", hipGetErrorString(err), __FILE__, __LINE__); MessageBox(NULL, str, TEXT("MSG"), MB_OK); //cleanup(); exit(EXIT_FAILURE); } glBindVertexArray(0); glClearDepth(1.0f); glDisable(GL_DEPTH_TEST); glDepthFunc(GL_LEQUAL); glShadeModel(GL_SMOOTH); glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); glEnable(GL_CULL_FACE); glClearColor(0.0f, 0.0f, 0.0f, 0.0f); gPerspectiveProjectionMatrix = mat4::identity(); sdkCreateTimer(&timer); resize(WIN_WIDTH, WIN_HEIGHT); } void runCuda(void) { void uninitialize(int); // MessageBox(NULL,TEXT("In RunCuda"),TEXT("MSG"),MB_OK); float4* dptr; size_t num_bytes; err = hipGraphicsMapResources(1, &cuda_vbo_resource, 0); if (err != hipSuccess) { sprintf(str, "GPU Memory Fatal Error = %s In File Name %s at Line No.%d\nExitting...\n", hipGetErrorString(err), __FILE__, __LINE__); MessageBox(NULL, str, TEXT("MSG"), MB_OK); //cleanup(); exit(EXIT_FAILURE); } err = hipGraphicsResourceGetMappedPointer((void**)&dptr, &num_bytes, cuda_vbo_resource); if (err != hipSuccess) { sprintf(str, "GPU Memory Fatal Error = %s In File Name %s at Line No.%d\nExitting...\n", hipGetErrorString(err), __FILE__, __LINE__); MessageBox(NULL, str, TEXT("MSG"), MB_OK); //cleanup(); exit(EXIT_FAILURE); } dim3 block(8, 8, 1); dim3 grid(MESH_WIDTH / block.x, MESH_HEIGHT / block.y, 1); //MessageBox(NULL,TEXT("RunCuda Before Kernel"),TEXT("MSG"),MB_OK); calculate_vertices << < grid, block >> > (dptr, MESH_WIDTH, MESH_HEIGHT, gfAnimate); // err=hipMemcpy(waveVerticesHost,waveVerticesDevice,MESH_WIDTH*MESH_HEIGHT*sizeof(float4),hipMemcpyDeviceToHost); // if(err!=hipSuccess) // { 
// printf("GPU Memory Fatal Error = %s In File Name %s at Line No.%d\nExitting...\n",hipGetErrorString(err),__FILE__,__LINE__); // uninitialize(1); // exit(EXIT_FAILURE); // } hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0); // MessageBox(NULL,TEXT("Leaving RunCuda"),TEXT("MSG"),MB_OK); } void display(void) { void computeFPS(void); runCuda(); sdkStartTimer(&timer); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Use Shader Program Object glUseProgram(gShaderProgramObject); mat4 modelViewMatrix = mat4::identity(); mat4 modelViewProjectionMatrix = mat4::identity(); modelViewMatrix = translate(0.0f, 0.0f, gfTranslateFactor); modelViewProjectionMatrix = gPerspectiveProjectionMatrix * modelViewMatrix; glUniformMatrix4fv(gMVPUniform, 1, GL_FALSE, modelViewProjectionMatrix); glUniform4fv(gColorUniform, 1, color); glBindVertexArray(gVao); glBindBuffer(GL_ARRAY_BUFFER, gVbo); glDrawArrays(GL_POINTS, 0, MESH_WIDTH * MESH_HEIGHT); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindVertexArray(0); glUseProgram(0); gfAnimate += 0.01f; sdkStopTimer(&timer); //computeFPS(); SwapBuffers(ghdc); } void update(void) { void uninitialize(int); glBindVertexArray(gVao); glGenBuffers(1, &gVbo); glBindBuffer(GL_ARRAY_BUFFER, gVbo); glBufferData(GL_ARRAY_BUFFER, MESH_WIDTH * MESH_HEIGHT * 4 * sizeof(float), 0, GL_DYNAMIC_DRAW); glVertexAttribPointer(HAD_ATTRIBUTE_POSITION, 4, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(HAD_ATTRIBUTE_POSITION); glBindBuffer(GL_ARRAY_BUFFER, 0); err = hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, gVbo, hipGraphicsMapFlagsWriteDiscard); if (err != hipSuccess) { sprintf(str, "GPU Memory Fatal Error = %s In File Name %s at Line No.%d\nExitting...\n", hipGetErrorString(err), __FILE__, __LINE__); MessageBox(NULL, str, TEXT("MSG"), MB_OK); uninitialize(1); exit(EXIT_FAILURE); } glBindVertexArray(0); } void computeFPS(void) { //float max(float, float); frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) 
/ 1000.f); fpsCount = 0; fpsLimit = (int)max(avgFPS, 1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "OpenGL CUDA Interoperability %3.1f fps", avgFPS); SetWindowText(ghwnd, fps); } // float max(float a, float b) // { // if(a>b) // return a; // else // return b; // } void resize(int width, int height) { if (height == 0) height = 1; glViewport(0, 0, (GLsizei)width, (GLsizei)height); gPerspectiveProjectionMatrix = perspective(45.0f, (GLfloat)width / (GLfloat)height, 0.1f, 100.0f); } void ToggleFullscreen(void) { MONITORINFO mi = { sizeof(MONITORINFO) }; if (gbFullscreen == false) { dwStyle = GetWindowLong(ghwnd, GWL_STYLE); if (dwStyle & WS_OVERLAPPEDWINDOW) { if (GetWindowPlacement(ghwnd, &wpPrev) && GetMonitorInfo(MonitorFromWindow(ghwnd, MONITORINFOF_PRIMARY), &mi)) { SetWindowLong(ghwnd, GWL_STYLE, dwStyle & ~WS_OVERLAPPEDWINDOW); SetWindowPos(ghwnd, HWND_TOP, mi.rcMonitor.left, mi.rcMonitor.top, mi.rcMonitor.right - mi.rcMonitor.left, mi.rcMonitor.bottom - mi.rcMonitor.top, SWP_NOZORDER | SWP_FRAMECHANGED); } } ShowCursor(FALSE); } else { SetWindowLong(ghwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW); SetWindowPlacement(ghwnd, &wpPrev); SetWindowPos(ghwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOSIZE | SWP_NOMOVE | SWP_NOZORDER | SWP_NOOWNERZORDER | SWP_FRAMECHANGED); ShowCursor(TRUE); } } void uninitialize(int i_Exit_Flag) { if (gbFullscreen == false) { SetWindowLong(ghwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW); SetWindowPlacement(ghwnd, &wpPrev); SetWindowPos(ghwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOSIZE | SWP_NOMOVE | SWP_NOZORDER | SWP_NOOWNERZORDER | SWP_FRAMECHANGED); ShowCursor(TRUE); } hipGraphicsUnregisterResource(cuda_vbo_resource); if (gVao) { glDeleteVertexArrays(1, &gVao); gVao = 0; } if (gVbo) { glDeleteBuffers(1, &gVbo); gVbo = 0; } //Detach Shader glDetachShader(gShaderProgramObject, gVertexShaderObject); glDetachShader(gShaderProgramObject, gFragmentShaderObject); //Delete Shader glDeleteShader(gVertexShaderObject); gVertexShaderObject = 0; 
glDeleteShader(gFragmentShaderObject); gFragmentShaderObject = 0; //Delete Program glDeleteProgram(gShaderProgramObject); gShaderProgramObject = 0; //Stray call to glUseProgram(0) glUseProgram(0); wglMakeCurrent(NULL, NULL); if (ghrc != NULL) { wglDeleteContext(ghrc); ghrc = NULL; } if (ghdc != NULL) { ReleaseDC(ghwnd, ghdc); ghdc = NULL; } if (i_Exit_Flag == 0) { fopen_s(&gpFile, "Log.txt", "a"); fprintf(gpFile, "Log File Closed Successfully"); fclose(gpFile); } else if (i_Exit_Flag == 1) { fopen_s(&gpFile, "Log.txt", "a"); fprintf(gpFile, "Log File Closed Erroniously"); fclose(gpFile); } gpFile = NULL; DestroyWindow(ghwnd); }
825172c694a806db44e9b9903ca94ccde356fc3d.cu
 #include<windows.h> #include<GL\glew.h> #include<gl/GL.h> #include<stdio.h> #include<cuda.h> #include<cuda_runtime.h> //#include <cuda_runtime_api.h> #include<cuda_gl_interop.h> #include"helper_timer.h" #include<vector_types.h> #include"vmath.h" #include <device_functions.h> #include <device_launch_parameters.h> #pragma comment(lib,"User32.lib") #pragma comment(lib,"GDI32.lib") #pragma comment(lib,"glew32.lib") #pragma comment(lib,"opengl32.lib") #define WIN_WIDTH 800 #define WIN_HEIGHT 600 using namespace vmath; enum { HAD_ATTRIBUTE_POSITION = 0, HAD_ATTRIBUTE_COLOR, HAD_ATTRIBUTE_NORMAL, HAD_ATTRIBUTE_TEXTURE0, }; LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM); FILE* gpFile; HWND ghwnd; HDC ghdc; HGLRC ghrc; DWORD dwStyle; WINDOWPLACEMENT wpPrev = { sizeof(WINDOWPLACEMENT) }; bool gbActiveWindow = false; bool gbFullscreen = false; bool gbIsEscapeKeyPressed = false; GLuint gVertexShaderObject; GLuint gFragmentShaderObject; GLuint gShaderProgramObject; GLuint gVao; GLuint gVbo; GLuint gMVPUniform, gColorUniform; cudaGraphicsResource_t cuda_vbo_resource = 0; mat4 gPerspectiveProjectionMatrix; float gfAnimate = 0.0f; cudaError_t err = cudaSuccess; char str[256]; int MESH_WIDTH = 64; int MESH_HEIGHT = 64; GLfloat color[] = { 1.0f,1.0f,1.0f,1.0f }; GLfloat gfTranslateFactor = -1.0f; StopWatchInterface* timer = NULL; int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling float avgFPS = 0.0f; unsigned int frameCount = 0; __global__ void calculate_vertices(float4* pos, unsigned int width, unsigned int height, float time) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; __syncthreads(); float v = y / (float)height; __syncthreads(); u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; float freq = 4.0f; float w = sinf(u * freq + time) * cosf(v * freq + time) * 0.5f; pos[y * width + x] = make_float4(u, w, v, 1.0f); } int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE 
hPrevInstance, LPSTR lpszCmdLine, int iCmdShow) { void initialize(void); void display(void); void uninitialize(int); WNDCLASSEX wndclass; HWND hwnd; MSG msg; TCHAR szClassName[] = TEXT("My App"); bool bDone = false; if (fopen_s(&gpFile, "Log.txt", "w") != NULL) { MessageBox(NULL, TEXT("Cannot Create Log File !!!"), TEXT("Error"), MB_OK); exit(EXIT_FAILURE); } else fprintf(gpFile, "Log File Created Successfully...\n"); fclose(gpFile); wndclass.cbSize = sizeof(WNDCLASSEX); wndclass.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC; wndclass.cbClsExtra = 0; wndclass.cbWndExtra = 0; wndclass.hInstance = hInstance; wndclass.lpszClassName = szClassName; wndclass.lpszMenuName = NULL; wndclass.lpfnWndProc = WndProc; wndclass.hIcon = LoadIcon(NULL, IDI_APPLICATION); wndclass.hIconSm = LoadIcon(NULL, IDI_APPLICATION); wndclass.hCursor = LoadCursor(NULL, IDC_ARROW); wndclass.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH); RegisterClassEx(&wndclass); hwnd = CreateWindowEx(WS_EX_APPWINDOW, szClassName, TEXT("OpenGLPP : OpenGL - CUDA Interoperability"), WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN | WS_CLIPSIBLINGS | WS_VISIBLE, 100, 100, WIN_WIDTH, WIN_HEIGHT, NULL, NULL, hInstance, NULL); if (hwnd == NULL) { fprintf(gpFile, "Cannot Create Window...\n"); uninitialize(1); } ghwnd = hwnd; ShowWindow(hwnd, iCmdShow); SetFocus(hwnd); SetForegroundWindow(hwnd); initialize(); while (bDone == false) { if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) { if (msg.message == WM_QUIT) bDone = true; else { TranslateMessage(&msg); DispatchMessage(&msg); } } else { if (gbActiveWindow == true) { if (gbIsEscapeKeyPressed == true) bDone = true; display(); } } } uninitialize(0); return((int)msg.wParam); } LRESULT CALLBACK WndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam) { void update(void); void resize(int, int); void ToggleFullscreen(void); switch (iMsg) { case WM_ACTIVATE: if (HIWORD(wParam) == 0) gbActiveWindow = true; else gbActiveWindow = false; break; case WM_CREATE: break; case WM_SIZE: 
resize(LOWORD(lParam), HIWORD(lParam)); break; case WM_KEYDOWN: switch (wParam) { case VK_ESCAPE: gbIsEscapeKeyPressed = true; break; case 0x31: //1 MESH_WIDTH = 64; MESH_HEIGHT = 64; update(); break; case 0x32: //2 MESH_WIDTH = 128; MESH_HEIGHT = 128; update(); break; case 0x33: //3 MESH_WIDTH = 256; MESH_HEIGHT = 256; update(); break; case 0x34: //4 MESH_WIDTH = 512; MESH_HEIGHT = 512; update(); break; case 0x35: //5 MESH_WIDTH = 1024; MESH_HEIGHT = 1024; update(); break; case 0x52: //R color[0] = 1.0f; color[1] = 0.0f; color[2] = 0.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x47: //G color[0] = 0.0f; color[1] = 1.0f; color[2] = 0.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x42: //B color[0] = 0.0f; color[1] = 0.0f; color[2] = 1.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x43: //C color[0] = 0.0f; color[1] = 1.0f; color[2] = 1.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x4D: //M color[0] = 1.0f; color[1] = 0.0f; color[2] = 1.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x59: //Y color[0] = 1.0f; color[1] = 1.0f; color[2] = 0.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; case 0x4B: //K color[0] = 0.0f; color[1] = 0.0f; color[2] = 0.0f; glClearColor(1.0f, 1.0f, 1.0f, 0.0f); break; case VK_UP: gfTranslateFactor -= 0.05f; break; case VK_DOWN: gfTranslateFactor += 0.05f; break; case 0x46: if (gbFullscreen == false) { ToggleFullscreen(); gbFullscreen = true; } else { ToggleFullscreen(); gbFullscreen = false; } break; default: color[0] = 1.0f; color[1] = 1.0f; color[2] = 1.0f; glClearColor(0.0f, 0.0f, 0.0f, 0.0f); break; } break; case WM_DESTROY: PostQuitMessage(0); break; } return(DefWindowProc(hwnd, iMsg, wParam, lParam)); } void initialize(void) { void resize(int, int); void uninitialize(int); PIXELFORMATDESCRIPTOR pfd; int iPixelFormatIndex; ZeroMemory(&pfd, sizeof(PIXELFORMATDESCRIPTOR)); pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR); pfd.nVersion = 1; pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | 
PFD_DOUBLEBUFFER; pfd.iPixelType = PFD_TYPE_RGBA; pfd.cColorBits = 24; pfd.cRedBits = 8; pfd.cGreenBits = 8; pfd.cBlueBits = 8; pfd.cAlphaBits = 8; pfd.cDepthBits = 32; ghdc = GetDC(ghwnd); if (ghdc == NULL) { fprintf(gpFile, "GetDC() Failed.\n"); uninitialize(1); } iPixelFormatIndex = ChoosePixelFormat(ghdc, &pfd); if (iPixelFormatIndex == 0) { fprintf(gpFile, "ChoosePixelFormat() Failed.\n"); uninitialize(1); } if (SetPixelFormat(ghdc, iPixelFormatIndex, &pfd) == FALSE) { fprintf(gpFile, "SetPixelFormat() Failed.\n"); uninitialize(1); } ghrc = wglCreateContext(ghdc); if (ghrc == NULL) { fprintf(gpFile, "wglCreateContext() Failed.\n"); uninitialize(1); } if (wglMakeCurrent(ghdc, ghrc) == FALSE) { fprintf(gpFile, "wglMakeCurrent() Failed"); uninitialize(1); } GLenum glew_error = glewInit(); if (glew_error != GLEW_OK) { wglDeleteContext(ghrc); ghrc = NULL; ReleaseDC(ghwnd, ghdc); ghdc = NULL; } //Vertex Shader gVertexShaderObject = glCreateShader(GL_VERTEX_SHADER); const GLchar* vertexShaderSourceCode = "#version 450 core" \ "\n" \ "in vec4 vPosition;" \ "uniform mat4 u_mvp_matrix;" \ "void main(void)" \ "{" \ "gl_Position = u_mvp_matrix * vPosition;" \ "}"; glShaderSource(gVertexShaderObject, 1, (const GLchar**)&vertexShaderSourceCode, NULL); glCompileShader(gVertexShaderObject); GLint iInfoLogLength = 0; GLint iShaderCompiledStatus = 0; char* szInfoLog = NULL; glGetShaderiv(gVertexShaderObject, GL_COMPILE_STATUS, &iShaderCompiledStatus); if (iShaderCompiledStatus == GL_FALSE) { glGetShaderiv(gVertexShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength > 0) { szInfoLog = (char*)malloc(iInfoLogLength); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(gVertexShaderObject, iInfoLogLength, &written, szInfoLog); fprintf(gpFile, "Vertex Shader Compilation Log : %s\n", szInfoLog); free(szInfoLog); uninitialize(1); exit(0); } } } //Fragment Shader gFragmentShaderObject = glCreateShader(GL_FRAGMENT_SHADER); const GLchar* fragmentShaderSourceCode 
= "#version 450 core"\ "\n"\ "out vec4 FragColor;"\ "uniform vec4 color;" \ "void main(void)"\ "{"\ "FragColor=color;"\ "}"; glShaderSource(gFragmentShaderObject, 1, (const GLchar**)&fragmentShaderSourceCode, NULL); glCompileShader(gFragmentShaderObject); glGetShaderiv(gFragmentShaderObject, GL_COMPILE_STATUS, &iShaderCompiledStatus); if (iShaderCompiledStatus == GL_FALSE) { glGetShaderiv(gFragmentShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength > 0) { szInfoLog = (char*)malloc(iInfoLogLength); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(gFragmentShaderObject, iInfoLogLength, &written, szInfoLog); fprintf(gpFile, "Fragment Shader Compilation Log : %s\n", szInfoLog); free(szInfoLog); uninitialize(1); exit(0); } } } //Shader Program gShaderProgramObject = glCreateProgram(); glAttachShader(gShaderProgramObject, gVertexShaderObject); glAttachShader(gShaderProgramObject, gFragmentShaderObject); glBindAttribLocation(gShaderProgramObject, HAD_ATTRIBUTE_POSITION, "vPosition"); glLinkProgram(gShaderProgramObject); GLint iShaderProgramLinkStatus = 0; glGetProgramiv(gShaderProgramObject, GL_LINK_STATUS, &iShaderProgramLinkStatus); if (iShaderProgramLinkStatus == GL_FALSE) { glGetProgramiv(gShaderProgramObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength > 0) { szInfoLog = (char*)malloc(iInfoLogLength); if (szInfoLog != NULL) { GLsizei written; glGetProgramInfoLog(gShaderProgramObject, iInfoLogLength, &written, szInfoLog); fprintf(gpFile, "Shader Program Link Log : %s\n", szInfoLog); free(szInfoLog); uninitialize(1); exit(0); } } } gMVPUniform = glGetUniformLocation(gShaderProgramObject, "u_mvp_matrix"); gColorUniform = glGetUniformLocation(gShaderProgramObject, "color"); glGenVertexArrays(1, &gVao); glBindVertexArray(gVao); glGenBuffers(1, &gVbo); glBindBuffer(GL_ARRAY_BUFFER, gVbo); glBufferData(GL_ARRAY_BUFFER, MESH_WIDTH * MESH_HEIGHT * 4 * sizeof(float), 0, GL_DYNAMIC_DRAW); glVertexAttribPointer(HAD_ATTRIBUTE_POSITION, 
4, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(HAD_ATTRIBUTE_POSITION); glBindBuffer(GL_ARRAY_BUFFER, 0); err = cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, gVbo, cudaGraphicsMapFlagsWriteDiscard); if (err != cudaSuccess) { sprintf(str, "GPU Memory Fatal Error = %s In File Name %s at Line No.%d\nExitting...\n", cudaGetErrorString(err), __FILE__, __LINE__); MessageBox(NULL, str, TEXT("MSG"), MB_OK); //cleanup(); exit(EXIT_FAILURE); } glBindVertexArray(0); glClearDepth(1.0f); glDisable(GL_DEPTH_TEST); glDepthFunc(GL_LEQUAL); glShadeModel(GL_SMOOTH); glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); glEnable(GL_CULL_FACE); glClearColor(0.0f, 0.0f, 0.0f, 0.0f); gPerspectiveProjectionMatrix = mat4::identity(); sdkCreateTimer(&timer); resize(WIN_WIDTH, WIN_HEIGHT); } void runCuda(void) { void uninitialize(int); // MessageBox(NULL,TEXT("In RunCuda"),TEXT("MSG"),MB_OK); float4* dptr; size_t num_bytes; err = cudaGraphicsMapResources(1, &cuda_vbo_resource, 0); if (err != cudaSuccess) { sprintf(str, "GPU Memory Fatal Error = %s In File Name %s at Line No.%d\nExitting...\n", cudaGetErrorString(err), __FILE__, __LINE__); MessageBox(NULL, str, TEXT("MSG"), MB_OK); //cleanup(); exit(EXIT_FAILURE); } err = cudaGraphicsResourceGetMappedPointer((void**)&dptr, &num_bytes, cuda_vbo_resource); if (err != cudaSuccess) { sprintf(str, "GPU Memory Fatal Error = %s In File Name %s at Line No.%d\nExitting...\n", cudaGetErrorString(err), __FILE__, __LINE__); MessageBox(NULL, str, TEXT("MSG"), MB_OK); //cleanup(); exit(EXIT_FAILURE); } dim3 block(8, 8, 1); dim3 grid(MESH_WIDTH / block.x, MESH_HEIGHT / block.y, 1); //MessageBox(NULL,TEXT("RunCuda Before Kernel"),TEXT("MSG"),MB_OK); calculate_vertices << < grid, block >> > (dptr, MESH_WIDTH, MESH_HEIGHT, gfAnimate); // err=cudaMemcpy(waveVerticesHost,waveVerticesDevice,MESH_WIDTH*MESH_HEIGHT*sizeof(float4),cudaMemcpyDeviceToHost); // if(err!=cudaSuccess) // { // printf("GPU Memory Fatal Error = %s In File Name %s at Line 
No.%d\nExitting...\n",cudaGetErrorString(err),__FILE__,__LINE__); // uninitialize(1); // exit(EXIT_FAILURE); // } cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0); // MessageBox(NULL,TEXT("Leaving RunCuda"),TEXT("MSG"),MB_OK); } void display(void) { void computeFPS(void); runCuda(); sdkStartTimer(&timer); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Use Shader Program Object glUseProgram(gShaderProgramObject); mat4 modelViewMatrix = mat4::identity(); mat4 modelViewProjectionMatrix = mat4::identity(); modelViewMatrix = translate(0.0f, 0.0f, gfTranslateFactor); modelViewProjectionMatrix = gPerspectiveProjectionMatrix * modelViewMatrix; glUniformMatrix4fv(gMVPUniform, 1, GL_FALSE, modelViewProjectionMatrix); glUniform4fv(gColorUniform, 1, color); glBindVertexArray(gVao); glBindBuffer(GL_ARRAY_BUFFER, gVbo); glDrawArrays(GL_POINTS, 0, MESH_WIDTH * MESH_HEIGHT); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindVertexArray(0); glUseProgram(0); gfAnimate += 0.01f; sdkStopTimer(&timer); //computeFPS(); SwapBuffers(ghdc); } void update(void) { void uninitialize(int); glBindVertexArray(gVao); glGenBuffers(1, &gVbo); glBindBuffer(GL_ARRAY_BUFFER, gVbo); glBufferData(GL_ARRAY_BUFFER, MESH_WIDTH * MESH_HEIGHT * 4 * sizeof(float), 0, GL_DYNAMIC_DRAW); glVertexAttribPointer(HAD_ATTRIBUTE_POSITION, 4, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(HAD_ATTRIBUTE_POSITION); glBindBuffer(GL_ARRAY_BUFFER, 0); err = cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, gVbo, cudaGraphicsMapFlagsWriteDiscard); if (err != cudaSuccess) { sprintf(str, "GPU Memory Fatal Error = %s In File Name %s at Line No.%d\nExitting...\n", cudaGetErrorString(err), __FILE__, __LINE__); MessageBox(NULL, str, TEXT("MSG"), MB_OK); uninitialize(1); exit(EXIT_FAILURE); } glBindVertexArray(0); } void computeFPS(void) { //float max(float, float); frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)max(avgFPS, 
1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "OpenGL CUDA Interoperability %3.1f fps", avgFPS); SetWindowText(ghwnd, fps); } // float max(float a, float b) // { // if(a>b) // return a; // else // return b; // } void resize(int width, int height) { if (height == 0) height = 1; glViewport(0, 0, (GLsizei)width, (GLsizei)height); gPerspectiveProjectionMatrix = perspective(45.0f, (GLfloat)width / (GLfloat)height, 0.1f, 100.0f); } void ToggleFullscreen(void) { MONITORINFO mi = { sizeof(MONITORINFO) }; if (gbFullscreen == false) { dwStyle = GetWindowLong(ghwnd, GWL_STYLE); if (dwStyle & WS_OVERLAPPEDWINDOW) { if (GetWindowPlacement(ghwnd, &wpPrev) && GetMonitorInfo(MonitorFromWindow(ghwnd, MONITORINFOF_PRIMARY), &mi)) { SetWindowLong(ghwnd, GWL_STYLE, dwStyle & ~WS_OVERLAPPEDWINDOW); SetWindowPos(ghwnd, HWND_TOP, mi.rcMonitor.left, mi.rcMonitor.top, mi.rcMonitor.right - mi.rcMonitor.left, mi.rcMonitor.bottom - mi.rcMonitor.top, SWP_NOZORDER | SWP_FRAMECHANGED); } } ShowCursor(FALSE); } else { SetWindowLong(ghwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW); SetWindowPlacement(ghwnd, &wpPrev); SetWindowPos(ghwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOSIZE | SWP_NOMOVE | SWP_NOZORDER | SWP_NOOWNERZORDER | SWP_FRAMECHANGED); ShowCursor(TRUE); } } void uninitialize(int i_Exit_Flag) { if (gbFullscreen == false) { SetWindowLong(ghwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW); SetWindowPlacement(ghwnd, &wpPrev); SetWindowPos(ghwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOSIZE | SWP_NOMOVE | SWP_NOZORDER | SWP_NOOWNERZORDER | SWP_FRAMECHANGED); ShowCursor(TRUE); } cudaGraphicsUnregisterResource(cuda_vbo_resource); if (gVao) { glDeleteVertexArrays(1, &gVao); gVao = 0; } if (gVbo) { glDeleteBuffers(1, &gVbo); gVbo = 0; } //Detach Shader glDetachShader(gShaderProgramObject, gVertexShaderObject); glDetachShader(gShaderProgramObject, gFragmentShaderObject); //Delete Shader glDeleteShader(gVertexShaderObject); gVertexShaderObject = 0; glDeleteShader(gFragmentShaderObject); gFragmentShaderObject 
= 0; //Delete Program glDeleteProgram(gShaderProgramObject); gShaderProgramObject = 0; //Stray call to glUseProgram(0) glUseProgram(0); wglMakeCurrent(NULL, NULL); if (ghrc != NULL) { wglDeleteContext(ghrc); ghrc = NULL; } if (ghdc != NULL) { ReleaseDC(ghwnd, ghdc); ghdc = NULL; } if (i_Exit_Flag == 0) { fopen_s(&gpFile, "Log.txt", "a"); fprintf(gpFile, "Log File Closed Successfully"); fclose(gpFile); } else if (i_Exit_Flag == 1) { fopen_s(&gpFile, "Log.txt", "a"); fprintf(gpFile, "Log File Closed Erroniously"); fclose(gpFile); } gpFile = NULL; DestroyWindow(ghwnd); }
0281c936a3956a3d5970d0fa8d430ec76687bb2c.hip
// !!! This is a file automatically generated by hipify!!! // // Plane.cpp // PathTracer // // Created by Federico Saldarini on 4/16/20. // Copyright 2020 Federico Saldarini. All rights reserved. // #include "Plane.hpp" #include "hip/hip_runtime.h" __device__ Plane::Plane(const gvec3& position, float width, float height, Material* material) : position(position), width(width), height(height), material(material) { } __device__ Plane::~Plane() { delete material; } __device__ bool Plane::boundingBox(double t0, double t1, AABA& bBox) const { gvec3 extent{ width / 2.f, height / 2.f, 0.0001 }; bBox = AABA(position - extent, position + extent); return true; } __device__ bool Plane::hit(const Ray& ray, float tmin, float tmax, HitInfo& info) const { gvec3 x{ width /2.f, 0.f, position.z}; gvec3 y{ 0.f, height / 2.f, position.z }; auto n = normalize(cross(x, y)); auto t = (dot(n, position) - dot(n, ray.origin)) / dot(n, ray.dir); if (tmin > t || t > tmax) { return false; } auto hitPoint = ray(t); if (length2(hitPoint - position) > length2(x + y)) { return false; } info.isFrontFace = dot(n, ray.dir) < 0.f; info.material = material; info.hitPoint = hitPoint; info.normal = n; info.t = t; return true; }
0281c936a3956a3d5970d0fa8d430ec76687bb2c.cu
// // Plane.cpp // PathTracer // // Created by Federico Saldarini on 4/16/20. // Copyright © 2020 Federico Saldarini. All rights reserved. // #include "Plane.hpp" #include "cuda_runtime.h" __device__ Plane::Plane(const gvec3& position, float width, float height, Material* material) : position(position), width(width), height(height), material(material) { } __device__ Plane::~Plane() { delete material; } __device__ bool Plane::boundingBox(double t0, double t1, AABA& bBox) const { gvec3 extent{ width / 2.f, height / 2.f, 0.0001 }; bBox = AABA(position - extent, position + extent); return true; } __device__ bool Plane::hit(const Ray& ray, float tmin, float tmax, HitInfo& info) const { gvec3 x{ width /2.f, 0.f, position.z}; gvec3 y{ 0.f, height / 2.f, position.z }; auto n = normalize(cross(x, y)); auto t = (dot(n, position) - dot(n, ray.origin)) / dot(n, ray.dir); if (tmin > t || t > tmax) { return false; } auto hitPoint = ray(t); if (length2(hitPoint - position) > length2(x + y)) { return false; } info.isFrontFace = dot(n, ray.dir) < 0.f; info.material = material; info.hitPoint = hitPoint; info.normal = n; info.t = t; return true; }
eebe317f2740b73445c9bea95360c124958814a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define N 1000 hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void add(int *a, int *b, int *c) { int tid = blockIdx.x; // current block's x dim. if (tid < N) c[tid] = a[tid] + b[tid]; // add as long as it is smaller than input vector., } int main() { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; int stat; // Start allocating memory for 3 vectors in GPU. stat = hipMalloc((void**)&dev_a, N * sizeof(int)); stat = hipMalloc((void**)&dev_b, N * sizeof(int)); stat = hipMalloc((void**)&dev_c, N * sizeof(int)); // Construct vectors values for a and b vector. for (int i = 0; i < N; i++) { a[i] = -i; b[i] = i*i; } // Copy the summing vectors to device. hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice); add << <N, 1 >> > (dev_a, dev_b, dev_c); // Copy the summed vector back to host. hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost); // Print the vector now. for (int i = 0; i < N; i++) printf("\n%d + %d = %d", a[i], b[i], c[i]); // Release device memory. hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); getchar(); return 0; }
eebe317f2740b73445c9bea95360c124958814a0.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define N 1000 cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void add(int *a, int *b, int *c) { int tid = blockIdx.x; // current block's x dim. if (tid < N) c[tid] = a[tid] + b[tid]; // add as long as it is smaller than input vector., } int main() { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; int stat; // Start allocating memory for 3 vectors in GPU. stat = cudaMalloc((void**)&dev_a, N * sizeof(int)); stat = cudaMalloc((void**)&dev_b, N * sizeof(int)); stat = cudaMalloc((void**)&dev_c, N * sizeof(int)); // Construct vectors values for a and b vector. for (int i = 0; i < N; i++) { a[i] = -i; b[i] = i*i; } // Copy the summing vectors to device. cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice); add << <N, 1 >> > (dev_a, dev_b, dev_c); // Copy the summed vector back to host. cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost); // Print the vector now. for (int i = 0; i < N; i++) printf("\n%d + %d = %d", a[i], b[i], c[i]); // Release device memory. cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); getchar(); return 0; }
7af0130e93a84605148262eb1e71e0bd2eec6053.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #ifdef DEBUG #define BLOCKDIM 4 #define STEPS 10 #else #define BLOCKDIM 256 #define STEPS 40000 #endif #define USE_PINNED_MEMORY 0 extern "C" __global__ void rule30ca_kernel(mint * prevRow, mint * nextRow, mint width) { __shared__ int smem[BLOCKDIM+2]; int tx = threadIdx.x, bx = blockIdx.x; int index = tx + bx*BLOCKDIM; smem[tx+1] = index < width ? prevRow[index] : 0; if (tx == 0) smem[0] = index > 0 ? prevRow[index-1] : 0; else if (tx == BLOCKDIM-1) smem[BLOCKDIM+1] = index < width-1 ? prevRow[index+1] : 0; __syncthreads(); if (index < width) nextRow[index] = smem[tx] ^ (smem[tx+1] | smem[tx+2]); } int main( ) { mint * ca, * d_prevRow, * d_nextRow, * d_tmp; int steps; int width; int ii; #ifdef DEBUG int jj; #endif #if USE_PINNED_MEMORY hipError_t err; #endif steps = STEPS; width = 2*steps-1; /* Init CA */ #if USE_PINNED_MEMORY err = hipHostMalloc((void **) &ca, width*steps*sizeof(int)); if (err != hipSuccess) { printf("Could not allocate memory. Try reducing the number of steps.\n"); exit(1); } memset((void *) ca, 0, width*steps*sizeof(int)); #else ca = (mint *) calloc(width*steps, sizeof(int)); if (ca == NULL) { printf("Could not allocate memory. 
Try reducing the number of steps.\n"); exit(1); } #endif ca[width/2] = 1; /* allocate GPU mem */ hipMalloc((void **) &d_prevRow, width*sizeof(int)); hipMalloc((void **) &d_nextRow, width*sizeof(int)); /* copy previous row */ hipMemcpy(d_prevRow, ca, width*sizeof(int), hipMemcpyHostToDevice); dim3 blockDim(BLOCKDIM); dim3 gridDim((width + BLOCKDIM - 1)/BLOCKDIM); for (ii = 1; ii < steps; ii++) { hipLaunchKernelGGL(( rule30ca_kernel), dim3(gridDim), dim3(blockDim), 0, 0, d_prevRow, d_nextRow, width); hipMemcpy(&ca[ii*width + width/2 - ii], &d_nextRow[width/2 - ii], (2*ii+1)*sizeof(int), hipMemcpyDeviceToHost); d_tmp = d_nextRow; d_nextRow = d_prevRow; d_prevRow = d_tmp; } #ifdef DEBUG for (ii = 0; ii < steps; ii++) { for (jj = 0; jj < width; jj++) { printf("%d", ca[ii*width + jj]); } printf("\n"); } #endif hipFree(d_nextRow); hipFree(d_prevRow); #if USE_PINNED_MEMORY hipHostFree(ca); #else free(ca); #endif return 0; }
7af0130e93a84605148262eb1e71e0bd2eec6053.cu
#include <stdio.h> #ifdef DEBUG #define BLOCKDIM 4 #define STEPS 10 #else #define BLOCKDIM 256 #define STEPS 40000 #endif #define USE_PINNED_MEMORY 0 extern "C" __global__ void rule30ca_kernel(mint * prevRow, mint * nextRow, mint width) { __shared__ int smem[BLOCKDIM+2]; int tx = threadIdx.x, bx = blockIdx.x; int index = tx + bx*BLOCKDIM; smem[tx+1] = index < width ? prevRow[index] : 0; if (tx == 0) smem[0] = index > 0 ? prevRow[index-1] : 0; else if (tx == BLOCKDIM-1) smem[BLOCKDIM+1] = index < width-1 ? prevRow[index+1] : 0; __syncthreads(); if (index < width) nextRow[index] = smem[tx] ^ (smem[tx+1] | smem[tx+2]); } int main( ) { mint * ca, * d_prevRow, * d_nextRow, * d_tmp; int steps; int width; int ii; #ifdef DEBUG int jj; #endif #if USE_PINNED_MEMORY cudaError_t err; #endif steps = STEPS; width = 2*steps-1; /* Init CA */ #if USE_PINNED_MEMORY err = cudaMallocHost((void **) &ca, width*steps*sizeof(int)); if (err != cudaSuccess) { printf("Could not allocate memory. Try reducing the number of steps.\n"); exit(1); } memset((void *) ca, 0, width*steps*sizeof(int)); #else ca = (mint *) calloc(width*steps, sizeof(int)); if (ca == NULL) { printf("Could not allocate memory. 
Try reducing the number of steps.\n"); exit(1); } #endif ca[width/2] = 1; /* allocate GPU mem */ cudaMalloc((void **) &d_prevRow, width*sizeof(int)); cudaMalloc((void **) &d_nextRow, width*sizeof(int)); /* copy previous row */ cudaMemcpy(d_prevRow, ca, width*sizeof(int), cudaMemcpyHostToDevice); dim3 blockDim(BLOCKDIM); dim3 gridDim((width + BLOCKDIM - 1)/BLOCKDIM); for (ii = 1; ii < steps; ii++) { rule30ca_kernel<<<gridDim, blockDim>>>(d_prevRow, d_nextRow, width); cudaMemcpy(&ca[ii*width + width/2 - ii], &d_nextRow[width/2 - ii], (2*ii+1)*sizeof(int), cudaMemcpyDeviceToHost); d_tmp = d_nextRow; d_nextRow = d_prevRow; d_prevRow = d_tmp; } #ifdef DEBUG for (ii = 0; ii < steps; ii++) { for (jj = 0; jj < width; jj++) { printf("%d", ca[ii*width + jj]); } printf("\n"); } #endif cudaFree(d_nextRow); cudaFree(d_prevRow); #if USE_PINNED_MEMORY cudaFreeHost(ca); #else free(ca); #endif return 0; }
d2c5fffc70f547b5040e58d97bed561ba97fa29a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "thrust/device_vector.h" #include "thrust/host_vector.h" #include "thrust/random/normal_distribution.h" #include "thrust/random/linear_congruential_engine.h" #include "sm_35_atomic_functions.h" #include <stdio.h> #include <iostream> #include <vector> #include <algorithm> #include <random> #ifdef MNIST_DIGIT_RECOG #include "idx.h" #endif #include "Neuron.h" #include "threads.h" //#include "dot_adder.h" using namespace std; //using namespace thrust; idx_img* imgs, *imgs2; idx_labels* lbl, *lbl2; double Func(double x) { return 2; } std::tuple<float**, float**, float**, float**, int, int> productDataset_MNIST() { int n = 60000; float** ins_train = (float**)new float*[n]; float** outs_train = (float**)new float*[n]; int m = 10000; float** ins_test = new float*[m]; float** outs_test = new float*[m]; /*let the dataset's function be even numbers from 1 to 4096*/ idx_img* imgs, *imgs2; idx_labels* lbl, *lbl2; lbl = new idx_labels("../digits/trainlabel.bin"); imgs = new idx_img("../digits/trainimg.bin", 60000); for(int i = 0; i < n; i++) { float* tmp = oneHotEncode(lbl->labels.values[i]); ins_train[i] = normalizeArray(imgs->imgs[i].values, 28*28); outs_train[i] = new float[10]; for(int j = 0; j < 10; j++) { outs_train[i][j] = tmp[j]; } } lbl2 = new idx_labels("../digits/testlabel.bin"); imgs2 = new idx_img("../digits/testimg.bin", 10000); for(int i = 0; i < m; i++) { float* tmp = oneHotEncode(lbl2->labels.values[i]); ins_test[i] = normalizeArray(imgs2->imgs[i].values, 28*28); outs_test[i] = new float[10]; for(int j = 0; j < 10; j++) { outs_test[i][j] = tmp[j]; } } return {ins_train, outs_train, ins_test, outs_test, n, m}; } void mathsApprox() { int epoch = 10; float** ii, **oo, **iit, **oot; int tot_samples_train, tot_samples_test; std::tie(ii, oo, iit, oot, tot_samples_train, tot_samples_test) = productDataset_MNIST(); auto inputLayer = new 
InputLayer(28*28, RELU); auto x = new DenseLayer(200, RELU, inputLayer); x = new DenseLayer(50, TANH, x); auto outputLayer = new DenseLayer(10, SOFTMAX, x); NeuralNet_FF* nn = new NeuralNet_FF(inputLayer, outputLayer, 0.01, LOSS_CROSSENTROPY, 0.01, 0.01); NeuralEngine engine(nn); for(int i = 1; i <= epoch; i++) { engine.Train(ii, oo, tot_samples_train, 1, true); engine.Test(iit, oot, tot_samples_test); printf("\nValidation Completed! epoch %d ", i); } } void test_function(); int main() { //hipSetDevice(0); //digit_recog(); mathsApprox(); // test_function(); return 0; } void test_function() { int epoch = 1; float** ii, **oo, **iit, **oot; int tot_samples_train, tot_samples_test; float test[4] = {1,2,3,4}; float testout[3] = {0, 1, 0}; ii = new float*[1]; oo = new float*[1]; iit = new float*[1]; oot = new float*[1]; ii[0] = test; oo[0] = testout; iit[0] = test; oot[0] = testout; tot_samples_train = 1; tot_samples_test = 1; auto inputLayer = new InputLayer(4, LINEAR); auto x = new DenseLayer(5, LINEAR, inputLayer); auto y = new DenseLayer(5, LINEAR, x); auto outputLayer = new DenseLayer(3, LINEAR, y); float tw1[] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20}; float tw2[] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25}; float tw3[] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; x->setWeights(tw1); y->setWeights(tw2); outputLayer->setWeights(tw3); NeuralNet_FF* nn = new NeuralNet_FF(inputLayer, outputLayer, 0.01, LOSS_CROSSENTROPY, 0.01, 0.01); NeuralEngine engine(nn); engine.Train(ii, oo, tot_samples_train, epoch); // engine.Test(iit, oot, tot_samples_test); }
d2c5fffc70f547b5040e58d97bed561ba97fa29a.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "thrust/device_vector.h" #include "thrust/host_vector.h" #include "thrust/random/normal_distribution.h" #include "thrust/random/linear_congruential_engine.h" #include "sm_35_atomic_functions.h" #include <stdio.h> #include <iostream> #include <vector> #include <algorithm> #include <random> #ifdef MNIST_DIGIT_RECOG #include "idx.h" #endif #include "Neuron.h" #include "threads.h" //#include "dot_adder.h" using namespace std; //using namespace thrust; idx_img* imgs, *imgs2; idx_labels* lbl, *lbl2; double Func(double x) { return 2; } std::tuple<float**, float**, float**, float**, int, int> productDataset_MNIST() { int n = 60000; float** ins_train = (float**)new float*[n]; float** outs_train = (float**)new float*[n]; int m = 10000; float** ins_test = new float*[m]; float** outs_test = new float*[m]; /*let the dataset's function be even numbers from 1 to 4096*/ idx_img* imgs, *imgs2; idx_labels* lbl, *lbl2; lbl = new idx_labels("../digits/trainlabel.bin"); imgs = new idx_img("../digits/trainimg.bin", 60000); for(int i = 0; i < n; i++) { float* tmp = oneHotEncode(lbl->labels.values[i]); ins_train[i] = normalizeArray(imgs->imgs[i].values, 28*28); outs_train[i] = new float[10]; for(int j = 0; j < 10; j++) { outs_train[i][j] = tmp[j]; } } lbl2 = new idx_labels("../digits/testlabel.bin"); imgs2 = new idx_img("../digits/testimg.bin", 10000); for(int i = 0; i < m; i++) { float* tmp = oneHotEncode(lbl2->labels.values[i]); ins_test[i] = normalizeArray(imgs2->imgs[i].values, 28*28); outs_test[i] = new float[10]; for(int j = 0; j < 10; j++) { outs_test[i][j] = tmp[j]; } } return {ins_train, outs_train, ins_test, outs_test, n, m}; } void mathsApprox() { int epoch = 10; float** ii, **oo, **iit, **oot; int tot_samples_train, tot_samples_test; std::tie(ii, oo, iit, oot, tot_samples_train, tot_samples_test) = productDataset_MNIST(); auto inputLayer = new InputLayer(28*28, RELU); auto x = new DenseLayer(200, RELU, 
inputLayer); x = new DenseLayer(50, TANH, x); auto outputLayer = new DenseLayer(10, SOFTMAX, x); NeuralNet_FF* nn = new NeuralNet_FF(inputLayer, outputLayer, 0.01, LOSS_CROSSENTROPY, 0.01, 0.01); NeuralEngine engine(nn); for(int i = 1; i <= epoch; i++) { engine.Train(ii, oo, tot_samples_train, 1, true); engine.Test(iit, oot, tot_samples_test); printf("\nValidation Completed! epoch %d ", i); } } void test_function(); int main() { //cudaSetDevice(0); //digit_recog(); mathsApprox(); // test_function(); return 0; } void test_function() { int epoch = 1; float** ii, **oo, **iit, **oot; int tot_samples_train, tot_samples_test; float test[4] = {1,2,3,4}; float testout[3] = {0, 1, 0}; ii = new float*[1]; oo = new float*[1]; iit = new float*[1]; oot = new float*[1]; ii[0] = test; oo[0] = testout; iit[0] = test; oot[0] = testout; tot_samples_train = 1; tot_samples_test = 1; auto inputLayer = new InputLayer(4, LINEAR); auto x = new DenseLayer(5, LINEAR, inputLayer); auto y = new DenseLayer(5, LINEAR, x); auto outputLayer = new DenseLayer(3, LINEAR, y); float tw1[] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20}; float tw2[] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25}; float tw3[] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; x->setWeights(tw1); y->setWeights(tw2); outputLayer->setWeights(tw3); NeuralNet_FF* nn = new NeuralNet_FF(inputLayer, outputLayer, 0.01, LOSS_CROSSENTROPY, 0.01, 0.01); NeuralEngine engine(nn); engine.Train(ii, oo, tot_samples_train, epoch); // engine.Test(iit, oot, tot_samples_test); }
bfe136a4688e5e418f1cc6159480708479fbbc22.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE template <typename T, typename C> __global__ void awkward_ByteMaskedArray_mask(T* tomask, const C* frommask, int64_t length, bool validwhen, uint64_t invocation_index, uint64_t* err_code) { if (err_code[0] == NO_ERROR) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < length) { tomask[thread_id] = (frommask[thread_id] != 0) != validwhen; } } }
bfe136a4688e5e418f1cc6159480708479fbbc22.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE template <typename T, typename C> __global__ void awkward_ByteMaskedArray_mask(T* tomask, const C* frommask, int64_t length, bool validwhen, uint64_t invocation_index, uint64_t* err_code) { if (err_code[0] == NO_ERROR) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < length) { tomask[thread_id] = (frommask[thread_id] != 0) != validwhen; } } }
6587fdd00a808e0b6a6a730812426a94913bc869.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cstdint> #include <cstdio> #include <cuda_device_runtime_api.h> __global__ void childKernel(){ if(threadIdx.x == 0 && blockIdx.x == 0 && blockIdx.y == 0){ printf("hello from childKernel\n"); } } __global__ void parentKernel() { hipLaunchKernelGGL(( childKernel), dim3(dim3(60000,10)), dim3(64), 0, 0, ); auto ret = hipDeviceSynchronize(); if(ret != hipSuccess){ printf("CudaStreamSynchronize failed with %i",ret); } printf("done\n"); } int main() { hipGraph_t graph; cudaGraphCreate(&graph,0); hipGraphNode_t node; cudaKernelNodeParams params; params.func = (void*) parentKernel; params.extra = nullptr; params.gridDim = dim3(1); params.blockDim = dim3(1); params.sharedMemBytes = 0; params.kernelParams = nullptr; cudaGraphAddKernelNode(&node,graph,nullptr,0,&params); hipGraphExec_t instance; hipGraphInstantiate(&instance,graph,nullptr,nullptr,0); hipStream_t myStream; hipStreamCreate(&myStream); for(int i = 0; i < 100000; ++i){ hipGraphLaunch(instance,myStream); auto err = hipStreamSynchronize(myStream); if (err != hipSuccess) { printf("CUDA Error %d occured\n", err); break; } } hipGraphExecDestroy(instance); hipGraphDestroy(graph); hipStreamDestroy(myStream); return 0; }
6587fdd00a808e0b6a6a730812426a94913bc869.cu
#include <cuda.h> #include <cuda_runtime.h> #include <cstdint> #include <cstdio> #include <cuda_device_runtime_api.h> __global__ void childKernel(){ if(threadIdx.x == 0 && blockIdx.x == 0 && blockIdx.y == 0){ printf("hello from childKernel\n"); } } __global__ void parentKernel() { childKernel<<<dim3(60000,10), 64>>>(); auto ret = cudaDeviceSynchronize(); if(ret != cudaSuccess){ printf("CudaStreamSynchronize failed with %i",ret); } printf("done\n"); } int main() { cudaGraph_t graph; cudaGraphCreate(&graph,0); cudaGraphNode_t node; cudaKernelNodeParams params; params.func = (void*) parentKernel; params.extra = nullptr; params.gridDim = dim3(1); params.blockDim = dim3(1); params.sharedMemBytes = 0; params.kernelParams = nullptr; cudaGraphAddKernelNode(&node,graph,nullptr,0,&params); cudaGraphExec_t instance; cudaGraphInstantiate(&instance,graph,nullptr,nullptr,0); cudaStream_t myStream; cudaStreamCreate(&myStream); for(int i = 0; i < 100000; ++i){ cudaGraphLaunch(instance,myStream); auto err = cudaStreamSynchronize(myStream); if (err != cudaSuccess) { printf("CUDA Error %d occured\n", err); break; } } cudaGraphExecDestroy(instance); cudaGraphDestroy(graph); cudaStreamDestroy(myStream); return 0; }
57ca6f265c8f5eee913e7ea359c92ae2634fc23b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. #include <tuple> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <c10/util/Optional.h> #include "inplace_abn.h" #include "utils.h" #include "cuda_utils.cuh" #include "inplace_abn_kernels.cuh" #include "dispatch.h" /*********************************************************************************************************************** * Templated implementations **********************************************************************************************************************/ template<typename scalar_t, typename index_t> std::tuple<at::Tensor, at::Tensor, at::Tensor> statistics_template(const at::Tensor& x_) { // Normalize shape and get dimensions auto x = normalize_shape(x_); auto num = x.size(0), chn = x.size(1), sp = x.size(2); // Type handling using accscalar_t = at::acc_type<scalar_t, true>; auto acc_options = x.options(); if (x.scalar_type() == at::ScalarType::Half) { acc_options = acc_options.dtype(at::ScalarType::Float); } // Initialize output tensors auto mean = at::empty({chn}, acc_options); auto var = at::empty({chn}, acc_options); auto count = at::full({1}, num * sp, x.options().dtype(at::ScalarType::Long)); // Make accessors auto x_accessor = x.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto mean_accessor = mean.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto var_accessor = var.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); // Kernel parameters auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(chn); int tf = getNumThreads(sp); dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE / tf)); // Invoke kernel hipLaunchKernelGGL(( statistics_kernel<scalar_t, accscalar_t, index_t>), dim3(blocks), dim3(threads), 0, stream, 
x_accessor, mean_accessor, var_accessor); return std::make_tuple(mean, var, count); } template<typename scalar_t, typename index_t> std::tuple<at::Tensor, at::Tensor, at::Tensor> reduce_statistics_template( const at::Tensor& all_mean, const at::Tensor& all_var, const at::Tensor& all_count) { auto num = all_mean.size(0), chn = all_mean.size(1); // Initialize output tensors auto mean = at::empty({chn}, all_mean.options()); auto var = at::empty({chn}, all_var.options()); auto count = all_count.sum({0}); // Make accessors auto all_mean_accessor = all_mean.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, index_t>(); auto all_var_accessor = all_var.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, index_t>(); auto all_count_accessor = all_count.packed_accessor<int64_t, 2, at::RestrictPtrTraits, index_t>(); auto mean_accessor = mean.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, index_t>(); auto var_accessor = var.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, index_t>(); // Kernel parameters auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int threads = getNumThreads(chn); int blocks = std::max<int>(1, chn / threads); // Invoke kernel hipLaunchKernelGGL(( reduce_statistics_kernel<scalar_t, index_t>), dim3(blocks), dim3(threads), 0, stream, all_mean_accessor, all_var_accessor, all_count_accessor, mean_accessor, var_accessor); return std::make_tuple(mean, var, count); } template<typename scalar_t, typename prmscalar_t, typename index_t> void forward_template(at::Tensor& x_, const at::Tensor& mean, const at::Tensor& var, const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) { // Normalize shape and get dimensions auto x = normalize_shape(x_); auto num = x.size(0), chn = x.size(1), sp = x.size(2); // Type handling using accscalar_t = at::acc_type<scalar_t, true>; // Make accessors auto x_accessor = x.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); 
auto mean_accessor = mean.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto var_accessor = var.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto weight_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(weight); auto bias_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(bias); // Kernel parameters auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int tf = std::max<int>(getNumThreads(sp / 4), std::min<int>(getNumThreads(sp), 64)); int tb = std::max<int>(64 / tf, 1); dim3 blocks(chn, std::max<int>(1, std::min<int>((256 * 1024) / chn, (chn + tb - 1) / tb))); blocks.y = std::min<int>(blocks.y, 65535); dim3 threads(tf, tb); // Invoke kernel switch (activation) { case Activation::LeakyReLU: hipLaunchKernelGGL(( forward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::LeakyReLU>), dim3(blocks), dim3(threads), 0, stream, x_accessor, mean_accessor, var_accessor, weight_accessor, bias_accessor, eps, activation_param); break; case Activation::ELU: hipLaunchKernelGGL(( forward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::ELU>), dim3(blocks), dim3(threads), 0, stream, x_accessor, mean_accessor, var_accessor, weight_accessor, bias_accessor, eps, activation_param); break; case Activation::Identity: hipLaunchKernelGGL(( forward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::Identity>), dim3(blocks), dim3(threads), 0, stream, x_accessor, mean_accessor, var_accessor, weight_accessor, bias_accessor, eps, activation_param); break; } } template<typename scalar_t, typename prmscalar_t, typename index_t> std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_template( const at::Tensor& y_act_, const at::Tensor& dy_act_, const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) { // Normalize shape and get dimensions 
auto y_act = normalize_shape(y_act_); auto dy_act = normalize_shape(dy_act_); auto num = y_act.size(0), chn = y_act.size(1), sp = y_act.size(2); // Type handling using accscalar_t = at::acc_type<scalar_t, true>; auto acc_options = y_act.options(); if (y_act.scalar_type() == at::ScalarType::Half) { acc_options = acc_options.dtype(at::ScalarType::Float); } // Initialize output tensors auto xhat = at::empty_like(y_act); auto dy = at::empty_like(y_act); auto sum_dy = at::empty({chn}, acc_options); auto sum_xhat_dy = at::empty({chn}, acc_options); // Make accessors auto y_act_accessor = y_act.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto dy_act_accessor = dy_act.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto xhat_accessor = xhat.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto dy_accessor = dy.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto weight_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(weight); auto bias_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(bias); auto sum_dy_accessor = sum_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto sum_xhat_dy_accessor = sum_xhat_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); // Kernel parameters auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int block_y = std::min<int>(lastPow2(num), MAX_BLOCK_SIZE / 32); int block_x = std::min<int>(getNumThreads(sp), MAX_BLOCK_SIZE / block_y); const dim3 threads(block_x, block_y); const dim3 blocks(chn); // Invoke kernel switch (activation) { case Activation::LeakyReLU: hipLaunchKernelGGL(( backward_reduce_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::LeakyReLU>), dim3(blocks), dim3(threads), 0, stream, y_act_accessor, dy_act_accessor, weight_accessor, bias_accessor, xhat_accessor, dy_accessor, sum_dy_accessor, sum_xhat_dy_accessor, eps, 
activation_param); break; case Activation::ELU: hipLaunchKernelGGL(( backward_reduce_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::ELU>), dim3(blocks), dim3(threads), 0, stream, y_act_accessor, dy_act_accessor, weight_accessor, bias_accessor, xhat_accessor, dy_accessor, sum_dy_accessor, sum_xhat_dy_accessor, eps, activation_param); break; case Activation::Identity: hipLaunchKernelGGL(( backward_reduce_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::Identity>), dim3(blocks), dim3(threads), 0, stream, y_act_accessor, dy_act_accessor, weight_accessor, bias_accessor, xhat_accessor, dy_accessor, sum_dy_accessor, sum_xhat_dy_accessor, eps, activation_param); break; } return std::make_tuple(xhat.view(y_act_.sizes()), dy.view(y_act_.sizes()), sum_dy, sum_xhat_dy); } template<typename scalar_t, typename prmscalar_t, typename index_t> void backward_template(const at::Tensor& xhat_, at::Tensor& dy_, const at::Tensor& var, const at::Tensor& count, const at::Tensor& sum_dy, const at::Tensor& sum_xhat_dy, const c10::optional<at::Tensor>& weight, float eps) { // Normalize shape and get dimensions auto xhat = normalize_shape(xhat_); auto dy = normalize_shape(dy_); auto num = xhat.size(0), chn = xhat.size(1), sp = xhat.size(2); // Type handling using accscalar_t = at::acc_type<scalar_t, true>; // Make accessors auto xhat_accessor = xhat.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto dy_accessor = dy.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto var_accessor = var.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto count_accessor = count.packed_accessor<int64_t, 1, at::RestrictPtrTraits, index_t>(); auto sum_dy_accessor = sum_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto sum_xhat_dy_accessor = sum_xhat_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto weight_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, 
index_t>(weight); // Kernel parameters auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int tf = std::max<int>(getNumThreads(sp / 4), std::min<int>(getNumThreads(sp), 64)); int tb = std::max<int>(64 / tf, 1); dim3 blocks(chn, std::max<int>(1, std::min<int>((256 * 1024) / chn, (chn + tb - 1) / tb))); blocks.y = std::min<int>(blocks.y, 65535); dim3 threads(tf, tb); // Invoke kernel hipLaunchKernelGGL(( backward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t>), dim3(blocks), dim3(threads), 0, stream, xhat_accessor, dy_accessor, var_accessor, count_accessor, sum_dy_accessor, sum_xhat_dy_accessor, weight_accessor, eps); } /*********************************************************************************************************************** * Interface methods **********************************************************************************************************************/ std::tuple<at::Tensor, at::Tensor, at::Tensor> statistics_cuda(const at::Tensor& x) { return AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "statistics_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(x)) { return statistics_template<scalar_t, int32_t>(x); } else { return statistics_template<scalar_t, int64_t>(x); } }); } std::tuple<at::Tensor, at::Tensor, at::Tensor> reduce_statistics_cuda( const at::Tensor& all_mean, const at::Tensor& all_var, const at::Tensor& all_count) { return AT_DISPATCH_FLOATING_TYPES(all_mean.scalar_type(), "reduce_statistics_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(all_mean)) { return reduce_statistics_template<scalar_t, int32_t>(all_mean, all_var, all_count); } else { return reduce_statistics_template<scalar_t, int64_t>(all_mean, all_var, all_count); } }); } void forward_cuda(at::Tensor& x, const at::Tensor& mean, const at::Tensor& var, const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) { const auto& w_scalar_type = weight.has_value() ? 
weight.value().scalar_type() : x.scalar_type(); DOUBLE_DISPATCH(x.scalar_type(), w_scalar_type, "forward_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(x)) { forward_template<scalar_t, prmscalar_t, int32_t>(x, mean, var, weight, bias, eps, activation, activation_param); } else { forward_template<scalar_t, prmscalar_t, int64_t>(x, mean, var, weight, bias, eps, activation, activation_param); } }); } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_cuda( const at::Tensor& y_act, const at::Tensor& dy_act, const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) { const auto& w_scalar_type = weight.has_value() ? weight.value().scalar_type() : y_act.scalar_type(); return DOUBLE_DISPATCH(y_act.scalar_type(), w_scalar_type, "backward_reduce_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(y_act)) { return backward_reduce_template<scalar_t, prmscalar_t, int32_t>( y_act, dy_act, weight, bias, eps, activation, activation_param); } else { return backward_reduce_template<scalar_t, prmscalar_t, int64_t>( y_act, dy_act, weight, bias, eps, activation, activation_param); } }); } void backward_cuda(const at::Tensor& xhat, at::Tensor& dy, const at::Tensor& var, const at::Tensor& count, const at::Tensor& sum_dy, const at::Tensor& sum_xhat_dy, const c10::optional<at::Tensor>& weight, float eps) { const auto& w_scalar_type = weight.has_value() ? weight.value().scalar_type() : xhat.scalar_type(); return DOUBLE_DISPATCH(xhat.scalar_type(), w_scalar_type, "backward_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(xhat)) { backward_template<scalar_t, prmscalar_t, int32_t>(xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, eps); } else { backward_template<scalar_t, prmscalar_t, int64_t>(xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, eps); } }); }
57ca6f265c8f5eee913e7ea359c92ae2634fc23b.cu
// Copyright (c) Facebook, Inc. and its affiliates. #include <tuple> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <c10/cuda/CUDAStream.h> #include <c10/util/Optional.h> #include "inplace_abn.h" #include "utils.h" #include "cuda_utils.cuh" #include "inplace_abn_kernels.cuh" #include "dispatch.h" /*********************************************************************************************************************** * Templated implementations **********************************************************************************************************************/ template<typename scalar_t, typename index_t> std::tuple<at::Tensor, at::Tensor, at::Tensor> statistics_template(const at::Tensor& x_) { // Normalize shape and get dimensions auto x = normalize_shape(x_); auto num = x.size(0), chn = x.size(1), sp = x.size(2); // Type handling using accscalar_t = at::acc_type<scalar_t, true>; auto acc_options = x.options(); if (x.scalar_type() == at::ScalarType::Half) { acc_options = acc_options.dtype(at::ScalarType::Float); } // Initialize output tensors auto mean = at::empty({chn}, acc_options); auto var = at::empty({chn}, acc_options); auto count = at::full({1}, num * sp, x.options().dtype(at::ScalarType::Long)); // Make accessors auto x_accessor = x.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto mean_accessor = mean.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto var_accessor = var.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); // Kernel parameters auto stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(chn); int tf = getNumThreads(sp); dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE / tf)); // Invoke kernel statistics_kernel<scalar_t, accscalar_t, index_t><<<blocks, threads, 0, stream>>>( x_accessor, mean_accessor, var_accessor); return std::make_tuple(mean, var, count); } template<typename scalar_t, typename index_t> std::tuple<at::Tensor, 
at::Tensor, at::Tensor> reduce_statistics_template( const at::Tensor& all_mean, const at::Tensor& all_var, const at::Tensor& all_count) { auto num = all_mean.size(0), chn = all_mean.size(1); // Initialize output tensors auto mean = at::empty({chn}, all_mean.options()); auto var = at::empty({chn}, all_var.options()); auto count = all_count.sum({0}); // Make accessors auto all_mean_accessor = all_mean.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, index_t>(); auto all_var_accessor = all_var.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, index_t>(); auto all_count_accessor = all_count.packed_accessor<int64_t, 2, at::RestrictPtrTraits, index_t>(); auto mean_accessor = mean.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, index_t>(); auto var_accessor = var.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, index_t>(); // Kernel parameters auto stream = at::cuda::getCurrentCUDAStream(); int threads = getNumThreads(chn); int blocks = std::max<int>(1, chn / threads); // Invoke kernel reduce_statistics_kernel<scalar_t, index_t><<<blocks, threads, 0, stream>>>( all_mean_accessor, all_var_accessor, all_count_accessor, mean_accessor, var_accessor); return std::make_tuple(mean, var, count); } template<typename scalar_t, typename prmscalar_t, typename index_t> void forward_template(at::Tensor& x_, const at::Tensor& mean, const at::Tensor& var, const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) { // Normalize shape and get dimensions auto x = normalize_shape(x_); auto num = x.size(0), chn = x.size(1), sp = x.size(2); // Type handling using accscalar_t = at::acc_type<scalar_t, true>; // Make accessors auto x_accessor = x.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto mean_accessor = mean.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto var_accessor = var.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto 
weight_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(weight); auto bias_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(bias); // Kernel parameters auto stream = at::cuda::getCurrentCUDAStream(); int tf = std::max<int>(getNumThreads(sp / 4), std::min<int>(getNumThreads(sp), 64)); int tb = std::max<int>(64 / tf, 1); dim3 blocks(chn, std::max<int>(1, std::min<int>((256 * 1024) / chn, (chn + tb - 1) / tb))); blocks.y = std::min<int>(blocks.y, 65535); dim3 threads(tf, tb); // Invoke kernel switch (activation) { case Activation::LeakyReLU: forward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::LeakyReLU><<<blocks, threads, 0, stream>>>( x_accessor, mean_accessor, var_accessor, weight_accessor, bias_accessor, eps, activation_param); break; case Activation::ELU: forward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::ELU><<<blocks, threads, 0, stream>>>( x_accessor, mean_accessor, var_accessor, weight_accessor, bias_accessor, eps, activation_param); break; case Activation::Identity: forward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::Identity><<<blocks, threads, 0, stream>>>( x_accessor, mean_accessor, var_accessor, weight_accessor, bias_accessor, eps, activation_param); break; } } template<typename scalar_t, typename prmscalar_t, typename index_t> std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_template( const at::Tensor& y_act_, const at::Tensor& dy_act_, const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) { // Normalize shape and get dimensions auto y_act = normalize_shape(y_act_); auto dy_act = normalize_shape(dy_act_); auto num = y_act.size(0), chn = y_act.size(1), sp = y_act.size(2); // Type handling using accscalar_t = at::acc_type<scalar_t, true>; auto acc_options = y_act.options(); if (y_act.scalar_type() == 
at::ScalarType::Half) { acc_options = acc_options.dtype(at::ScalarType::Float); } // Initialize output tensors auto xhat = at::empty_like(y_act); auto dy = at::empty_like(y_act); auto sum_dy = at::empty({chn}, acc_options); auto sum_xhat_dy = at::empty({chn}, acc_options); // Make accessors auto y_act_accessor = y_act.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto dy_act_accessor = dy_act.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto xhat_accessor = xhat.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto dy_accessor = dy.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto weight_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(weight); auto bias_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(bias); auto sum_dy_accessor = sum_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto sum_xhat_dy_accessor = sum_xhat_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); // Kernel parameters auto stream = at::cuda::getCurrentCUDAStream(); int block_y = std::min<int>(lastPow2(num), MAX_BLOCK_SIZE / 32); int block_x = std::min<int>(getNumThreads(sp), MAX_BLOCK_SIZE / block_y); const dim3 threads(block_x, block_y); const dim3 blocks(chn); // Invoke kernel switch (activation) { case Activation::LeakyReLU: backward_reduce_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::LeakyReLU><<<blocks, threads, 0, stream>>>( y_act_accessor, dy_act_accessor, weight_accessor, bias_accessor, xhat_accessor, dy_accessor, sum_dy_accessor, sum_xhat_dy_accessor, eps, activation_param); break; case Activation::ELU: backward_reduce_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::ELU><<<blocks, threads, 0, stream>>>( y_act_accessor, dy_act_accessor, weight_accessor, bias_accessor, xhat_accessor, dy_accessor, sum_dy_accessor, sum_xhat_dy_accessor, eps, activation_param); break; case 
Activation::Identity: backward_reduce_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::Identity><<<blocks, threads, 0, stream>>>( y_act_accessor, dy_act_accessor, weight_accessor, bias_accessor, xhat_accessor, dy_accessor, sum_dy_accessor, sum_xhat_dy_accessor, eps, activation_param); break; } return std::make_tuple(xhat.view(y_act_.sizes()), dy.view(y_act_.sizes()), sum_dy, sum_xhat_dy); } template<typename scalar_t, typename prmscalar_t, typename index_t> void backward_template(const at::Tensor& xhat_, at::Tensor& dy_, const at::Tensor& var, const at::Tensor& count, const at::Tensor& sum_dy, const at::Tensor& sum_xhat_dy, const c10::optional<at::Tensor>& weight, float eps) { // Normalize shape and get dimensions auto xhat = normalize_shape(xhat_); auto dy = normalize_shape(dy_); auto num = xhat.size(0), chn = xhat.size(1), sp = xhat.size(2); // Type handling using accscalar_t = at::acc_type<scalar_t, true>; // Make accessors auto xhat_accessor = xhat.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto dy_accessor = dy.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>(); auto var_accessor = var.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto count_accessor = count.packed_accessor<int64_t, 1, at::RestrictPtrTraits, index_t>(); auto sum_dy_accessor = sum_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto sum_xhat_dy_accessor = sum_xhat_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>(); auto weight_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(weight); // Kernel parameters auto stream = at::cuda::getCurrentCUDAStream(); int tf = std::max<int>(getNumThreads(sp / 4), std::min<int>(getNumThreads(sp), 64)); int tb = std::max<int>(64 / tf, 1); dim3 blocks(chn, std::max<int>(1, std::min<int>((256 * 1024) / chn, (chn + tb - 1) / tb))); blocks.y = std::min<int>(blocks.y, 65535); dim3 threads(tf, tb); // Invoke kernel 
backward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t><<<blocks, threads, 0, stream>>>( xhat_accessor, dy_accessor, var_accessor, count_accessor, sum_dy_accessor, sum_xhat_dy_accessor, weight_accessor, eps); } /*********************************************************************************************************************** * Interface methods **********************************************************************************************************************/ std::tuple<at::Tensor, at::Tensor, at::Tensor> statistics_cuda(const at::Tensor& x) { return AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "statistics_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(x)) { return statistics_template<scalar_t, int32_t>(x); } else { return statistics_template<scalar_t, int64_t>(x); } }); } std::tuple<at::Tensor, at::Tensor, at::Tensor> reduce_statistics_cuda( const at::Tensor& all_mean, const at::Tensor& all_var, const at::Tensor& all_count) { return AT_DISPATCH_FLOATING_TYPES(all_mean.scalar_type(), "reduce_statistics_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(all_mean)) { return reduce_statistics_template<scalar_t, int32_t>(all_mean, all_var, all_count); } else { return reduce_statistics_template<scalar_t, int64_t>(all_mean, all_var, all_count); } }); } void forward_cuda(at::Tensor& x, const at::Tensor& mean, const at::Tensor& var, const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) { const auto& w_scalar_type = weight.has_value() ? 
weight.value().scalar_type() : x.scalar_type(); DOUBLE_DISPATCH(x.scalar_type(), w_scalar_type, "forward_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(x)) { forward_template<scalar_t, prmscalar_t, int32_t>(x, mean, var, weight, bias, eps, activation, activation_param); } else { forward_template<scalar_t, prmscalar_t, int64_t>(x, mean, var, weight, bias, eps, activation, activation_param); } }); } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_cuda( const at::Tensor& y_act, const at::Tensor& dy_act, const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) { const auto& w_scalar_type = weight.has_value() ? weight.value().scalar_type() : y_act.scalar_type(); return DOUBLE_DISPATCH(y_act.scalar_type(), w_scalar_type, "backward_reduce_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(y_act)) { return backward_reduce_template<scalar_t, prmscalar_t, int32_t>( y_act, dy_act, weight, bias, eps, activation, activation_param); } else { return backward_reduce_template<scalar_t, prmscalar_t, int64_t>( y_act, dy_act, weight, bias, eps, activation, activation_param); } }); } void backward_cuda(const at::Tensor& xhat, at::Tensor& dy, const at::Tensor& var, const at::Tensor& count, const at::Tensor& sum_dy, const at::Tensor& sum_xhat_dy, const c10::optional<at::Tensor>& weight, float eps) { const auto& w_scalar_type = weight.has_value() ? weight.value().scalar_type() : xhat.scalar_type(); return DOUBLE_DISPATCH(xhat.scalar_type(), w_scalar_type, "backward_cuda", [&] { if (at::cuda::detail::canUse32BitIndexMath(xhat)) { backward_template<scalar_t, prmscalar_t, int32_t>(xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, eps); } else { backward_template<scalar_t, prmscalar_t, int64_t>(xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, eps); } }); }
graph_executor.hip
// !!! This is a file automatically generated by hipify!!! // $ nvcc -std=c++11 --expt-extended-lambda -I.. graph_executor.cu #include <iostream> #include <array> #include <graph_executor.hpp> #include <void_sender.hpp> #include <when_all.hpp> int main() { // Create a CUDA stream to launch kernels on. hipStream_t stream; hipStreamCreate(&stream); // Create a graph_executor from the stream. graph_executor ex(stream); // graph_executor works by constructing a graph of dependent kernels. // Nodes in the graph are represented as "Senders", which are an [experimental // C++ proposal](https://wg21.link/P1194) under consideration for future standardization. // This example program will create a simple diamond-shaped graph structure: // // root_node // / \ // node_a node_b // \ / // node_c // // In this graph, the direction of time flows from the top to the bottom. // The execution of work at node_a and node_b depend on the completion of root_node's work, // while node_c depends on the completion of work at node_b and node_c. // To begin describing the graph, we define a root node with a "void_sender". // A void_sender depends on nothing, and does no actual work. It is a no-op. void_sender root_node; // Nodes A, B, and C will contain actual kernel launches. To keep things simple, // we'll launch a single thread at each of these nodes grid_index shape{dim3(1), dim3(1)}; // Nodes representing kernel launches are created using graph_executor::bulk_then_execute, // which defines a CUDA kernel launch dependent on another node. // Node A is represented with a "kernel_sender". This is a sender type which corresponds to a kernel launch. kernel_sender node_a = ex.bulk_then_execute( // The function object defining the kernel is bulk_then_execute's first parameter // We've used an inline __device__ lambda function here, but we could have also used a C++ functor. 
[] __device__ (grid_index idx) { dim3 block_idx = idx[0]; dim3 thread_idx = idx[1]; printf("Hello, world from thread (%d, %d) of Node A!\n", block_idx.x, thread_idx.x); }, // The shape of the kernel launch is the second parameter. shape, // The sender on which this kernel launch depends is the third parameter. root_node ); // Node B is defined similarly. kernel_sender node_b = ex.bulk_then_execute( [] __device__ (grid_index idx) { dim3 block_idx = idx[0]; dim3 thread_idx = idx[1]; printf("Hello, world from thread (%d, %d) of Node B!\n", block_idx.x, thread_idx.x); }, shape, root_node ); // Node C depends on both Nodes A & B. However, kernels launched through bulk_then_execute // only receive a single sender object as a dependency. // To represent multiple dependencies, we can create a "join_sender" with when_all. // when_all receives an executor and a collection of senders as parameters. // We can use std::array as our collection, but any type with .begin() and .end() iterators will do. // Since kernel_sender is a move-only type, we need to move them into std::array's constructor: std::array<kernel_sender,2> nodes_a_and_b{std::move(node_a), std::move(node_b)}; // Create a sender corresponding to both Nodes A & B's completion with when_all. join_sender when_nodes_a_and_b = when_all(ex, nodes_a_and_b); // Now, we can define Node C similarly to Nodes A & B: kernel_sender node_c = ex.bulk_then_execute( [] __device__ (grid_index idx) { dim3 block_idx = idx[0]; dim3 thread_idx = idx[1]; printf("Hello, world from thread (%d, %d) of Node C!\n", block_idx.x, thread_idx.x); }, shape, when_nodes_a_and_b ); // At this point, we have described our entire computation as a graph. // However, nothing will happen until we submit the graph for execution. // Submit the graph for execution by calling .submit() on the terminal node in the graph. node_c.submit(); // While the graph executes, we can do other work asynchronously on the host. std::cout << "Graph submitted for execution." 
<< std::endl; // To synchronize with the completion of the graph, call .sync_wait() on the graph's terminal node. node_c.sync_wait(); // At this point, the hello world messages should have been printed to the terminal. Because they are // independent of one another in the graph, Nodes A & B's messages may be output in any order. // However, because Node C depended on both Nodes A & B, Node C's message should follow both // Nodes A & B's messages. // While the process of defining this simple graph may seem complex, we anticipate that conveniences // will simplify it in the future. // Finally, destroy the CUDA stream. hipStreamDestroy(stream); std::cout << "OK" << std::endl; }
graph_executor.cu
// $ nvcc -std=c++11 --expt-extended-lambda -I.. graph_executor.cu #include <iostream> #include <array> #include <graph_executor.hpp> #include <void_sender.hpp> #include <when_all.hpp> int main() { // Create a CUDA stream to launch kernels on. cudaStream_t stream; cudaStreamCreate(&stream); // Create a graph_executor from the stream. graph_executor ex(stream); // graph_executor works by constructing a graph of dependent kernels. // Nodes in the graph are represented as "Senders", which are an [experimental // C++ proposal](https://wg21.link/P1194) under consideration for future standardization. // This example program will create a simple diamond-shaped graph structure: // // root_node // / \ // node_a node_b // \ / // node_c // // In this graph, the direction of time flows from the top to the bottom. // The execution of work at node_a and node_b depend on the completion of root_node's work, // while node_c depends on the completion of work at node_b and node_c. // To begin describing the graph, we define a root node with a "void_sender". // A void_sender depends on nothing, and does no actual work. It is a no-op. void_sender root_node; // Nodes A, B, and C will contain actual kernel launches. To keep things simple, // we'll launch a single thread at each of these nodes grid_index shape{dim3(1), dim3(1)}; // Nodes representing kernel launches are created using graph_executor::bulk_then_execute, // which defines a CUDA kernel launch dependent on another node. // Node A is represented with a "kernel_sender". This is a sender type which corresponds to a kernel launch. kernel_sender node_a = ex.bulk_then_execute( // The function object defining the kernel is bulk_then_execute's first parameter // We've used an inline __device__ lambda function here, but we could have also used a C++ functor. 
[] __device__ (grid_index idx) { dim3 block_idx = idx[0]; dim3 thread_idx = idx[1]; printf("Hello, world from thread (%d, %d) of Node A!\n", block_idx.x, thread_idx.x); }, // The shape of the kernel launch is the second parameter. shape, // The sender on which this kernel launch depends is the third parameter. root_node ); // Node B is defined similarly. kernel_sender node_b = ex.bulk_then_execute( [] __device__ (grid_index idx) { dim3 block_idx = idx[0]; dim3 thread_idx = idx[1]; printf("Hello, world from thread (%d, %d) of Node B!\n", block_idx.x, thread_idx.x); }, shape, root_node ); // Node C depends on both Nodes A & B. However, kernels launched through bulk_then_execute // only receive a single sender object as a dependency. // To represent multiple dependencies, we can create a "join_sender" with when_all. // when_all receives an executor and a collection of senders as parameters. // We can use std::array as our collection, but any type with .begin() and .end() iterators will do. // Since kernel_sender is a move-only type, we need to move them into std::array's constructor: std::array<kernel_sender,2> nodes_a_and_b{std::move(node_a), std::move(node_b)}; // Create a sender corresponding to both Nodes A & B's completion with when_all. join_sender when_nodes_a_and_b = when_all(ex, nodes_a_and_b); // Now, we can define Node C similarly to Nodes A & B: kernel_sender node_c = ex.bulk_then_execute( [] __device__ (grid_index idx) { dim3 block_idx = idx[0]; dim3 thread_idx = idx[1]; printf("Hello, world from thread (%d, %d) of Node C!\n", block_idx.x, thread_idx.x); }, shape, when_nodes_a_and_b ); // At this point, we have described our entire computation as a graph. // However, nothing will happen until we submit the graph for execution. // Submit the graph for execution by calling .submit() on the terminal node in the graph. node_c.submit(); // While the graph executes, we can do other work asynchronously on the host. std::cout << "Graph submitted for execution." 
<< std::endl; // To synchronize with the completion of the graph, call .sync_wait() on the graph's terminal node. node_c.sync_wait(); // At this point, the hello world messages should have been printed to the terminal. Because they are // independent of one another in the graph, Nodes A & B's messages may be output in any order. // However, because Node C depended on both Nodes A & B, Node C's message should follow both // Nodes A & B's messages. // While the process of defining this simple graph may seem complex, we anticipate that conveniences // will simplify it in the future. // Finally, destroy the CUDA stream. cudaStreamDestroy(stream); std::cout << "OK" << std::endl; }
e1b0ad0a4db0e49002e51049a037fb8304b40e26.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ int d_vertex_count = 0; __device__ int d_egdes_count = 0; __global__ void histogramKernel(const edge2_t* __restrict__ d_batch, int* __restrict__ d_histogram, int* __restrict__ d_aux, int batch_size) { int id = blockIdx.x * 256 + threadIdx.x; int stride = gridDim.x * 256; for (int i = id; i < batch_size; i += stride) d_aux[i] = atomicAdd(d_histogram + d_batch[i].x, 1); } __global__ void detectKernel(const edge2_t* __restrict__ d_batch, int* __restrict__ d_histogram, int* __restrict__ d_aux, edge_t* __restrict__ d_ovesize_vertices, edge2_t* __restrict__ d_ovesize_edges, edge_t* __restrict__ d_edge_block, int batch_size, int threshold) { int id = blockIdx.x * 256 + threadIdx.x; int stride = gridDim.x * 256; int size = xlib::upper_approx_u<xlib::WARP_SIZE>(batch_size); const int REGS = 16; edge2_t edge_queue[REGS/2]; edge_t vertex_queue[REGS]; int edge_count = 0, vertex_count = 0; for (int i = id; i < size; i += stride) { if (i < batch_size) { edge2_t item = d_batch[i]; if (d_histogram[item.x] > threshold) { //printf("%d\t%d\n", item.x, item.y); edge_queue[edge_count++] = item; if (d_aux[i] == 0) vertex_queue[vertex_count++] = item.x; } else d_edge_block[d_aux[i]] = item.x; } if (__any(edge_count == REGS/2)) { xlib::QueueWarp<xlib::cuQUEUE_MODE::SIMPLE> ::store(edge_queue, edge_count, d_ovesize_edges, &d_egdes_count); edge_count = 0; } if (__any(vertex_count == REGS)) { xlib::QueueWarp<xlib::cuQUEUE_MODE::SIMPLE> ::store(vertex_queue, vertex_count, d_ovesize_vertices, &d_vertex_count); vertex_count = 0; } } xlib::QueueWarp<xlib::cuQUEUE_MODE::SIMPLE> ::store(edge_queue, edge_count, d_ovesize_edges, &d_egdes_count); xlib::QueueWarp<xlib::cuQUEUE_MODE::SIMPLE> ::store(vertex_queue, vertex_count, d_ovesize_vertices, &d_vertex_count); } void detectTest(edge2_t* batch, int batch_size, int V, int threshold, bool debug) { edge2_t* d_batch, *d_ovesize_edges; int* d_histogram, 
*d_aux; edge_t* d_ovesize_vertices, *d_edge_block; SAFE_CALL( hipMalloc(&d_batch, batch_size * sizeof(edge2_t)) ); SAFE_CALL( hipMalloc(&d_histogram, V * sizeof(int)) ); SAFE_CALL( hipMalloc(&d_ovesize_vertices, batch_size * sizeof(edge_t)) ); SAFE_CALL( hipMalloc(&d_ovesize_edges, batch_size * sizeof(edge2_t)) ); SAFE_CALL( hipMalloc(&d_edge_block, batch_size * sizeof(edge_t)) ); SAFE_CALL( hipMalloc(&d_aux, batch_size * sizeof(int)) ); SAFE_CALL( hipMemset(d_histogram, 0x0, V * sizeof(int)) ); SAFE_CALL( hipMemset(d_aux, 0x0, batch_size * sizeof(int)) ); SAFE_CALL( hipMemcpy(d_batch, batch, batch_size * sizeof(edge2_t), hipMemcpyHostToDevice) ); //-------------------------------------------------------------------------- timer2::Timer<timer2::DEVICE> TM; TM.start(); hipLaunchKernelGGL(( histogramKernel), dim3(xlib::uceil_div<256>(batch_size)), dim3(256), 0, 0, d_batch, d_histogram, d_aux, batch_size); hipLaunchKernelGGL(( detectKernel), dim3(xlib::uceil_div<256>(batch_size)), dim3(256), 0, 0, d_batch, d_histogram, d_aux, d_ovesize_vertices, d_ovesize_edges, d_edge_block, batch_size, threshold); TM.stop(); TM.print("DetectKernel"); CHECK_CUDA_ERROR //-------------------------------------------------------------------------- int h_oversize; SAFE_CALL(hipMemcpyFromSymbol(&h_oversize, d_vertex_count, sizeof(int))); std::cout << " n. 
of oversize: " << h_oversize << "\n\n"; if (debug) { auto h_histogram = new int[V]; SAFE_CALL(hipMemcpy(h_histogram, d_histogram, V * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < V; i++) std::cout << h_histogram[i] << "\n"; std::cout << std::endl; delete[] h_histogram; auto h_aux = new int[batch_size]; SAFE_CALL(hipMemcpy(h_aux, d_aux, batch_size * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) std::cout << h_aux[i] << "\n"; std::cout << std::endl; delete[] h_aux; /*auto h_aux = new int[batch_size]; SAFE_CALL(hipMemcpy(h_aux, d_aux, batch_size * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) std::cout << h_aux[i] << "\n"; std::cout << std::endl; delete[] h_aux;*/ auto h_out = new edge_t[h_oversize]; SAFE_CALL(hipMemcpy(h_out, d_ovesize_vertices, h_oversize * sizeof(edge_t), hipMemcpyDeviceToHost)); for (int i = 0; i < h_oversize; i++) std::cout << h_out[i] << "\n"; std::cout << std::endl; delete[] h_out; } SAFE_CALL( hipFree(d_batch) ); SAFE_CALL( hipFree(d_histogram) ); SAFE_CALL( hipFree(d_ovesize_vertices) ); SAFE_CALL( hipFree(d_aux) ); }
e1b0ad0a4db0e49002e51049a037fb8304b40e26.cu
__device__ int d_vertex_count = 0; __device__ int d_egdes_count = 0; __global__ void histogramKernel(const edge2_t* __restrict__ d_batch, int* __restrict__ d_histogram, int* __restrict__ d_aux, int batch_size) { int id = blockIdx.x * 256 + threadIdx.x; int stride = gridDim.x * 256; for (int i = id; i < batch_size; i += stride) d_aux[i] = atomicAdd(d_histogram + d_batch[i].x, 1); } __global__ void detectKernel(const edge2_t* __restrict__ d_batch, int* __restrict__ d_histogram, int* __restrict__ d_aux, edge_t* __restrict__ d_ovesize_vertices, edge2_t* __restrict__ d_ovesize_edges, edge_t* __restrict__ d_edge_block, int batch_size, int threshold) { int id = blockIdx.x * 256 + threadIdx.x; int stride = gridDim.x * 256; int size = xlib::upper_approx_u<xlib::WARP_SIZE>(batch_size); const int REGS = 16; edge2_t edge_queue[REGS/2]; edge_t vertex_queue[REGS]; int edge_count = 0, vertex_count = 0; for (int i = id; i < size; i += stride) { if (i < batch_size) { edge2_t item = d_batch[i]; if (d_histogram[item.x] > threshold) { //printf("%d\t%d\n", item.x, item.y); edge_queue[edge_count++] = item; if (d_aux[i] == 0) vertex_queue[vertex_count++] = item.x; } else d_edge_block[d_aux[i]] = item.x; } if (__any(edge_count == REGS/2)) { xlib::QueueWarp<xlib::cuQUEUE_MODE::SIMPLE> ::store(edge_queue, edge_count, d_ovesize_edges, &d_egdes_count); edge_count = 0; } if (__any(vertex_count == REGS)) { xlib::QueueWarp<xlib::cuQUEUE_MODE::SIMPLE> ::store(vertex_queue, vertex_count, d_ovesize_vertices, &d_vertex_count); vertex_count = 0; } } xlib::QueueWarp<xlib::cuQUEUE_MODE::SIMPLE> ::store(edge_queue, edge_count, d_ovesize_edges, &d_egdes_count); xlib::QueueWarp<xlib::cuQUEUE_MODE::SIMPLE> ::store(vertex_queue, vertex_count, d_ovesize_vertices, &d_vertex_count); } void detectTest(edge2_t* batch, int batch_size, int V, int threshold, bool debug) { edge2_t* d_batch, *d_ovesize_edges; int* d_histogram, *d_aux; edge_t* d_ovesize_vertices, *d_edge_block; SAFE_CALL( cudaMalloc(&d_batch, 
batch_size * sizeof(edge2_t)) ); SAFE_CALL( cudaMalloc(&d_histogram, V * sizeof(int)) ); SAFE_CALL( cudaMalloc(&d_ovesize_vertices, batch_size * sizeof(edge_t)) ); SAFE_CALL( cudaMalloc(&d_ovesize_edges, batch_size * sizeof(edge2_t)) ); SAFE_CALL( cudaMalloc(&d_edge_block, batch_size * sizeof(edge_t)) ); SAFE_CALL( cudaMalloc(&d_aux, batch_size * sizeof(int)) ); SAFE_CALL( cudaMemset(d_histogram, 0x0, V * sizeof(int)) ); SAFE_CALL( cudaMemset(d_aux, 0x0, batch_size * sizeof(int)) ); SAFE_CALL( cudaMemcpy(d_batch, batch, batch_size * sizeof(edge2_t), cudaMemcpyHostToDevice) ); //-------------------------------------------------------------------------- timer2::Timer<timer2::DEVICE> TM; TM.start(); histogramKernel<<<xlib::uceil_div<256>(batch_size), 256>>> (d_batch, d_histogram, d_aux, batch_size); detectKernel<<<xlib::uceil_div<256>(batch_size), 256>>> (d_batch, d_histogram, d_aux, d_ovesize_vertices, d_ovesize_edges, d_edge_block, batch_size, threshold); TM.stop(); TM.print("DetectKernel"); CHECK_CUDA_ERROR //-------------------------------------------------------------------------- int h_oversize; SAFE_CALL(cudaMemcpyFromSymbol(&h_oversize, d_vertex_count, sizeof(int))); std::cout << " n. 
of oversize: " << h_oversize << "\n\n"; if (debug) { auto h_histogram = new int[V]; SAFE_CALL(cudaMemcpy(h_histogram, d_histogram, V * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < V; i++) std::cout << h_histogram[i] << "\n"; std::cout << std::endl; delete[] h_histogram; auto h_aux = new int[batch_size]; SAFE_CALL(cudaMemcpy(h_aux, d_aux, batch_size * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) std::cout << h_aux[i] << "\n"; std::cout << std::endl; delete[] h_aux; /*auto h_aux = new int[batch_size]; SAFE_CALL(cudaMemcpy(h_aux, d_aux, batch_size * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) std::cout << h_aux[i] << "\n"; std::cout << std::endl; delete[] h_aux;*/ auto h_out = new edge_t[h_oversize]; SAFE_CALL(cudaMemcpy(h_out, d_ovesize_vertices, h_oversize * sizeof(edge_t), cudaMemcpyDeviceToHost)); for (int i = 0; i < h_oversize; i++) std::cout << h_out[i] << "\n"; std::cout << std::endl; delete[] h_out; } SAFE_CALL( cudaFree(d_batch) ); SAFE_CALL( cudaFree(d_histogram) ); SAFE_CALL( cudaFree(d_ovesize_vertices) ); SAFE_CALL( cudaFree(d_aux) ); }
4498d8359861358cd01dfa9177c7b6af61911ff7.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <thrust/iterator/discard_iterator.h> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/densegrid.inl" #include "cupoch/geometry/geometry_functor.h" #include "cupoch/geometry/intersection_test.h" #include "cupoch/geometry/occupancygrid.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/utility/eigen.h" namespace cupoch { namespace geometry { namespace { __constant__ float voxel_offset[7][3] = {{0, 0, 0}, {1, 0, 0}, {-1, 0, 0}, {0, 1, 0}, {0, -1, 0}, {0, 0, 1}, {0, 0, -1}}; struct extract_range_voxels_functor { extract_range_voxels_functor(const Eigen::Vector3i& extents, int resolution, const Eigen::Vector3i& min_bound) : extents_(extents), resolution_(resolution), min_bound_(min_bound){}; const Eigen::Vector3i extents_; const int resolution_; const Eigen::Vector3i min_bound_; __device__ int operator()(size_t idx) const { int x = idx / (extents_[1] * extents_[2]); int yz = idx % (extents_[1] * extents_[2]); int y = yz / extents_[2]; int z = yz % extents_[2]; Eigen::Vector3i gidx = min_bound_ + Eigen::Vector3i(x, y, z); return IndexOf(gidx, resolution_); } }; struct compute_intersect_voxel_segment_functor { compute_intersect_voxel_segment_functor( const Eigen::Vector3f* points, const Eigen::Vector3f* steps, const Eigen::Vector3f& viewpoint, const Eigen::Vector3i& half_resolution, float voxel_size, const Eigen::Vector3f& origin, int n_div) : points_(points), steps_(steps), viewpoint_(viewpoint), half_resolution_(half_resolution), voxel_size_(voxel_size), box_half_size_(Eigen::Vector3f( voxel_size / 2, voxel_size / 2, voxel_size / 2)), origin_(origin), n_div_(n_div){}; const Eigen::Vector3f* points_; const Eigen::Vector3f* steps_; const Eigen::Vector3f viewpoint_; const Eigen::Vector3i half_resolution_; const float voxel_size_; const Eigen::Vector3f box_half_size_; const Eigen::Vector3f origin_; const int n_div_; __device__ Eigen::Vector3i operator()(size_t idx) { int pidx = idx / (n_div_ * 
7); int svidx = idx % (n_div_ * 7); int sidx = svidx / 7; int vidx = svidx % 7; Eigen::Vector3f center = sidx * steps_[pidx] + viewpoint_; Eigen::Vector3f voxel_idx = Eigen::device_vectorize<float, 3, ::floor>( (center - origin_) / voxel_size_); Eigen::Vector3f voxel_center = voxel_size_ * (voxel_idx + Eigen::Vector3f(voxel_offset[vidx][0], voxel_offset[vidx][1], voxel_offset[vidx][2])); bool is_intersect = intersection_test::LineSegmentAABB( viewpoint_, points_[pidx], voxel_center - box_half_size_, voxel_center + box_half_size_); return (is_intersect) ? voxel_idx.cast<int>() + half_resolution_ : Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX); } }; void ComputeFreeVoxels(const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float voxel_size, int resolution, Eigen::Vector3f& origin, const utility::device_vector<Eigen::Vector3f>& steps, int n_div, utility::device_vector<Eigen::Vector3i>& free_voxels) { if (points.empty()) return; size_t n_points = points.size(); size_t max_idx = resolution * resolution * resolution; Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); free_voxels.resize(n_div * n_points * 7); compute_intersect_voxel_segment_functor func( thrust::raw_pointer_cast(points.data()), thrust::raw_pointer_cast(steps.data()), viewpoint, half_resolution, voxel_size, origin, n_div); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_div * n_points * 7), free_voxels.begin(), func); auto end1 = thrust::remove_if( free_voxels.begin(), free_voxels.end(), [max_idx] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= max_idx || idx[1] >= max_idx || idx[2] >= max_idx; }); free_voxels.resize(thrust::distance(free_voxels.begin(), end1)); thrust::sort(free_voxels.begin(), free_voxels.end()); auto end2 = thrust::unique(free_voxels.begin(), 
free_voxels.end()); free_voxels.resize(thrust::distance(free_voxels.begin(), end2)); } struct create_occupancy_voxels_functor { create_occupancy_voxels_functor(const Eigen::Vector3f& origin, const Eigen::Vector3i& half_resolution, float voxel_size) : origin_(origin), half_resolution_(half_resolution), voxel_size_(voxel_size){}; const Eigen::Vector3f origin_; const Eigen::Vector3i half_resolution_; const float voxel_size_; __device__ Eigen::Vector3i operator()( const thrust::tuple<Eigen::Vector3f, bool>& x) const { const Eigen::Vector3f& point = thrust::get<0>(x); bool hit_flag = thrust::get<1>(x); Eigen::Vector3f ref_coord = (point - origin_) / voxel_size_; return (hit_flag) ? Eigen::device_vectorize<float, 3, ::floor>(ref_coord) .cast<int>() + half_resolution_ : Eigen::Vector3i(INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX); ; } }; void ComputeOccupiedVoxels( const utility::device_vector<Eigen::Vector3f>& points, const utility::device_vector<bool> hit_flags, float voxel_size, int resolution, Eigen::Vector3f& origin, utility::device_vector<Eigen::Vector3i>& occupied_voxels) { occupied_voxels.resize(points.size()); size_t max_idx = resolution * resolution * resolution; Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); create_occupancy_voxels_functor func(origin, half_resolution, voxel_size); thrust::transform(make_tuple_begin(points, hit_flags), make_tuple_end(points, hit_flags), occupied_voxels.begin(), func); auto end1 = thrust::remove_if( occupied_voxels.begin(), occupied_voxels.end(), [max_idx] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= max_idx || idx[1] >= max_idx || idx[2] >= max_idx; }); occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end1)); thrust::sort(occupied_voxels.begin(), occupied_voxels.end()); auto end2 = thrust::unique(occupied_voxels.begin(), occupied_voxels.end()); 
occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end2)); } struct add_occupancy_functor { add_occupancy_functor(OccupancyVoxel* voxels, int resolution, float clamping_thres_min, float clamping_thres_max, float prob_miss_log, float prob_hit_log, bool occupied) : voxels_(voxels), resolution_(resolution), clamping_thres_min_(clamping_thres_min), clamping_thres_max_(clamping_thres_max), prob_miss_log_(prob_miss_log), prob_hit_log_(prob_hit_log), occupied_(occupied){}; OccupancyVoxel* voxels_; const int resolution_; const float clamping_thres_min_; const float clamping_thres_max_; const float prob_miss_log_; const float prob_hit_log_; const bool occupied_; __device__ void operator()(const Eigen::Vector3i& voxel) { size_t idx = IndexOf(voxel, resolution_); float p = voxels_[idx].prob_log_; p = (isnan(p)) ? 0 : p; p += (occupied_) ? prob_hit_log_ : prob_miss_log_; voxels_[idx].prob_log_ = min(max(p, clamping_thres_min_), clamping_thres_max_); voxels_[idx].grid_index_ = voxel.cast<unsigned short>(); } }; } // namespace template class DenseGrid<OccupancyVoxel>; OccupancyGrid::OccupancyGrid() : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, 0.05, 512, Eigen::Vector3f::Zero()), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::OccupancyGrid(float voxel_size, int resolution, const Eigen::Vector3f& origin) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, voxel_size, resolution, origin), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::~OccupancyGrid() {} OccupancyGrid::OccupancyGrid(const OccupancyGrid& other) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, other), min_bound_(other.min_bound_), max_bound_(other.max_bound_), clamping_thres_min_(other.clamping_thres_min_), clamping_thres_max_(other.clamping_thres_max_), 
prob_hit_log_(other.prob_hit_log_), prob_miss_log_(other.prob_miss_log_), occ_prob_thres_log_(other.occ_prob_thres_log_), visualize_free_area_(other.visualize_free_area_) {} OccupancyGrid& OccupancyGrid::Clear() { DenseGrid::Clear(); min_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); max_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); return *this; } Eigen::Vector3f OccupancyGrid::GetMinBound() const { return (min_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2)) .cast<float>() * voxel_size_ - origin_; } Eigen::Vector3f OccupancyGrid::GetMaxBound() const { return (max_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2 - 1)) .cast<float>() * voxel_size_ - origin_; } bool OccupancyGrid::IsOccupied(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return false; OccupancyVoxel voxel = voxels_[idx]; return !std::isnan(voxel.prob_log_) && voxel.prob_log_ > occ_prob_thres_log_; } bool OccupancyGrid::IsUnknown(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return true; OccupancyVoxel voxel = voxels_[idx]; return std::isnan(voxel.prob_log_); } thrust::tuple<bool, OccupancyVoxel> OccupancyGrid::GetVoxel( const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return thrust::make_tuple(false, OccupancyVoxel()); OccupancyVoxel voxel = voxels_[idx]; return thrust::make_tuple(!std::isnan(voxel.prob_log_), voxel); } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractBoundVoxels() const { Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones(); auto out = std::make_shared<utility::device_vector<OccupancyVoxel>>(); out->resize(diff[0] * diff[1] * diff[2]); extract_range_voxels_functor func(diff.cast<int>(), resolution_, min_bound_.cast<int>()); thrust::copy(thrust::make_permutation_iterator(voxels_.begin(), thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func)), 
thrust::make_permutation_iterator(voxels_.begin(), thrust::make_transform_iterator(thrust::make_counting_iterator(out->size()), func)), out->begin()); return out; } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractKnownVoxels() const { auto out = ExtractBoundVoxels(); auto remove_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return isnan(v.prob_log_); }; remove_if_vectors(remove_fn, *out); return out; } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractFreeVoxels() const { auto out = ExtractBoundVoxels(); auto remove_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return isnan(v.prob_log_) || v.prob_log_ > th; }; remove_if_vectors(remove_fn, *out); return out; } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractOccupiedVoxels() const { auto out = ExtractBoundVoxels(); auto remove_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return isnan(v.prob_log_) || v.prob_log_ <= th; }; remove_if_vectors(remove_fn, *out); return out; } OccupancyGrid& OccupancyGrid::Reconstruct(float voxel_size, int resolution) { DenseGrid::Reconstruct(voxel_size, resolution); return *this; } OccupancyGrid& OccupancyGrid::Insert( const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { if (points.empty()) return *this; utility::device_vector<Eigen::Vector3f> ranged_points(points.size()); utility::device_vector<float> ranged_dists(points.size()); utility::device_vector<bool> hit_flags(points.size()); thrust::transform( points.begin(), points.end(), make_tuple_begin(ranged_points, ranged_dists, hit_flags), [viewpoint, max_range] __device__(const Eigen::Vector3f& pt) { Eigen::Vector3f pt_vp = pt - viewpoint; float 
dist = pt_vp.norm(); bool is_hit = max_range < 0 || dist <= max_range; return thrust::make_tuple( (is_hit) ? pt : viewpoint + pt_vp / dist * max_range, (is_hit) ? dist : max_range, is_hit); }); float max_dist = *(thrust::max_element(ranged_dists.begin(), ranged_dists.end())); int n_div = int(::ceil(max_dist / voxel_size_)); utility::device_vector<Eigen::Vector3i> free_voxels; utility::device_vector<Eigen::Vector3i> occupied_voxels; if (n_div > 0) { utility::device_vector<Eigen::Vector3f> steps(points.size()); thrust::transform( ranged_points.begin(), ranged_points.end(), steps.begin(), [viewpoint, n_div] __device__(const Eigen::Vector3f& pt) { return (pt - viewpoint) / n_div; }); // comupute free voxels ComputeFreeVoxels(ranged_points, viewpoint, voxel_size_, resolution_, origin_, steps, n_div + 1, free_voxels); } else { thrust::copy(points.begin(), points.end(), ranged_points.begin()); thrust::fill(hit_flags.begin(), hit_flags.end(), true); } // compute occupied voxels ComputeOccupiedVoxels(ranged_points, hit_flags, voxel_size_, resolution_, origin_, occupied_voxels); if (n_div > 0) { utility::device_vector<Eigen::Vector3i> free_voxels_res( free_voxels.size()); auto end = thrust::set_difference( free_voxels.begin(), free_voxels.end(), occupied_voxels.begin(), occupied_voxels.end(), free_voxels_res.begin()); free_voxels_res.resize(thrust::distance(free_voxels_res.begin(), end)); AddVoxels(free_voxels_res, false); } AddVoxels(occupied_voxels, true); return *this; } OccupancyGrid& OccupancyGrid::Insert( const thrust::host_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { utility::device_vector<Eigen::Vector3f> dev_points = points; return Insert(dev_points, viewpoint, max_range); } OccupancyGrid& OccupancyGrid::Insert(const geometry::PointCloud& pointcloud, const Eigen::Vector3f& viewpoint, float max_range) { Insert(pointcloud.points_, viewpoint, max_range); return *this; } OccupancyGrid& OccupancyGrid::AddVoxel(const 
Eigen::Vector3i& voxel, bool occupied) { int idx = IndexOf(voxel, resolution_); size_t max_idx = resolution_ * resolution_ * resolution_; if (idx < 0 || idx >= max_idx) { utility::LogError( "[OccupancyGrid] a provided voxeld is not occupancy grid " "range."); return *this; } else { OccupancyVoxel org_ov = voxels_[idx]; if (std::isnan(org_ov.prob_log_)) org_ov.prob_log_ = 0.0; org_ov.prob_log_ += (occupied) ? prob_hit_log_ : prob_miss_log_; org_ov.prob_log_ = ::min(::max(org_ov.prob_log_, clamping_thres_min_), clamping_thres_max_); org_ov.grid_index_ = voxel.cast<unsigned short>(); voxels_[idx] = org_ov; min_bound_ = min_bound_.array().min(org_ov.grid_index_.array()); max_bound_ = max_bound_.array().max(org_ov.grid_index_.array()); } return *this; } OccupancyGrid& OccupancyGrid::AddVoxels( const utility::device_vector<Eigen::Vector3i>& voxels, bool occupied) { if (voxels.empty()) return *this; Eigen::Vector3i fv = voxels.front(); Eigen::Vector3i bv = voxels.back(); Eigen::Vector3ui16 fvu = fv.cast<unsigned short>(); Eigen::Vector3ui16 bvu = bv.cast<unsigned short>(); min_bound_ = min_bound_.array().min(fvu.array()); min_bound_ = min_bound_.array().min(bvu.array()); max_bound_ = max_bound_.array().max(fvu.array()); max_bound_ = max_bound_.array().max(bvu.array()); add_occupancy_functor func(thrust::raw_pointer_cast(voxels_.data()), resolution_, clamping_thres_min_, clamping_thres_max_, prob_miss_log_, prob_hit_log_, occupied); thrust::for_each(voxels.begin(), voxels.end(), func); return *this; } } // namespace geometry } // namespace cupoch
4498d8359861358cd01dfa9177c7b6af61911ff7.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <thrust/iterator/discard_iterator.h> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/densegrid.inl" #include "cupoch/geometry/geometry_functor.h" #include "cupoch/geometry/intersection_test.h" #include "cupoch/geometry/occupancygrid.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/utility/eigen.h" namespace cupoch { namespace geometry { namespace { __constant__ float voxel_offset[7][3] = {{0, 0, 0}, {1, 0, 0}, {-1, 0, 0}, {0, 1, 0}, {0, -1, 0}, {0, 0, 1}, {0, 0, -1}}; struct extract_range_voxels_functor { extract_range_voxels_functor(const Eigen::Vector3i& extents, int resolution, const Eigen::Vector3i& min_bound) : extents_(extents), resolution_(resolution), min_bound_(min_bound){}; const Eigen::Vector3i extents_; const int resolution_; const Eigen::Vector3i min_bound_; __device__ int operator()(size_t idx) const { int x = idx / (extents_[1] * extents_[2]); int yz = idx % (extents_[1] * extents_[2]); int y = yz / extents_[2]; int z = yz % extents_[2]; Eigen::Vector3i gidx = min_bound_ + Eigen::Vector3i(x, y, z); return IndexOf(gidx, resolution_); } }; struct compute_intersect_voxel_segment_functor { compute_intersect_voxel_segment_functor( const Eigen::Vector3f* points, const Eigen::Vector3f* steps, const Eigen::Vector3f& viewpoint, const Eigen::Vector3i& half_resolution, float voxel_size, const Eigen::Vector3f& origin, int n_div) : points_(points), steps_(steps), viewpoint_(viewpoint), half_resolution_(half_resolution), voxel_size_(voxel_size), box_half_size_(Eigen::Vector3f( voxel_size / 2, voxel_size / 2, voxel_size / 2)), origin_(origin), n_div_(n_div){}; const Eigen::Vector3f* points_; const Eigen::Vector3f* steps_; const Eigen::Vector3f viewpoint_; const Eigen::Vector3i half_resolution_; const float voxel_size_; const Eigen::Vector3f box_half_size_; const Eigen::Vector3f origin_; const int n_div_; __device__ Eigen::Vector3i operator()(size_t idx) { int pidx = idx / (n_div_ * 
7); int svidx = idx % (n_div_ * 7); int sidx = svidx / 7; int vidx = svidx % 7; Eigen::Vector3f center = sidx * steps_[pidx] + viewpoint_; Eigen::Vector3f voxel_idx = Eigen::device_vectorize<float, 3, ::floor>( (center - origin_) / voxel_size_); Eigen::Vector3f voxel_center = voxel_size_ * (voxel_idx + Eigen::Vector3f(voxel_offset[vidx][0], voxel_offset[vidx][1], voxel_offset[vidx][2])); bool is_intersect = intersection_test::LineSegmentAABB( viewpoint_, points_[pidx], voxel_center - box_half_size_, voxel_center + box_half_size_); return (is_intersect) ? voxel_idx.cast<int>() + half_resolution_ : Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX); } }; void ComputeFreeVoxels(const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float voxel_size, int resolution, Eigen::Vector3f& origin, const utility::device_vector<Eigen::Vector3f>& steps, int n_div, utility::device_vector<Eigen::Vector3i>& free_voxels) { if (points.empty()) return; size_t n_points = points.size(); size_t max_idx = resolution * resolution * resolution; Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); free_voxels.resize(n_div * n_points * 7); compute_intersect_voxel_segment_functor func( thrust::raw_pointer_cast(points.data()), thrust::raw_pointer_cast(steps.data()), viewpoint, half_resolution, voxel_size, origin, n_div); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_div * n_points * 7), free_voxels.begin(), func); auto end1 = thrust::remove_if( free_voxels.begin(), free_voxels.end(), [max_idx] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= max_idx || idx[1] >= max_idx || idx[2] >= max_idx; }); free_voxels.resize(thrust::distance(free_voxels.begin(), end1)); thrust::sort(free_voxels.begin(), free_voxels.end()); auto end2 = thrust::unique(free_voxels.begin(), 
free_voxels.end()); free_voxels.resize(thrust::distance(free_voxels.begin(), end2)); } struct create_occupancy_voxels_functor { create_occupancy_voxels_functor(const Eigen::Vector3f& origin, const Eigen::Vector3i& half_resolution, float voxel_size) : origin_(origin), half_resolution_(half_resolution), voxel_size_(voxel_size){}; const Eigen::Vector3f origin_; const Eigen::Vector3i half_resolution_; const float voxel_size_; __device__ Eigen::Vector3i operator()( const thrust::tuple<Eigen::Vector3f, bool>& x) const { const Eigen::Vector3f& point = thrust::get<0>(x); bool hit_flag = thrust::get<1>(x); Eigen::Vector3f ref_coord = (point - origin_) / voxel_size_; return (hit_flag) ? Eigen::device_vectorize<float, 3, ::floor>(ref_coord) .cast<int>() + half_resolution_ : Eigen::Vector3i(INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX); ; } }; void ComputeOccupiedVoxels( const utility::device_vector<Eigen::Vector3f>& points, const utility::device_vector<bool> hit_flags, float voxel_size, int resolution, Eigen::Vector3f& origin, utility::device_vector<Eigen::Vector3i>& occupied_voxels) { occupied_voxels.resize(points.size()); size_t max_idx = resolution * resolution * resolution; Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); create_occupancy_voxels_functor func(origin, half_resolution, voxel_size); thrust::transform(make_tuple_begin(points, hit_flags), make_tuple_end(points, hit_flags), occupied_voxels.begin(), func); auto end1 = thrust::remove_if( occupied_voxels.begin(), occupied_voxels.end(), [max_idx] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= max_idx || idx[1] >= max_idx || idx[2] >= max_idx; }); occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end1)); thrust::sort(occupied_voxels.begin(), occupied_voxels.end()); auto end2 = thrust::unique(occupied_voxels.begin(), occupied_voxels.end()); 
occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end2)); } struct add_occupancy_functor { add_occupancy_functor(OccupancyVoxel* voxels, int resolution, float clamping_thres_min, float clamping_thres_max, float prob_miss_log, float prob_hit_log, bool occupied) : voxels_(voxels), resolution_(resolution), clamping_thres_min_(clamping_thres_min), clamping_thres_max_(clamping_thres_max), prob_miss_log_(prob_miss_log), prob_hit_log_(prob_hit_log), occupied_(occupied){}; OccupancyVoxel* voxels_; const int resolution_; const float clamping_thres_min_; const float clamping_thres_max_; const float prob_miss_log_; const float prob_hit_log_; const bool occupied_; __device__ void operator()(const Eigen::Vector3i& voxel) { size_t idx = IndexOf(voxel, resolution_); float p = voxels_[idx].prob_log_; p = (isnan(p)) ? 0 : p; p += (occupied_) ? prob_hit_log_ : prob_miss_log_; voxels_[idx].prob_log_ = min(max(p, clamping_thres_min_), clamping_thres_max_); voxels_[idx].grid_index_ = voxel.cast<unsigned short>(); } }; } // namespace template class DenseGrid<OccupancyVoxel>; OccupancyGrid::OccupancyGrid() : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, 0.05, 512, Eigen::Vector3f::Zero()), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::OccupancyGrid(float voxel_size, int resolution, const Eigen::Vector3f& origin) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, voxel_size, resolution, origin), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::~OccupancyGrid() {} OccupancyGrid::OccupancyGrid(const OccupancyGrid& other) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, other), min_bound_(other.min_bound_), max_bound_(other.max_bound_), clamping_thres_min_(other.clamping_thres_min_), clamping_thres_max_(other.clamping_thres_max_), 
prob_hit_log_(other.prob_hit_log_), prob_miss_log_(other.prob_miss_log_), occ_prob_thres_log_(other.occ_prob_thres_log_), visualize_free_area_(other.visualize_free_area_) {} OccupancyGrid& OccupancyGrid::Clear() { DenseGrid::Clear(); min_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); max_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); return *this; } Eigen::Vector3f OccupancyGrid::GetMinBound() const { return (min_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2)) .cast<float>() * voxel_size_ - origin_; } Eigen::Vector3f OccupancyGrid::GetMaxBound() const { return (max_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2 - 1)) .cast<float>() * voxel_size_ - origin_; } bool OccupancyGrid::IsOccupied(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return false; OccupancyVoxel voxel = voxels_[idx]; return !std::isnan(voxel.prob_log_) && voxel.prob_log_ > occ_prob_thres_log_; } bool OccupancyGrid::IsUnknown(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return true; OccupancyVoxel voxel = voxels_[idx]; return std::isnan(voxel.prob_log_); } thrust::tuple<bool, OccupancyVoxel> OccupancyGrid::GetVoxel( const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return thrust::make_tuple(false, OccupancyVoxel()); OccupancyVoxel voxel = voxels_[idx]; return thrust::make_tuple(!std::isnan(voxel.prob_log_), voxel); } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractBoundVoxels() const { Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones(); auto out = std::make_shared<utility::device_vector<OccupancyVoxel>>(); out->resize(diff[0] * diff[1] * diff[2]); extract_range_voxels_functor func(diff.cast<int>(), resolution_, min_bound_.cast<int>()); thrust::copy(thrust::make_permutation_iterator(voxels_.begin(), thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func)), 
thrust::make_permutation_iterator(voxels_.begin(), thrust::make_transform_iterator(thrust::make_counting_iterator(out->size()), func)), out->begin()); return out; } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractKnownVoxels() const { auto out = ExtractBoundVoxels(); auto remove_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return isnan(v.prob_log_); }; remove_if_vectors(remove_fn, *out); return out; } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractFreeVoxels() const { auto out = ExtractBoundVoxels(); auto remove_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return isnan(v.prob_log_) || v.prob_log_ > th; }; remove_if_vectors(remove_fn, *out); return out; } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractOccupiedVoxels() const { auto out = ExtractBoundVoxels(); auto remove_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return isnan(v.prob_log_) || v.prob_log_ <= th; }; remove_if_vectors(remove_fn, *out); return out; } OccupancyGrid& OccupancyGrid::Reconstruct(float voxel_size, int resolution) { DenseGrid::Reconstruct(voxel_size, resolution); return *this; } OccupancyGrid& OccupancyGrid::Insert( const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { if (points.empty()) return *this; utility::device_vector<Eigen::Vector3f> ranged_points(points.size()); utility::device_vector<float> ranged_dists(points.size()); utility::device_vector<bool> hit_flags(points.size()); thrust::transform( points.begin(), points.end(), make_tuple_begin(ranged_points, ranged_dists, hit_flags), [viewpoint, max_range] __device__(const Eigen::Vector3f& pt) { Eigen::Vector3f pt_vp = pt - viewpoint; float 
dist = pt_vp.norm(); bool is_hit = max_range < 0 || dist <= max_range; return thrust::make_tuple( (is_hit) ? pt : viewpoint + pt_vp / dist * max_range, (is_hit) ? dist : max_range, is_hit); }); float max_dist = *(thrust::max_element(ranged_dists.begin(), ranged_dists.end())); int n_div = int(std::ceil(max_dist / voxel_size_)); utility::device_vector<Eigen::Vector3i> free_voxels; utility::device_vector<Eigen::Vector3i> occupied_voxels; if (n_div > 0) { utility::device_vector<Eigen::Vector3f> steps(points.size()); thrust::transform( ranged_points.begin(), ranged_points.end(), steps.begin(), [viewpoint, n_div] __device__(const Eigen::Vector3f& pt) { return (pt - viewpoint) / n_div; }); // comupute free voxels ComputeFreeVoxels(ranged_points, viewpoint, voxel_size_, resolution_, origin_, steps, n_div + 1, free_voxels); } else { thrust::copy(points.begin(), points.end(), ranged_points.begin()); thrust::fill(hit_flags.begin(), hit_flags.end(), true); } // compute occupied voxels ComputeOccupiedVoxels(ranged_points, hit_flags, voxel_size_, resolution_, origin_, occupied_voxels); if (n_div > 0) { utility::device_vector<Eigen::Vector3i> free_voxels_res( free_voxels.size()); auto end = thrust::set_difference( free_voxels.begin(), free_voxels.end(), occupied_voxels.begin(), occupied_voxels.end(), free_voxels_res.begin()); free_voxels_res.resize(thrust::distance(free_voxels_res.begin(), end)); AddVoxels(free_voxels_res, false); } AddVoxels(occupied_voxels, true); return *this; } OccupancyGrid& OccupancyGrid::Insert( const thrust::host_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { utility::device_vector<Eigen::Vector3f> dev_points = points; return Insert(dev_points, viewpoint, max_range); } OccupancyGrid& OccupancyGrid::Insert(const geometry::PointCloud& pointcloud, const Eigen::Vector3f& viewpoint, float max_range) { Insert(pointcloud.points_, viewpoint, max_range); return *this; } OccupancyGrid& OccupancyGrid::AddVoxel(const 
Eigen::Vector3i& voxel, bool occupied) { int idx = IndexOf(voxel, resolution_); size_t max_idx = resolution_ * resolution_ * resolution_; if (idx < 0 || idx >= max_idx) { utility::LogError( "[OccupancyGrid] a provided voxeld is not occupancy grid " "range."); return *this; } else { OccupancyVoxel org_ov = voxels_[idx]; if (std::isnan(org_ov.prob_log_)) org_ov.prob_log_ = 0.0; org_ov.prob_log_ += (occupied) ? prob_hit_log_ : prob_miss_log_; org_ov.prob_log_ = std::min(std::max(org_ov.prob_log_, clamping_thres_min_), clamping_thres_max_); org_ov.grid_index_ = voxel.cast<unsigned short>(); voxels_[idx] = org_ov; min_bound_ = min_bound_.array().min(org_ov.grid_index_.array()); max_bound_ = max_bound_.array().max(org_ov.grid_index_.array()); } return *this; } OccupancyGrid& OccupancyGrid::AddVoxels( const utility::device_vector<Eigen::Vector3i>& voxels, bool occupied) { if (voxels.empty()) return *this; Eigen::Vector3i fv = voxels.front(); Eigen::Vector3i bv = voxels.back(); Eigen::Vector3ui16 fvu = fv.cast<unsigned short>(); Eigen::Vector3ui16 bvu = bv.cast<unsigned short>(); min_bound_ = min_bound_.array().min(fvu.array()); min_bound_ = min_bound_.array().min(bvu.array()); max_bound_ = max_bound_.array().max(fvu.array()); max_bound_ = max_bound_.array().max(bvu.array()); add_occupancy_functor func(thrust::raw_pointer_cast(voxels_.data()), resolution_, clamping_thres_min_, clamping_thres_max_, prob_miss_log_, prob_hit_log_, occupied); thrust::for_each(voxels.begin(), voxels.end(), func); return *this; } } // namespace geometry } // namespace cupoch
bd0491cd95840aa6c70771590d4ba6b5624cfca5.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2013, Texas State University-San Marcos. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted for academic, research, experimental, or personal use provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University-San Marcos nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. For all other uses, please contact the Office for Commercialization and Industry Relations at Texas State University-San Marcos <http://www.txstate.edu/ocir/>. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Author: Martin Burtscher (in collaboration with Ivan Zecena and Ziliang Zong) */ //compile //nvcc -I../include -O3 -w gpuLogToFile.cu -o gpuToFile -L/usr/lib64/nvidia -lnvidia-ml //only used for rainbow-panda server which has four devices #include <stdio.h> #include <unistd.h> #include <sys/time.h> #include <sys/types.h> #include <signal.h> #include "rocm_smi/rocm_smi.h" #define DEVICE0 0 #define DEVICE1 1 #define DEVICE2 2 #define DEVICE3 3 double secondsSince(struct timeval *startTime) { struct timeval currentTime; gettimeofday(&currentTime, NULL); return ((currentTime.tv_sec*1e6 + currentTime.tv_usec) - (startTime->tv_sec*1e6 + startTime->tv_usec)) / 1e6; } static inline double getTime() { struct timeval time; gettimeofday(&time, NULL); return time.tv_sec + time.tv_usec * 0.000001; } static void initAndTest(uint32_t *device0, uint32_t *device1, uint32_t *device2, uint32_t *device3) { rsmi_status_t result; nvmlMemory_t mem; unsigned int power; result = nvmlInit(); if (RSMI_STATUS_SUCCESS != result) { printf("failed to initialize NVML: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetHandleByIndex(DEVICE0, device0); if (RSMI_STATUS_SUCCESS != result) { printf("failed to get handle for device: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetHandleByIndex(DEVICE1, device1); if (RSMI_STATUS_SUCCESS != result) { printf("failed to get handle for device: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetHandleByIndex(DEVICE2, device2); if (RSMI_STATUS_SUCCESS != result) { printf("failed to get handle for device: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetHandleByIndex(DEVICE3, device3); if (RSMI_STATUS_SUCCESS != result) { printf("failed to get handle for device: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetPowerUsage(*device0, &power); if (RSMI_STATUS_SUCCESS != result) { printf("failed to read power: %s\n", nvmlErrorString(result)); exit(1); } result = 
nvmlDeviceGetPowerUsage(*device1, &power); if (RSMI_STATUS_SUCCESS != result) { printf("failed to read power: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetPowerUsage(*device2, &power); if (RSMI_STATUS_SUCCESS != result) { printf("failed to read power: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetPowerUsage(*device3, &power); if (RSMI_STATUS_SUCCESS != result) { printf("failed to read power: %s\n", nvmlErrorString(result)); exit(1); } } static inline void getInfo ( uint32_t device0, uint32_t device1, uint32_t device2, uint32_t device3, unsigned int *power0, unsigned int *power1, unsigned int *power2, unsigned int *power3, unsigned int *temp0, unsigned int *temp1, unsigned int *temp2, unsigned int *temp3, nvmlUtilization_t *u0, nvmlUtilization_t *u1, nvmlUtilization_t *u2, nvmlUtilization_t *u3, FILE* outputFile, struct timeval *startTime ) { nvmlDeviceGetPowerUsage(device0, power0); *power0 *= .001; nvmlDeviceGetPowerUsage(device1, power1); *power1 *= .001; nvmlDeviceGetPowerUsage(device2, power2); *power2 *= .001; nvmlDeviceGetPowerUsage(device3, power3); *power3 *= .001; nvmlDeviceGetTemperature(device0, NVML_TEMPERATURE_GPU, temp0); nvmlDeviceGetTemperature(device1, NVML_TEMPERATURE_GPU, temp1); nvmlDeviceGetTemperature(device2, NVML_TEMPERATURE_GPU, temp2); nvmlDeviceGetTemperature(device3, NVML_TEMPERATURE_GPU, temp3); nvmlDeviceGetUtilizationRates(device0, u0); nvmlDeviceGetUtilizationRates(device1, u1); nvmlDeviceGetUtilizationRates(device2, u2); nvmlDeviceGetUtilizationRates(device3, u3); unsigned int total_power; total_power = *power0 + *power1 + *power2 + *power3; struct timeval currentTime; double time_interval; gettimeofday(&currentTime, NULL); //get device utilization api only support fermi and quadro architrcture cards. //for panda server, it contains RTX 2080ti cards which don't have supportion. 
//util->gpu return gpu utilization, util->memory return gpu memory utilization time_interval = ((currentTime.tv_sec*1e6 + currentTime.tv_usec) - (startTime->tv_sec*1e6 + startTime->tv_usec)) / 1e6; fprintf(outputFile, "%f, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n", secondsSince(startTime), total_power, *power0, *temp0, u0->gpu, *power1, *temp1, u1->gpu, *power2, *temp2, u2->gpu, *power3, *temp3, u3->gpu ); } static void sigterm_hdl(int sig) { nvmlShutdown(); exit(1); } int main(int argc, char *argv[]) { uint32_t device0, device1, device2, device3; unsigned int power0, power1, power2, power3, delay_us; unsigned int temp0, temp1, temp2, temp3; nvmlUtilization_t u0, u1, u2, u3; if (argc != 3 || atoi(argv[1]) <= 0) { fprintf(stderr, "Usage: %s [sampling rate (Hz)] [output filename]\n", argv[0]); return 1; } delay_us = 1e6 / atoi(argv[1]); char filename[512]; char hostname[9]; hostname[8] = NULL; gethostname(hostname, 8); snprintf(filename, 512, "%s_GPU-%s.csv", hostname, argv[2]); FILE *outputFile = fopen(filename, "w"); if (outputFile == NULL) { fprintf(stderr, "Unable to open output file.\n"); return 1; } setbuf(outputFile, NULL); if (delay_us <= 0) { fprintf(stderr, "[GPU meter]: Sampling delay must be a nonnegative integer."); return 1; } // SIGTERM handler struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = sigterm_hdl; if (sigaction(SIGTERM, &sa, 0)) { fprintf(stderr,"[GPU meter]: Sigaction failed.\n"); exit(1); } initAndTest(&device0, &device1, &device2, &device3); // We write this 'Y' to STDOUT so master_meter will know that we're ready to start logging. // The master meter will block until this has been read. char c = 'Y'; write(STDOUT_FILENO, &c, 1); fprintf(outputFile, "Time(S), Total(w), power0(W), temp0(C), util0, power1(W), temp1(C), util1, power2(W), temp2(C), util1, power3(W), temp3(C), util3\n"); // Begin power measurement. 
struct timeval start; gettimeofday(&start, NULL); do { usleep(delay_us); getInfo ( device0, device1, device2, device3, &power0, &power1, &power2, &power3, &temp0, &temp1, &temp2, &temp3, &u0, &u1, &u2, &u3, outputFile, &start ); } while(1); }
bd0491cd95840aa6c70771590d4ba6b5624cfca5.cu
/* Copyright (c) 2013, Texas State University-San Marcos. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted for academic, research, experimental, or personal use provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University-San Marcos nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. For all other uses, please contact the Office for Commercialization and Industry Relations at Texas State University-San Marcos <http://www.txstate.edu/ocir/>. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Author: Martin Burtscher (in collaboration with Ivan Zecena and Ziliang Zong) */ //compile //nvcc -I../include -O3 -w gpuLogToFile.cu -o gpuToFile -L/usr/lib64/nvidia -lnvidia-ml //only used for rainbow-panda server which has four devices #include <stdio.h> #include <unistd.h> #include <sys/time.h> #include <sys/types.h> #include <signal.h> #include "nvml.h" #define DEVICE0 0 #define DEVICE1 1 #define DEVICE2 2 #define DEVICE3 3 double secondsSince(struct timeval *startTime) { struct timeval currentTime; gettimeofday(&currentTime, NULL); return ((currentTime.tv_sec*1e6 + currentTime.tv_usec) - (startTime->tv_sec*1e6 + startTime->tv_usec)) / 1e6; } static inline double getTime() { struct timeval time; gettimeofday(&time, NULL); return time.tv_sec + time.tv_usec * 0.000001; } static void initAndTest(nvmlDevice_t *device0, nvmlDevice_t *device1, nvmlDevice_t *device2, nvmlDevice_t *device3) { nvmlReturn_t result; nvmlMemory_t mem; unsigned int power; result = nvmlInit(); if (NVML_SUCCESS != result) { printf("failed to initialize NVML: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetHandleByIndex(DEVICE0, device0); if (NVML_SUCCESS != result) { printf("failed to get handle for device: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetHandleByIndex(DEVICE1, device1); if (NVML_SUCCESS != result) { printf("failed to get handle for device: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetHandleByIndex(DEVICE2, device2); if (NVML_SUCCESS != result) { printf("failed to get handle for device: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetHandleByIndex(DEVICE3, device3); if (NVML_SUCCESS != result) { printf("failed to get handle for device: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetPowerUsage(*device0, &power); if (NVML_SUCCESS != result) { printf("failed to read power: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetPowerUsage(*device1, &power); if (NVML_SUCCESS != 
result) { printf("failed to read power: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetPowerUsage(*device2, &power); if (NVML_SUCCESS != result) { printf("failed to read power: %s\n", nvmlErrorString(result)); exit(1); } result = nvmlDeviceGetPowerUsage(*device3, &power); if (NVML_SUCCESS != result) { printf("failed to read power: %s\n", nvmlErrorString(result)); exit(1); } } static inline void getInfo ( nvmlDevice_t device0, nvmlDevice_t device1, nvmlDevice_t device2, nvmlDevice_t device3, unsigned int *power0, unsigned int *power1, unsigned int *power2, unsigned int *power3, unsigned int *temp0, unsigned int *temp1, unsigned int *temp2, unsigned int *temp3, nvmlUtilization_t *u0, nvmlUtilization_t *u1, nvmlUtilization_t *u2, nvmlUtilization_t *u3, FILE* outputFile, struct timeval *startTime ) { nvmlDeviceGetPowerUsage(device0, power0); *power0 *= .001; nvmlDeviceGetPowerUsage(device1, power1); *power1 *= .001; nvmlDeviceGetPowerUsage(device2, power2); *power2 *= .001; nvmlDeviceGetPowerUsage(device3, power3); *power3 *= .001; nvmlDeviceGetTemperature(device0, NVML_TEMPERATURE_GPU, temp0); nvmlDeviceGetTemperature(device1, NVML_TEMPERATURE_GPU, temp1); nvmlDeviceGetTemperature(device2, NVML_TEMPERATURE_GPU, temp2); nvmlDeviceGetTemperature(device3, NVML_TEMPERATURE_GPU, temp3); nvmlDeviceGetUtilizationRates(device0, u0); nvmlDeviceGetUtilizationRates(device1, u1); nvmlDeviceGetUtilizationRates(device2, u2); nvmlDeviceGetUtilizationRates(device3, u3); unsigned int total_power; total_power = *power0 + *power1 + *power2 + *power3; struct timeval currentTime; double time_interval; gettimeofday(&currentTime, NULL); //get device utilization api only support fermi and quadro architrcture cards. //for panda server, it contains RTX 2080ti cards which don't have supportion. 
//util->gpu return gpu utilization, util->memory return gpu memory utilization time_interval = ((currentTime.tv_sec*1e6 + currentTime.tv_usec) - (startTime->tv_sec*1e6 + startTime->tv_usec)) / 1e6; fprintf(outputFile, "%f, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n", secondsSince(startTime), total_power, *power0, *temp0, u0->gpu, *power1, *temp1, u1->gpu, *power2, *temp2, u2->gpu, *power3, *temp3, u3->gpu ); } static void sigterm_hdl(int sig) { nvmlShutdown(); exit(1); } int main(int argc, char *argv[]) { nvmlDevice_t device0, device1, device2, device3; unsigned int power0, power1, power2, power3, delay_us; unsigned int temp0, temp1, temp2, temp3; nvmlUtilization_t u0, u1, u2, u3; if (argc != 3 || atoi(argv[1]) <= 0) { fprintf(stderr, "Usage: %s [sampling rate (Hz)] [output filename]\n", argv[0]); return 1; } delay_us = 1e6 / atoi(argv[1]); char filename[512]; char hostname[9]; hostname[8] = NULL; gethostname(hostname, 8); snprintf(filename, 512, "%s_GPU-%s.csv", hostname, argv[2]); FILE *outputFile = fopen(filename, "w"); if (outputFile == NULL) { fprintf(stderr, "Unable to open output file.\n"); return 1; } setbuf(outputFile, NULL); if (delay_us <= 0) { fprintf(stderr, "[GPU meter]: Sampling delay must be a nonnegative integer."); return 1; } // SIGTERM handler struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = sigterm_hdl; if (sigaction(SIGTERM, &sa, 0)) { fprintf(stderr,"[GPU meter]: Sigaction failed.\n"); exit(1); } initAndTest(&device0, &device1, &device2, &device3); // We write this 'Y' to STDOUT so master_meter will know that we're ready to start logging. // The master meter will block until this has been read. char c = 'Y'; write(STDOUT_FILENO, &c, 1); fprintf(outputFile, "Time(S), Total(w), power0(W), temp0(C), util0, power1(W), temp1(C), util1, power2(W), temp2(C), util1, power3(W), temp3(C), util3\n"); // Begin power measurement. 
struct timeval start; gettimeofday(&start, NULL); do { usleep(delay_us); getInfo ( device0, device1, device2, device3, &power0, &power1, &power2, &power3, &temp0, &temp1, &temp2, &temp3, &u0, &u1, &u2, &u3, outputFile, &start ); } while(1); }
1b519a966ca528e3095c80e3b570639967f86280.hip
// !!! This is a file automatically generated by hipify!!! // // auto-generated by op2.py // //header #include "op_lib_cpp.h" #include "op_cuda_rt_support.h" #include "op_cuda_reduction.h" //global constants #ifndef MAX_CONST_SIZE #define MAX_CONST_SIZE 128 #endif __constant__ float gam; __constant__ float gm1; __constant__ float cfl; __constant__ float eps; __constant__ float mach; __constant__ float alpha; __constant__ float qinf[4]; void op_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){ if (!OP_hybrid_gpu) return; if (!strcmp(name,"gam")) { cutilSafeCall(hipMemcpyToSymbol(gam, dat, dim*size)); } else if (!strcmp(name,"gm1")) { cutilSafeCall(hipMemcpyToSymbol(gm1, dat, dim*size)); } else if (!strcmp(name,"cfl")) { cutilSafeCall(hipMemcpyToSymbol(cfl, dat, dim*size)); } else if (!strcmp(name,"eps")) { cutilSafeCall(hipMemcpyToSymbol(eps, dat, dim*size)); } else if (!strcmp(name,"mach")) { cutilSafeCall(hipMemcpyToSymbol(mach, dat, dim*size)); } else if (!strcmp(name,"alpha")) { cutilSafeCall(hipMemcpyToSymbol(alpha, dat, dim*size)); } else if (!strcmp(name,"qinf")) { cutilSafeCall(hipMemcpyToSymbol(qinf, dat, dim*size)); } else { printf("error: unknown const name\n"); exit(1); } } //user kernel files #include "save_soln_kernel.cu" #include "adt_calc_kernel.hip" #include "res_calc_kernel.cu" #include "bres_calc_kernel.cu" #include "update_kernel.hip"
1b519a966ca528e3095c80e3b570639967f86280.cu
//
// auto-generated by op2.py
//

//header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"

//global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif

// Device-resident (__constant__) copies of the global constants used by the
// kernels #included at the bottom of this file.
__constant__ float gam;
__constant__ float gm1;
__constant__ float cfl;
__constant__ float eps;
__constant__ float mach;
__constant__ float alpha;
__constant__ float qinf[4];

// Copy one named global constant (`dat`: dim elements of `size` bytes each)
// from host memory into the matching __constant__ symbol above.
// Called by the OP2 runtime; prints and exits on an unrecognized name.
// `type` is accepted but unused. cudaMemcpyToSymbol(symbol, ...) here uses
// the legacy symbol-by-name/reference form accepted by older CUDA toolkits.
void op_decl_const_char(int dim, char const *type,
  int size, char *dat, char const *name){
  if (!OP_hybrid_gpu) return;  // nothing to copy in CPU-only mode
  if (!strcmp(name,"gam")) {
    cutilSafeCall(cudaMemcpyToSymbol(gam, dat, dim*size));
  }
  else
  if (!strcmp(name,"gm1")) {
    cutilSafeCall(cudaMemcpyToSymbol(gm1, dat, dim*size));
  }
  else
  if (!strcmp(name,"cfl")) {
    cutilSafeCall(cudaMemcpyToSymbol(cfl, dat, dim*size));
  }
  else
  if (!strcmp(name,"eps")) {
    cutilSafeCall(cudaMemcpyToSymbol(eps, dat, dim*size));
  }
  else
  if (!strcmp(name,"mach")) {
    cutilSafeCall(cudaMemcpyToSymbol(mach, dat, dim*size));
  }
  else
  if (!strcmp(name,"alpha")) {
    cutilSafeCall(cudaMemcpyToSymbol(alpha, dat, dim*size));
  }
  else
  if (!strcmp(name,"qinf")) {
    cutilSafeCall(cudaMemcpyToSymbol(qinf, dat, dim*size));
  }
  else
  {
    printf("error: unknown const name\n"); exit(1);
  }
}

//user kernel files
#include "save_soln_kernel.cu"
#include "adt_calc_kernel.cu"
#include "res_calc_kernel.cu"
#include "bres_calc_kernel.cu"
#include "update_kernel.cu"
6884842ea31ef35bb71343a1272bce8858bd4a60.hip
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************
 *cr
 *cr         (C) Copyright 2007 The Board of Trustees of the
 *cr                      University of Illinois
 *cr                       All Rights Reserved
 *cr
 ***************************************************************************/
// Parboil "sad" benchmark driver (HIP port): computes H.264-style sums of
// absolute differences between two images on the GPU.

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <inttypes.h>
#include <parboil.h>
#include <hip/hip_runtime.h>

#include "sad.h"
#include "sad4.h"
#include "largerBlocks.h"
#include "file.h"
#include "image.h"

// Print-and-continue error check; does NOT abort on failure.
#define CUDA_ERRCK \
  {hipError_t err = hipGetLastError(); \
   if (err) fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(err)); \
  }

static unsigned short *
load_sads(char *filename);
static void
write_sads(char *filename,
           int image_width_macroblocks,
           int image_height_macroblocks,
           unsigned short *sads);
static void
write_sads_directly(char *filename,
                    int width,
                    int height,
                    unsigned short *sads);

/* FILE I/O */

// Load reference 4x4-block SAD values from `filename`.
// File layout (written by the matching generator): two 16-bit dimensions in
// macroblocks, then 25 per-macroblock SAD vectors for the larger block types
// (skipped here), then the 16 4x4 vectors this function returns.
// MAX_POS_PADDED and the read16u helper come from sad.h / file.h (not shown
// here). NOTE(review): fread/malloc results are unchecked — a short or
// missing payload yields uninitialized data.
unsigned short *
load_sads(char *filename)
{
  FILE *infile;
  unsigned short *sads;
  int w;
  int h;
  int sads_per_block;

  infile = fopen(filename, "r");

  if (!infile)
    {
      fprintf(stderr, "Cannot find file '%s'\n", filename);
      exit(-1);
    }

  /* Read image dimensions (measured in macroblocks) */
  w = read16u(infile);
  h = read16u(infile);

  /* Read SAD values.  Only interested in the 4x4 SAD values, which are
   * at the end of the file. */
  sads_per_block = MAX_POS_PADDED * (w * h);
  fseek(infile, 25 * sads_per_block * sizeof(unsigned short), SEEK_CUR);

  sads = (unsigned short *)malloc(sads_per_block * 16 * sizeof(unsigned short));
  fread(sads, sizeof(unsigned short), sads_per_block * 16, infile);
  fclose(infile);

  return sads;
}

/* Compare the reference SADs to the expected SADs.
 * Only the 4x4 sub-block SADs (the last 16 vectors per macroblock) are
 * compared; prints "Success." or a mismatch message to stdout. */
void
check_sads(unsigned short *sads_reference,
           unsigned short *sads_computed,
           int image_size_macroblocks)
{
  int block;

  /* Check the 4x4 SAD values.  These are in sads_reference.
   * Ignore the data at the beginning of sads_computed. */
  sads_computed += 25 * MAX_POS_PADDED * image_size_macroblocks;

  for (block = 0; block < image_size_macroblocks; block++)
    {
      int subblock;

      for (subblock = 0; subblock < 16; subblock++)
        {
          int sad_index;

          for (sad_index = 0; sad_index < MAX_POS; sad_index++)
            {
              int index =
                (block * 16 + subblock) * MAX_POS_PADDED + sad_index;

              if (sads_reference[index] != sads_computed[index])
                {
#if 0
                  /* Print exactly where the mismatch was seen */
                  printf("M %3d %2d %4d (%d = %d)\n", block, subblock, sad_index, sads_reference[index], sads_computed[index]);
#else
                  goto mismatch;
#endif
                }
            }
        }
    }

  printf("Success.\n");
  return;

 mismatch:
  printf("Computed SADs do not match expected values.\n");
}

/* Extract the SAD data for a particular block type for a particular
 * macroblock from the array of SADs of that block type.
 * Writes `count` vectors of MAX_POS 16-bit values to `outfile`. */
static inline void
write_subblocks(FILE *outfile, unsigned short *subblock_array, int macroblock,
                int count)
{
  int block;
  int pos;

  for (block = 0; block < count; block++)
    {
      unsigned short *vec = subblock_array +
        (block + macroblock * count) * MAX_POS_PADDED;

      /* Write all SADs for this sub-block */
      for (pos = 0; pos < MAX_POS; pos++)
        write16u(outfile, *vec++);
    }
}

/* Write some SAD data to a file for output checking.
 *
 * All SAD values for six rows of macroblocks are written.
 * The six rows consist of the top two, middle two, and bottom two
 * image rows.
 * SAD_TYPE_IX/SAD_TYPE_CT map a block type (1..7) to its offset/count
 * in the flat SAD array (defined in sad.h). */
void
write_sads(char *filename,
           int mb_width,
           int mb_height,
           unsigned short *sads)
{
  FILE *outfile = fopen(filename, "w");
  int mbs = mb_width * mb_height;
  int row_indir;
  int row_indices[6] = {0, 1,
                        mb_height / 2 - 1, mb_height / 2,
                        mb_height - 2, mb_height - 1};

  if (outfile == NULL)
    {
      fprintf(stderr, "Cannot open output file\n");
      exit(-1);
    }

  /* Write the number of output macroblocks */
  write32u(outfile, mb_width * 6);

  /* Write zeros */
  write32u(outfile, 0);

  /* Each row */
  for (row_indir = 0; row_indir < 6; row_indir++)
    {
      int row = row_indices[row_indir];

      /* Each block in row */
      int block;
      for (block = mb_width * row; block < mb_width * (row + 1); block++)
        {
          int blocktype;

          /* Write SADs for all sub-block types */
          for (blocktype = 1; blocktype <= 7; blocktype++)
            write_subblocks(outfile,
                            sads + SAD_TYPE_IX(blocktype, mbs),
                            block,
                            SAD_TYPE_CT(blocktype));
        }
    }

  fclose(outfile);
}

/* FILE I/O for debugging */

// Dump the entire SAD array (41 vectors per macroblock — presumably
// 16+8+8+4+2+2+1 across the 7 block types; confirm in sad.h) with the
// dimensions as a header. Debug aid only.
static void
write_sads_directly(char *filename,
                    int width,
                    int height,
                    unsigned short *sads)
{
  FILE *f = fopen(filename, "w");
  int n;

  write16u(f, width);
  write16u(f, height);
  for (n = 0; n < 41 * MAX_POS_PADDED * (width * height); n++)
    {
      write16u(f, sads[n]);
    }
  fclose(f);
}

// Print one SAD value per sub-block of `macroblock` at a fixed search
// position, for eyeballing during debugging.
static void
print_test_sad_vector(unsigned short *base, int macroblock, int count)
{
  int n;
  int searchpos = 17*33+17;
  for (n = 0; n < count; n++)
    printf(" %d", base[(count * macroblock + n) * MAX_POS_PADDED + searchpos]);
}

// Print the test vector above for every block type of macroblock 5.
static void
print_test_sads(unsigned short *sads_computed,
                int mbs)
{
  int macroblock = 5;
  int blocktype;

  for (blocktype = 1; blocktype <= 7; blocktype++)
    {
      printf("%d:", blocktype);
      print_test_sad_vector(sads_computed + SAD_TYPE_IX(blocktype, mbs),
                            macroblock, SAD_TYPE_CT(blocktype));
      puts("\n");
    }
}

/* MAIN */

// Driver: load two images, run the 4x4 SAD kernel followed by the two
// larger-block aggregation kernels on the device, copy results back, and
// optionally write them out. Image dimensions must be multiples of 16.
int
main(int argc, char **argv)
{
  struct image_i16 *ref_image;
  struct image_i16 *cur_image;
  unsigned short *sads_computed; /* SADs generated by the program */

  int image_size_bytes;
  int image_width_macroblocks, image_height_macroblocks;
  int image_size_macroblocks;

  struct pb_TimerSet timers;
  struct pb_Parameters *params;

  pb_InitializeTimerSet(&timers);
  params = pb_ReadParameters(&argc, argv);

  if (pb_Parameters_CountInputs(params) != 2)
    {
      fprintf(stderr, "Expecting two input filenames\n");
      exit(-1);
    }

  /* Read input files */
  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  ref_image = load_image(params->inpFiles[0]);
  cur_image = load_image(params->inpFiles[1]);
  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

  /* Compute parameters, allocate memory */
  if ((ref_image->width != cur_image->width) ||
      (ref_image->height != cur_image->height))
    {
      fprintf(stderr, "Input images must be the same size\n");
      exit(-1);
    }
  if ((ref_image->width % 16) || (ref_image->height % 16))
    {
      fprintf(stderr, "Input image size must be an integral multiple of 16\n");
      exit(-1);
    }

  image_size_bytes = ref_image->width * ref_image->height * sizeof(short);
  image_width_macroblocks = ref_image->width >> 4;
  image_height_macroblocks = ref_image->height >> 4;
  image_size_macroblocks = image_width_macroblocks * image_height_macroblocks;

  sads_computed = (unsigned short *)
    malloc(41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(short));

  /* Run the kernel code */
  {
    struct hipArray *ref_ary;   /* Reference image on the device */
    short *d_cur_image;         /* Current image on the device */
    unsigned short *d_sads;     /* SADs on the device */
    dim3 macroblock_grid(image_width_macroblocks, image_height_macroblocks);

    pb_SwitchToTimer(&timers, pb_TimerID_COPY);
    hipMalloc((void **)&d_cur_image, image_size_bytes);
    CUDA_ERRCK
    /* get_ref() returns the texture reference bound below (declared in a
       project header). */
    hipMallocArray(&ref_ary, &get_ref().channelDesc,
                   ref_image->width, ref_image->height);
    CUDA_ERRCK

    /* Transfer current image to device */
    hipMemcpy(d_cur_image, cur_image->data, image_size_bytes,
              hipMemcpyHostToDevice);
    CUDA_ERRCK

    /* Transfer reference image to device */
    hipMemcpy2DToArray(ref_ary, 0, 0, ref_image->data,
                       ref_image->width * sizeof(unsigned short),
                       ref_image->width * sizeof(unsigned short),
                       ref_image->height,
                       hipMemcpyHostToDevice);
    CUDA_ERRCK
    hipBindTextureToArray(get_ref(), ref_ary);
    CUDA_ERRCK

    /* Allocate SAD data on the device */
    hipMalloc((void **)&d_sads, 41 * MAX_POS_PADDED *
              image_size_macroblocks * sizeof(unsigned short));
    CUDA_ERRCK
    hipMemset(d_sads, 0, 41 * MAX_POS_PADDED *
              image_size_macroblocks * sizeof(unsigned short));
    CUDA_ERRCK

    pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);

    /* std::cout memory footprint */
    int memory_footprint = 0;
    memory_footprint += 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short); //d_sads
    memory_footprint += image_size_bytes; //d_cur_image
    printf("\n#### mb_sad_calc memory_footprint:%d ####\n", memory_footprint);
    /* std::cout memory footprint */

    /* Run the 4x4 kernel (grid/block sizing macros from sad4.h) */
    hipLaunchKernelGGL(( mb_sad_calc), dim3(CEIL(ref_image->width / 4, THREADS_W),
                      CEIL(ref_image->height / 4, THREADS_H)),
      dim3(dim3(CEIL(MAX_POS, POS_PER_THREAD) * THREADS_W * THREADS_H)),
      SAD_LOC_SIZE_BYTES, 0,
       d_sads,
       (unsigned short *)d_cur_image,
       image_width_macroblocks, image_height_macroblocks);
    CUDA_ERRCK

    /* std::cout memory footprint */
    memory_footprint = 0;
    memory_footprint += 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short); //d_sads
    printf("\n#### larger_sad_calc_8 memory_footprint:%d ####\n", memory_footprint);
    /* std::cout memory footprint */

    /* Run the larger-blocks kernels; they aggregate the 4x4 results already
       in d_sads, so ordering after mb_sad_calc matters. */
    hipLaunchKernelGGL(( larger_sad_calc_8), dim3(macroblock_grid),
      dim3(dim3(32, 4)), 0, 0,
       d_sads,
       image_width_macroblocks, image_height_macroblocks);
    CUDA_ERRCK

    /* std::cout memory footprint */
    memory_footprint = 0;
    memory_footprint += 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short); //d_sads
    printf("\n#### larger_sad_calc_16 memory_footprint:%d ####\n", memory_footprint);
    /* std::cout memory footprint */

    hipLaunchKernelGGL(( larger_sad_calc_16), dim3(macroblock_grid),
      dim3(dim3(32, 1)), 0, 0,
       d_sads,
       image_width_macroblocks, image_height_macroblocks);
    CUDA_ERRCK

    pb_SwitchToTimer(&timers, pb_TimerID_COPY);

    /* Transfer SAD data to the host (blocking hipMemcpy doubles as the
       post-kernel synchronization point) */
    hipMemcpy(sads_computed,// + 25 * MAX_POS_PADDED * image_size_macroblocks,
              d_sads,// + 25 * MAX_POS_PADDED * image_size_macroblocks,
              41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short)
              , hipMemcpyDeviceToHost);
    CUDA_ERRCK

    /* Free GPU memory */
    hipFree(d_sads);
    CUDA_ERRCK
    hipUnbindTexture(get_ref());
    CUDA_ERRCK
    hipFreeArray(ref_ary);
    CUDA_ERRCK
    hipFree(d_cur_image);
    CUDA_ERRCK
    pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  }

  /* Print output */
  if (params->outFile)
    {
      pb_SwitchToTimer(&timers, pb_TimerID_IO);
      write_sads(params->outFile,
                 image_width_macroblocks,
                 image_height_macroblocks,
                 sads_computed);
      pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
    }

#if 0
  /* Debugging */
  print_test_sads(sads_computed, image_size_macroblocks);
  write_sads_directly("sad-debug.bin",
                      ref_image->width / 16, ref_image->height / 16,
                      sads_computed);
#endif

  /* Free memory */
  free(sads_computed);
  free_image(ref_image);
  free_image(cur_image);

  pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  pb_PrintTimerSet(&timers);
  pb_FreeParameters(params);

  return 0;
}
6884842ea31ef35bb71343a1272bce8858bd4a60.cu
/***************************************************************************
 *cr
 *cr         (C) Copyright 2007 The Board of Trustees of the
 *cr                      University of Illinois
 *cr                       All Rights Reserved
 *cr
 ***************************************************************************/
// Parboil "sad" benchmark driver (CUDA): computes H.264-style sums of
// absolute differences between two images on the GPU.

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <inttypes.h>
#include <parboil.h>
#include <cuda.h>

#include "sad.h"
#include "sad4.h"
#include "largerBlocks.h"
#include "file.h"
#include "image.h"

// Print-and-continue error check; does NOT abort on failure.
#define CUDA_ERRCK \
  {cudaError_t err = cudaGetLastError(); \
   if (err) fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err)); \
  }

static unsigned short *
load_sads(char *filename);
static void
write_sads(char *filename,
           int image_width_macroblocks,
           int image_height_macroblocks,
           unsigned short *sads);
static void
write_sads_directly(char *filename,
                    int width,
                    int height,
                    unsigned short *sads);

/* FILE I/O */

// Load reference 4x4-block SAD values from `filename`.
// File layout (written by the matching generator): two 16-bit dimensions in
// macroblocks, then 25 per-macroblock SAD vectors for the larger block types
// (skipped here), then the 16 4x4 vectors this function returns.
// MAX_POS_PADDED and the read16u helper come from sad.h / file.h (not shown
// here). NOTE(review): fread/malloc results are unchecked — a short or
// missing payload yields uninitialized data.
unsigned short *
load_sads(char *filename)
{
  FILE *infile;
  unsigned short *sads;
  int w;
  int h;
  int sads_per_block;

  infile = fopen(filename, "r");

  if (!infile)
    {
      fprintf(stderr, "Cannot find file '%s'\n", filename);
      exit(-1);
    }

  /* Read image dimensions (measured in macroblocks) */
  w = read16u(infile);
  h = read16u(infile);

  /* Read SAD values.  Only interested in the 4x4 SAD values, which are
   * at the end of the file. */
  sads_per_block = MAX_POS_PADDED * (w * h);
  fseek(infile, 25 * sads_per_block * sizeof(unsigned short), SEEK_CUR);

  sads = (unsigned short *)malloc(sads_per_block * 16 * sizeof(unsigned short));
  fread(sads, sizeof(unsigned short), sads_per_block * 16, infile);
  fclose(infile);

  return sads;
}

/* Compare the reference SADs to the expected SADs.
 * Only the 4x4 sub-block SADs (the last 16 vectors per macroblock) are
 * compared; prints "Success." or a mismatch message to stdout. */
void
check_sads(unsigned short *sads_reference,
           unsigned short *sads_computed,
           int image_size_macroblocks)
{
  int block;

  /* Check the 4x4 SAD values.  These are in sads_reference.
   * Ignore the data at the beginning of sads_computed. */
  sads_computed += 25 * MAX_POS_PADDED * image_size_macroblocks;

  for (block = 0; block < image_size_macroblocks; block++)
    {
      int subblock;

      for (subblock = 0; subblock < 16; subblock++)
        {
          int sad_index;

          for (sad_index = 0; sad_index < MAX_POS; sad_index++)
            {
              int index =
                (block * 16 + subblock) * MAX_POS_PADDED + sad_index;

              if (sads_reference[index] != sads_computed[index])
                {
#if 0
                  /* Print exactly where the mismatch was seen */
                  printf("M %3d %2d %4d (%d = %d)\n", block, subblock, sad_index, sads_reference[index], sads_computed[index]);
#else
                  goto mismatch;
#endif
                }
            }
        }
    }

  printf("Success.\n");
  return;

 mismatch:
  printf("Computed SADs do not match expected values.\n");
}

/* Extract the SAD data for a particular block type for a particular
 * macroblock from the array of SADs of that block type.
 * Writes `count` vectors of MAX_POS 16-bit values to `outfile`. */
static inline void
write_subblocks(FILE *outfile, unsigned short *subblock_array, int macroblock,
                int count)
{
  int block;
  int pos;

  for (block = 0; block < count; block++)
    {
      unsigned short *vec = subblock_array +
        (block + macroblock * count) * MAX_POS_PADDED;

      /* Write all SADs for this sub-block */
      for (pos = 0; pos < MAX_POS; pos++)
        write16u(outfile, *vec++);
    }
}

/* Write some SAD data to a file for output checking.
 *
 * All SAD values for six rows of macroblocks are written.
 * The six rows consist of the top two, middle two, and bottom two
 * image rows.
 * SAD_TYPE_IX/SAD_TYPE_CT map a block type (1..7) to its offset/count
 * in the flat SAD array (defined in sad.h). */
void
write_sads(char *filename,
           int mb_width,
           int mb_height,
           unsigned short *sads)
{
  FILE *outfile = fopen(filename, "w");
  int mbs = mb_width * mb_height;
  int row_indir;
  int row_indices[6] = {0, 1,
                        mb_height / 2 - 1, mb_height / 2,
                        mb_height - 2, mb_height - 1};

  if (outfile == NULL)
    {
      fprintf(stderr, "Cannot open output file\n");
      exit(-1);
    }

  /* Write the number of output macroblocks */
  write32u(outfile, mb_width * 6);

  /* Write zeros */
  write32u(outfile, 0);

  /* Each row */
  for (row_indir = 0; row_indir < 6; row_indir++)
    {
      int row = row_indices[row_indir];

      /* Each block in row */
      int block;
      for (block = mb_width * row; block < mb_width * (row + 1); block++)
        {
          int blocktype;

          /* Write SADs for all sub-block types */
          for (blocktype = 1; blocktype <= 7; blocktype++)
            write_subblocks(outfile,
                            sads + SAD_TYPE_IX(blocktype, mbs),
                            block,
                            SAD_TYPE_CT(blocktype));
        }
    }

  fclose(outfile);
}

/* FILE I/O for debugging */

// Dump the entire SAD array (41 vectors per macroblock — presumably
// 16+8+8+4+2+2+1 across the 7 block types; confirm in sad.h) with the
// dimensions as a header. Debug aid only.
static void
write_sads_directly(char *filename,
                    int width,
                    int height,
                    unsigned short *sads)
{
  FILE *f = fopen(filename, "w");
  int n;

  write16u(f, width);
  write16u(f, height);
  for (n = 0; n < 41 * MAX_POS_PADDED * (width * height); n++)
    {
      write16u(f, sads[n]);
    }
  fclose(f);
}

// Print one SAD value per sub-block of `macroblock` at a fixed search
// position, for eyeballing during debugging.
static void
print_test_sad_vector(unsigned short *base, int macroblock, int count)
{
  int n;
  int searchpos = 17*33+17;
  for (n = 0; n < count; n++)
    printf(" %d", base[(count * macroblock + n) * MAX_POS_PADDED + searchpos]);
}

// Print the test vector above for every block type of macroblock 5.
static void
print_test_sads(unsigned short *sads_computed,
                int mbs)
{
  int macroblock = 5;
  int blocktype;

  for (blocktype = 1; blocktype <= 7; blocktype++)
    {
      printf("%d:", blocktype);
      print_test_sad_vector(sads_computed + SAD_TYPE_IX(blocktype, mbs),
                            macroblock, SAD_TYPE_CT(blocktype));
      puts("\n");
    }
}

/* MAIN */

// Driver: load two images, run the 4x4 SAD kernel followed by the two
// larger-block aggregation kernels on the device, copy results back, and
// optionally write them out. Image dimensions must be multiples of 16.
int
main(int argc, char **argv)
{
  struct image_i16 *ref_image;
  struct image_i16 *cur_image;
  unsigned short *sads_computed; /* SADs generated by the program */

  int image_size_bytes;
  int image_width_macroblocks, image_height_macroblocks;
  int image_size_macroblocks;

  struct pb_TimerSet timers;
  struct pb_Parameters *params;

  pb_InitializeTimerSet(&timers);
  params = pb_ReadParameters(&argc, argv);

  if (pb_Parameters_CountInputs(params) != 2)
    {
      fprintf(stderr, "Expecting two input filenames\n");
      exit(-1);
    }

  /* Read input files */
  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  ref_image = load_image(params->inpFiles[0]);
  cur_image = load_image(params->inpFiles[1]);
  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

  /* Compute parameters, allocate memory */
  if ((ref_image->width != cur_image->width) ||
      (ref_image->height != cur_image->height))
    {
      fprintf(stderr, "Input images must be the same size\n");
      exit(-1);
    }
  if ((ref_image->width % 16) || (ref_image->height % 16))
    {
      fprintf(stderr, "Input image size must be an integral multiple of 16\n");
      exit(-1);
    }

  image_size_bytes = ref_image->width * ref_image->height * sizeof(short);
  image_width_macroblocks = ref_image->width >> 4;
  image_height_macroblocks = ref_image->height >> 4;
  image_size_macroblocks = image_width_macroblocks * image_height_macroblocks;

  sads_computed = (unsigned short *)
    malloc(41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(short));

  /* Run the kernel code */
  {
    struct cudaArray *ref_ary;  /* Reference image on the device */
    short *d_cur_image;         /* Current image on the device */
    unsigned short *d_sads;     /* SADs on the device */
    dim3 macroblock_grid(image_width_macroblocks, image_height_macroblocks);

    pb_SwitchToTimer(&timers, pb_TimerID_COPY);
    cudaMalloc((void **)&d_cur_image, image_size_bytes);
    CUDA_ERRCK
    /* get_ref() returns the texture reference bound below (declared in a
       project header). */
    cudaMallocArray(&ref_ary, &get_ref().channelDesc,
                    ref_image->width, ref_image->height);
    CUDA_ERRCK

    /* Transfer current image to device */
    cudaMemcpy(d_cur_image, cur_image->data, image_size_bytes,
               cudaMemcpyHostToDevice);
    CUDA_ERRCK

    /* Transfer reference image to device */
    cudaMemcpy2DToArray(ref_ary, 0, 0, ref_image->data,
                        ref_image->width * sizeof(unsigned short),
                        ref_image->width * sizeof(unsigned short),
                        ref_image->height,
                        cudaMemcpyHostToDevice);
    CUDA_ERRCK
    cudaBindTextureToArray(get_ref(), ref_ary);
    CUDA_ERRCK

    /* Allocate SAD data on the device */
    cudaMalloc((void **)&d_sads, 41 * MAX_POS_PADDED *
               image_size_macroblocks * sizeof(unsigned short));
    CUDA_ERRCK
    cudaMemset(d_sads, 0, 41 * MAX_POS_PADDED *
               image_size_macroblocks * sizeof(unsigned short));
    CUDA_ERRCK

    pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);

    /* std::cout memory footprint */
    int memory_footprint = 0;
    memory_footprint += 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short); //d_sads
    memory_footprint += image_size_bytes; //d_cur_image
    printf("\n#### mb_sad_calc memory_footprint:%d ####\n", memory_footprint);
    /* std::cout memory footprint */

    /* Run the 4x4 kernel (grid/block sizing macros from sad4.h) */
    mb_sad_calc<<<dim3(CEIL(ref_image->width / 4, THREADS_W),
                       CEIL(ref_image->height / 4, THREADS_H)),
      dim3(CEIL(MAX_POS, POS_PER_THREAD) * THREADS_W * THREADS_H),
      SAD_LOC_SIZE_BYTES>>>
      (d_sads,
       (unsigned short *)d_cur_image,
       image_width_macroblocks, image_height_macroblocks);
    CUDA_ERRCK

    /* std::cout memory footprint */
    memory_footprint = 0;
    memory_footprint += 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short); //d_sads
    printf("\n#### larger_sad_calc_8 memory_footprint:%d ####\n", memory_footprint);
    /* std::cout memory footprint */

    /* Run the larger-blocks kernels; they aggregate the 4x4 results already
       in d_sads, so ordering after mb_sad_calc matters. */
    larger_sad_calc_8<<<macroblock_grid,
      dim3(32, 4)>>>
      (d_sads,
       image_width_macroblocks, image_height_macroblocks);
    CUDA_ERRCK

    /* std::cout memory footprint */
    memory_footprint = 0;
    memory_footprint += 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short); //d_sads
    printf("\n#### larger_sad_calc_16 memory_footprint:%d ####\n", memory_footprint);
    /* std::cout memory footprint */

    larger_sad_calc_16<<<macroblock_grid,
      dim3(32, 1)>>>
      (d_sads,
       image_width_macroblocks, image_height_macroblocks);
    CUDA_ERRCK

    pb_SwitchToTimer(&timers, pb_TimerID_COPY);

    /* Transfer SAD data to the host (blocking cudaMemcpy doubles as the
       post-kernel synchronization point) */
    cudaMemcpy(sads_computed,// + 25 * MAX_POS_PADDED * image_size_macroblocks,
               d_sads,// + 25 * MAX_POS_PADDED * image_size_macroblocks,
               41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short)
               , cudaMemcpyDeviceToHost);
    CUDA_ERRCK

    /* Free GPU memory */
    cudaFree(d_sads);
    CUDA_ERRCK
    cudaUnbindTexture(get_ref());
    CUDA_ERRCK
    cudaFreeArray(ref_ary);
    CUDA_ERRCK
    cudaFree(d_cur_image);
    CUDA_ERRCK
    pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  }

  /* Print output */
  if (params->outFile)
    {
      pb_SwitchToTimer(&timers, pb_TimerID_IO);
      write_sads(params->outFile,
                 image_width_macroblocks,
                 image_height_macroblocks,
                 sads_computed);
      pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
    }

#if 0
  /* Debugging */
  print_test_sads(sads_computed, image_size_macroblocks);
  write_sads_directly("sad-debug.bin",
                      ref_image->width / 16, ref_image->height / 16,
                      sads_computed);
#endif

  /* Free memory */
  free(sads_computed);
  free_image(ref_image);
  free_image(cur_image);

  pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  pb_PrintTimerSet(&timers);
  pb_FreeParameters(params);

  return 0;
}